Example 1
    def test_backup_and_replication(self):
        node = get_new_node('test')
        replica = get_new_node('repl')

        node.init(allows_streaming=True)
        node.start()
        node.psql('postgres', 'create table abc(a int, b int)')
        node.psql('postgres', 'insert into abc values (1, 2)')
        node.backup('my_backup')

        replica.init_from_backup(node, 'my_backup', has_streaming=True)
        replica.start()
        res = replica.execute('postgres', 'select * from abc')
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0], (1, 2))

        # Insert into master node
        node.psql('postgres', 'insert into abc values (3, 4)')
        # Wait until data synchronizes
        node.poll_query_until(
            'postgres', 'SELECT pg_current_xlog_location() <= replay_location '
            'FROM pg_stat_replication WHERE application_name = \'%s\'' %
            replica.name)
        # Check that this record has reached the replica
        res = replica.execute('postgres', 'select * from abc')
        self.assertEqual(len(res), 2)
        self.assertEqual(res[1], (3, 4))

        node.stop()
        replica.stop()
Example 2
    def test_replication(self):
        """Tests how pg_pathman works with replication"""
        node = get_new_node('master')
        replica = get_new_node('repl')

        try:
            # initialize master server
            node = self.start_new_pathman_cluster(allows_streaming=True)
            node.backup('my_backup')

            # initialize replica from backup
            replica.init_from_backup(node, 'my_backup', has_streaming=True)
            replica.start()

            # initialize pg_pathman extension and some test data
            self.init_test_data(node)

            # wait until replica catches up
            self.catchup_replica(node, replica)

            # check that results are equal
            self.assertEqual(
                node.psql('postgres', 'explain (costs off) select * from abc'),
                replica.psql('postgres',
                             'explain (costs off) select * from abc'))

            # enable parent and see if it is enabled in replica
            node.psql('postgres', 'select enable_parent(\'abc\')')

            self.catchup_replica(node, replica)
            self.assertEqual(
                node.psql('postgres', 'explain (costs off) select * from abc'),
                replica.psql('postgres',
                             'explain (costs off) select * from abc'))
            self.assertEqual(node.psql('postgres', 'select * from abc'),
                             replica.psql('postgres', 'select * from abc'))
            self.assertEqual(
                node.execute('postgres', 'select count(*) from abc')[0][0],
                300000)

            # check that direct UPDATE in pathman_config_params invalidates
            # cache
            node.psql(
                'postgres',
                'update pathman_config_params set enable_parent = false')
            self.catchup_replica(node, replica)
            self.assertEqual(
                node.psql('postgres', 'explain (costs off) select * from abc'),
                replica.psql('postgres',
                             'explain (costs off) select * from abc'))
            self.assertEqual(node.psql('postgres', 'select * from abc'),
                             replica.psql('postgres', 'select * from abc'))
            self.assertEqual(
                node.execute('postgres', 'select count(*) from abc')[0][0], 0)
        except Exception as e:
            self.printlog(node.logs_dir + '/postgresql.log')
            self.printlog(replica.logs_dir + '/postgresql.log')
            raise e
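Examples 2 and 39 call two helpers, init_test_data() and catchup_replica(), that are not shown on this page. A minimal sketch of what they might look like, inferred from the poll loop in Example 1 and the unpartitioned test data in Example 34 (both method bodies are assumptions, not the originals):

    def init_test_data(self, node):
        """Hypothetical sketch: create test data, keeping all rows in the parent table."""
        node.safe_psql('postgres', """
            create table abc(id serial, t text);
            insert into abc select generate_series(1, 300000);
            select create_hash_partitions('abc', 'id', 3, partition_data := false);
        """)

    def catchup_replica(self, master, replica):
        """Hypothetical sketch: block until the replica has replayed the master's WAL."""
        master.poll_query_until(
            'postgres',
            'SELECT pg_current_xlog_location() <= replay_location '
            'FROM pg_stat_replication WHERE application_name = \'%s\'' %
            replica.name)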
Example 3
    def test_foreign_table(self):
        """Test foreign tables"""

        # Start master server
        master = get_new_node('test')
        master.init()
        master.append_conf(
            'postgresql.conf',
            'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n')
        master.start()
        master.psql('postgres', 'create extension pg_pathman')
        master.psql('postgres', 'create extension postgres_fdw')
        master.psql(
            'postgres', '''create table abc(id serial, name text);
            select create_range_partitions('abc', 'id', 0, 10, 2)''')

        # Current user name (needed for user mapping)
        username = master.execute('postgres', 'select current_user')[0][0]

        # Start foreign server
        fserv = get_new_node('fserv')
        fserv.init().start()
        fserv.safe_psql('postgres',
                        'create table ftable(id serial, name text)')
        fserv.safe_psql('postgres',
                        'insert into ftable values (25, \'foreign\')')

        # Create foreign table and attach it to partitioned table
        master.safe_psql(
            'postgres', '''create server fserv
            foreign data wrapper postgres_fdw
            options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(
                fserv.port))
        master.safe_psql(
            'postgres', '''create user mapping for {0}
            server fserv
            options (user '{0}')'''.format(username))
        master.safe_psql(
            'postgres', '''import foreign schema public limit to (ftable)
            from server fserv into public''')
        master.safe_psql(
            'postgres',
            'select attach_range_partition(\'abc\', \'ftable\', 20, 30)')

        # Check that the table is attached to the partitioned table
        self.assertEqual(master.safe_psql('postgres', 'select * from ftable'),
                         b'25|foreign\n')

        # Check that we can successfully insert new data into foreign partition
        master.safe_psql('postgres', 'insert into abc values (26, \'part\')')
        self.assertEqual(
            master.safe_psql('postgres', 'select * from ftable order by id'),
            b'25|foreign\n26|part\n')

        # Testing drop partitions (including foreign partitions)
        master.safe_psql('postgres', 'select drop_partitions(\'abc\')')
Example 4
    def test_double_start(self):
        with get_new_node('test') as node:
            node.init().start()

            # can't start node more than once
            with self.assertRaises(StartNodeException):
                node.start()
Example 5
    def test_poll_query_until(self):
        with get_new_node() as node:
            node.init().start()

            get_time = 'select extract(epoch from now())'
            check_time = 'select extract(epoch from now()) - {} >= 5'

            start_time = node.execute(get_time)[0][0]
            node.poll_query_until(query=check_time.format(start_time))
            end_time = node.execute(get_time)[0][0]

            self.assertTrue(end_time - start_time >= 5)

            # check 0 columns
            with self.assertRaises(QueryException):
                node.poll_query_until(
                    query='select from pg_catalog.pg_class limit 1')

            # check None, fail
            with self.assertRaises(QueryException):
                node.poll_query_until(query='create table abc (val int)')

            # check None, ok
            node.poll_query_until(query='create table def()',
                                  expected=None)  # returns nothing

            # check 0 rows equivalent to expected=None
            node.poll_query_until(
                query='select * from pg_catalog.pg_class where true = false',
                expected=None)

            # check arbitrary expected value, fail
            with self.assertRaises(TimeoutException):
                node.poll_query_until(query='select 3',
                                      expected=1,
                                      max_attempts=3,
                                      sleep_time=0.01)

            # check arbitrary expected value, ok
            node.poll_query_until(query='select 2', expected=2)

            # check timeout
            with self.assertRaises(TimeoutException):
                node.poll_query_until(query='select 1 > 2',
                                      max_attempts=3,
                                      sleep_time=0.01)

            # check ProgrammingError, fail
            with self.assertRaises(testgres.ProgrammingError):
                node.poll_query_until(query='dummy1')

            # check ProgrammingError, ok
            with self.assertRaises(TimeoutException):
                node.poll_query_until(query='dummy2',
                                      max_attempts=3,
                                      sleep_time=0.01,
                                      suppress={testgres.ProgrammingError})

            # check 1 arg, ok
            node.poll_query_until('select true')
Example 6
    def test_double_init(self):
        with get_new_node('test') as node:
            node.init()

            # can't initialize node more than once
            with self.assertRaises(InitNodeException):
                node.init()
Example 7
def pg_start(name, snapshot, read_only):
    """Start the database"""
    base_dir = os.path.join(zenith_base_dir, 'pg', name)
    node = testgres.get_new_node(name, base_dir=base_dir)
    # TODO pass snapshot as a parameter
    node.start()
Example 8
    def test_status(self):
        # check NodeStatus cast to bool
        self.assertTrue(NodeStatus.Running)

        # check NodeStatus cast to bool
        self.assertFalse(NodeStatus.Stopped)

        # check NodeStatus cast to bool
        self.assertFalse(NodeStatus.Uninitialized)

        # check statuses after each operation
        with get_new_node('test') as node:
            self.assertEqual(node.get_pid(), 0)
            self.assertEqual(node.status(), NodeStatus.Uninitialized)

            node.init()

            self.assertEqual(node.get_pid(), 0)
            self.assertEqual(node.status(), NodeStatus.Stopped)

            node.start()

            self.assertTrue(node.get_pid() > 0)
            self.assertEqual(node.status(), NodeStatus.Running)

            node.stop()

            self.assertEqual(node.get_pid(), 0)
            self.assertEqual(node.status(), NodeStatus.Stopped)

            node.cleanup()

            self.assertEqual(node.get_pid(), 0)
            self.assertEqual(node.status(), NodeStatus.Uninitialized)
Example 9
def is_postgres_fdw_ready():
    with get_new_node().init().start() as node:
        result = node.execute("""
            select count(*) from pg_available_extensions where name = 'postgres_fdw'
        """)

        return result[0][0] > 0
Example 10
    def test_psql(self):
        with get_new_node('test') as node:
            node.init().start()

            # check default params
            with self.assertRaises(QueryException):
                node.psql('postgres')

            # check returned values
            res = node.psql('postgres', 'select 1')
            self.assertEqual(res[0], 0)
            self.assertEqual(res[1], b'1\n')
            self.assertEqual(res[2], b'')

            # check returned values
            res = node.safe_psql('postgres', 'select 1')
            self.assertEqual(res, b'1\n')

            # check feeding input
            node.safe_psql('postgres', 'create table horns (w int)')
            node.safe_psql('postgres',
                           'copy horns from stdin (format csv)',
                           input=b"1\n2\n3\n\.\n")
            sum = node.safe_psql('postgres', 'select sum(w) from horns')
            self.assertEqual(sum, b'6\n')
            node.safe_psql('postgres', 'drop table horns')

            node.stop()

            # check psql on stopped node
            with self.assertRaises(QueryException):
                node.safe_psql('postgres', 'select 1')
Example 11
    def test_status(self):
        self.assertTrue(NodeStatus.Running)
        self.assertFalse(NodeStatus.Stopped)
        self.assertFalse(NodeStatus.Uninitialized)

        # check statuses after each operation
        with get_new_node() as node:
            self.assertEqual(node.pid, 0)
            self.assertEqual(node.status(), NodeStatus.Uninitialized)

            node.init()

            self.assertEqual(node.pid, 0)
            self.assertEqual(node.status(), NodeStatus.Stopped)

            node.start()

            self.assertNotEqual(node.pid, 0)
            self.assertEqual(node.status(), NodeStatus.Running)

            node.stop()

            self.assertEqual(node.pid, 0)
            self.assertEqual(node.status(), NodeStatus.Stopped)

            node.cleanup()

            self.assertEqual(node.pid, 0)
            self.assertEqual(node.status(), NodeStatus.Uninitialized)
Example 12
    def make_simple_node(self,
                         base_dir=None,
                         set_replication=False,
                         initdb_params=None,
                         pg_options=None):

        # avoid mutable default arguments
        initdb_params = initdb_params or []
        pg_options = pg_options or {}

        real_base_dir = os.path.join(self.tmp_path, base_dir)
        shutil.rmtree(real_base_dir, ignore_errors=True)

        node = get_new_node('test', base_dir=real_base_dir)
        node.init(initdb_params=initdb_params)

        # Sane default parameters (testgres sets fsync = off by default)
        node.append_conf("postgresql.auto.conf", "shared_buffers = 10MB")
        node.append_conf("postgresql.auto.conf", "fsync = on")
        node.append_conf("postgresql.auto.conf", "wal_level = minimal")

        node.append_conf("postgresql.auto.conf",
                         "log_line_prefix = '%t [%p]: [%l-1] '")
        node.append_conf("postgresql.auto.conf", "log_statement = none")
        node.append_conf("postgresql.auto.conf", "log_duration = on")
        node.append_conf("postgresql.auto.conf",
                         "log_min_duration_statement = 0")
        node.append_conf("postgresql.auto.conf", "log_connections = on")
        node.append_conf("postgresql.auto.conf", "log_disconnections = on")

        # Apply given parameters
        for key, value in six.iteritems(pg_options):
            node.append_conf("postgresql.auto.conf", "%s = %s" % (key, value))

        # Allow replication in pg_hba.conf
        if set_replication:
            node.set_replication_conf()
        return node
Example 13
    def test_incorrect_catchup(self):
        with get_new_node() as node:
            node.init(allow_streaming=True).start()

            # node has no master, can't catch up
            with self.assertRaises(TestgresException):
                node.catchup()
Example 14
    def test_backup_wrong_xlog_method(self):
        with get_new_node() as node:
            node.init(allow_streaming=True).start()

            with self.assertRaises(BackupException,
                                   msg='Invalid xlog_method "wrong"'):
                node.backup(xlog_method='wrong')
Example 16
 def test_users(self):
     with get_new_node('master') as node:
         node.init().start()
         node.psql('postgres', 'create role test_user login')
         value = node.safe_psql('postgres',
                                'select 1',
                                username='test_user')
         self.assertEqual(value, b'1\n')
Example 17
 def start_new_pathman_cluster(self, name='test', allows_streaming=False):
     node = get_new_node(name)
     node.init(allows_streaming=allows_streaming)
     node.append_conf('postgresql.conf',
                      'shared_preload_libraries=\'pg_pathman\'\n')
     node.start()
     node.psql('postgres', 'create extension pg_pathman')
     return node
Example 18
    def test_node_exit(self):
        base_dir = None

        with self.assertRaises(QueryException):
            with get_new_node().init() as node:
                base_dir = node.base_dir
                node.safe_psql('select 1')

        # we should save the DB for "debugging"
        self.assertTrue(os.path.exists(base_dir))
        rmtree(base_dir, ignore_errors=True)

        with get_new_node().init() as node:
            base_dir = node.base_dir

        # should have been removed by default
        self.assertFalse(os.path.exists(base_dir))
Example 19
 def test_start_stop(self):
     node = get_new_node('test')
     node.init()
     node.start()
     res = node.execute('postgres', 'select 1')
     self.assertEqual(len(res), 1)
     self.assertEqual(res[0][0], 1)
     node.stop()
Example 20
    def start_node(self):
        node = get_new_node()
        node.init()
        node.append_conf("shared_preload_libraries='sr_plan'\n")
        node.start()
        node.psql('create extension sr_plan')
        node.psql(sql_init)

        return node
Example 21
def pg_create(name, storage_name, snapshot, no_start):
    """Initialize the database"""
    base_dir = os.path.join(zenith_base_dir, 'pg', name)
    node = testgres.get_new_node(name, base_dir=base_dir)
    # TODO: skip init; instead, link the node with storage or load it from a snapshot
    node.init()
    if not no_start:
        node.start()
Example 22
	def start_new_pathman_cluster(self, name='test', allows_streaming=False):
		node = get_new_node(name)
		node.init(allows_streaming=allows_streaming)
		node.append_conf(
			'postgresql.conf',
			'shared_preload_libraries=\'pg_pathman\'\n')
		node.start()
		node.psql('postgres', 'create extension pg_pathman')
		return node
Example 23
    def test_dump(self):
        query_create = 'create table test as select generate_series(1, 2) as val'
        query_select = 'select * from test order by val asc'

        with get_new_node().init().start() as node1:

            node1.execute(query_create)
            for format in ['plain', 'custom', 'directory', 'tar']:
                with removing(node1.dump(format=format)) as dump:
                    with get_new_node().init().start() as node3:
                        if format == 'directory':
                            self.assertTrue(os.path.isdir(dump))
                        else:
                            self.assertTrue(os.path.isfile(dump))
                        # restore dump
                        node3.restore(filename=dump)
                        res = node3.execute(query_select)
                        self.assertListEqual(res, [(1, ), (2, )])
Example 24
    def test_custom_init(self):
        with get_new_node() as node:
            # enable page checksums
            node.init(initdb_params=['-k']).start()

        with get_new_node() as node:
            node.init(
                allow_streaming=True,
                initdb_params=['--auth-local=reject', '--auth-host=reject'])

            hba_file = os.path.join(node.data_dir, 'pg_hba.conf')
            with open(hba_file, 'r') as conf:
                lines = conf.readlines()

                # check number of lines
                self.assertGreaterEqual(len(lines), 6)

                # there should be no trust entries at all
                self.assertFalse(any('trust' in s for s in lines))
Example 25
    def test_replication_slots(self):
        with get_new_node() as node:
            node.init(allow_streaming=True).start()

            with node.replicate(slot='slot1').start() as replica:
                replica.execute('select 1')

                # cannot create new slot with the same name
                with self.assertRaises(TestgresException):
                    node.replicate(slot='slot1')
Example 26
 def test_pg_ctl_wait_option(self):
     with get_new_node() as node:
         node.init().start(wait=False)
         while True:
             try:
                 node.stop(wait=False)
                 break
             except ExecUtilException:
                 # it's ok to get this exception here since the node
                 # may not have started yet
                 pass
Example 27
    def test_init_after_cleanup(self):
        with get_new_node('test') as node:
            node.init().start()
            node.status()
            node.safe_psql('postgres', 'select 1')

            node.cleanup()

            node.init().start()
            node.status()
            node.safe_psql('postgres', 'select 1')
Example 28
    def test_unix_sockets(self):
        with get_new_node() as node:
            node.init(unix_sockets=False, allow_streaming=True)
            node.start()

            node.execute('select 1')
            node.safe_psql('select 1')

            with node.replicate().start() as r:
                r.execute('select 1')
                r.safe_psql('select 1')
Example 29
    def test_auto_name(self):
        with get_new_node().init(allow_streaming=True).start() as m:
            with m.replicate().start() as r:

                # check that nodes are running
                self.assertTrue(m.status())
                self.assertTrue(r.status())

                # check their names
                self.assertNotEqual(m.name, r.name)
                self.assertTrue('testgres' in m.name)
                self.assertTrue('testgres' in r.name)
Example 30
    def test_backup_exhaust(self):
        with get_new_node() as node:
            node.init(allow_streaming=True).start()

            with node.backup(xlog_method='fetch') as backup:

                # exhaust backup by creating new node
                with backup.spawn_primary():
                    pass

                # now let's try to create one more node
                with self.assertRaises(BackupException):
                    backup.spawn_primary()
Example 31
    def test_control_data(self):
        with get_new_node() as node:

            # node is not initialized yet
            with self.assertRaises(ExecUtilException):
                node.get_control_data()

            node.init()
            data = node.get_control_data()

            # check returned dict
            self.assertIsNotNone(data)
            self.assertTrue(any('pg_control' in s for s in data.keys()))
Example 32
    def test_promotion(self):
        with get_new_node() as master:
            master.init().start()
            master.safe_psql('create table abc(id serial)')

            with master.replicate().start() as replica:
                master.stop()
                replica.promote()

                # the promoted standby becomes a writable master
                replica.safe_psql('insert into abc values (1)')
                res = replica.safe_psql('select * from abc')
                self.assertEqual(res, b'1\n')
Example 33
    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))

        self.node = tg.get_new_node("pglist",
                                    os.path.join(current_dir, "tmp_install"))
        try:
            self.node.init()
            self.node.append_conf("postgresql.conf",
                                  "shared_buffers='4GB'\n"
                                  "maintenance_work_mem='2GB'\n"
                                  "max_wal_size='2GB'\n"
                                  "work_mem='50MB'")
            self.node.start()
        except Exception as e:
            self.printlog(os.path.join(self.node.logs_dir, "postgresql.log"))
            raise e
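printlog() above is not part of testgres either; assuming it simply dumps the node's server log to stdout for debugging, a minimal sketch could be:

    def printlog(self, path):
        """Hypothetical sketch: print a server log file to aid post-mortem debugging."""
        with open(path) as log:
            print(log.read())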
Example 34
    def start_new_pathman_cluster(self, allow_streaming=False, test_data=False):
        node = get_new_node()
        node.init(allow_streaming=allow_streaming)
        node.append_conf("shared_preload_libraries='pg_pathman'\n")
        node.start()
        node.psql('create extension pg_pathman')

        if test_data:
            node.safe_psql("""
                create table abc(id serial, t text);
                insert into abc select generate_series(1, 300000);
                select create_hash_partitions('abc', 'id', 3, partition_data := false);
            """)

            node.safe_psql('vacuum analyze')

        return node
Example 35
    data_prefix = "/tmp/pathman_check_update"
    if os.path.isdir(data_prefix):
        shutil.rmtree(data_prefix)
    dump_new_path = os.path.join(data_prefix, 'dump_new.sql')
    dump_updated_path = os.path.join(data_prefix, 'dump_updated.sql')
    dump_diff_path = os.path.join(data_prefix, 'dump.diff')
    pathman_objs_new_path = os.path.join(data_prefix, 'pathman_objects_new.sql')
    pathman_objs_updated_path = os.path.join(data_prefix, 'pathman_objects_updated.sql')
    pathman_objs_diff_path = os.path.join(data_prefix, 'pathman_objs.diff')
    tmp_pathman_path = os.path.join(data_prefix, "pg_pathman")

    shutil.copytree(repo_dir, tmp_pathman_path)

    reinstall_pathman(tmp_pathman_path, new_branch)
    with testgres.get_new_node('brand_new') as node:
        node.init()
        node.append_conf("shared_preload_libraries='pg_pathman'\n")
        node.start()
        node.safe_psql('postgres', run_sql)
        node.dump(dump_new_path, 'postgres')
        # the default user is the current OS user
        shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_new_path))
        node.stop()

    # now install old version...
    reinstall_pathman(tmp_pathman_path, old_branch)
    with testgres.get_new_node('updated') as node:
        node.init()
        node.append_conf("shared_preload_libraries='pg_pathman'\n")
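shell() and reinstall_pathman() are project-specific helpers that are not shown here; shell() presumably just runs its argument through the system shell, roughly like this (an assumption, not the original):

import subprocess

def shell(cmd):
    # Hypothetical sketch: run a command via the system shell, raising on failure
    subprocess.check_call(cmd, shell=True)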
Example 36
	def test_pg_dump(self):
		"""
		Test dump and restore of partitioned tables via the pg_dump and pg_restore tools.

		Test strategy:
		- test range and hash partitioned tables;
		- for each partitioned table, check the following on the restored side:
			* constraints related to partitioning;
			* init callback function and enable parent flag;
			* number of rows in parent and child tables;
			* plan validity of a simple SELECT query on the partitioned table;
		- check dumping with the following pg_dump parameters:
			* format = plain | custom;
			* use of INSERTs and COPY.
		- all test cases are carried out on tables with half of the data located
			in the parent table and the rest in child tables.
		"""

		import subprocess

		# Init and start postgres instance with preload pg_pathman module
		node = get_new_node('test')
		node.init()
		node.append_conf(
			'postgresql.conf',
			"""
			shared_preload_libraries=\'pg_pathman\'
			pg_pathman.override_copy=false
			""")
		node.start()

		# Init two databases: initial and copy
		node.psql('postgres', 'create database initial')
		node.psql('postgres', 'create database copy')
		node.psql('initial', 'create extension pg_pathman')

		# Create and fill partitioned tables in the initial database
		with node.connect('initial') as con:

			# create and initially fill tables
			con.execute('create table range_partitioned (i integer not null)')
			con.execute('insert into range_partitioned select i from generate_series(1, 500) i')
			con.execute('create table hash_partitioned (i integer not null)')
			con.execute('insert into hash_partitioned select i from generate_series(1, 500) i')

			# partition tables, keeping data in the base tables;
			# the enable_parent parameter automatically becomes true
			con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)')
			con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)')

			# fill child tables with the remaining data
			con.execute('insert into range_partitioned select i from generate_series(501, 1000) i')
			con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i')

			# set init callback
			con.execute("""
				create or replace function init_partition_stub_callback(args jsonb)
				returns void as $$
				begin
				end
				$$ language plpgsql;
			""")
			con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')')
			con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')')

			# turn off enable_parent option
			con.execute('select set_enable_parent(\'range_partitioned\', false)')
			con.execute('select set_enable_parent(\'hash_partitioned\', false)')

			con.commit()

		# compare strategies
		CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3)
		def cmp_full(con1, con2):
			"""Compare selection partitions in plan and contents in partitioned tables"""

			plan_query = 'explain (costs off, format json) select * from %s'
			content_query = 'select * from %s order by i'
			table_refs = [
					'range_partitioned',
					'only range_partitioned',
					'hash_partitioned',
					'only hash_partitioned'
			]
			for table_ref in table_refs:
				plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan']
				plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan']
				if ordered(plan_initial) != ordered(plan_copy):
					return PLANS_MISMATCH

				content_initial = [x[0] for x in con1.execute(content_query % table_ref)]
				content_copy = [x[0] for x in con2.execute(content_query % table_ref)]
				if content_initial != content_copy:
					return CONTENTS_MISMATCH

			return CMP_OK

		def turnoff_pathman(node):
			node.psql('initial', 'alter system set pg_pathman.enable to off')
			node.reload()

		def turnon_pathman(node):
			node.psql('initial', 'alter system set pg_pathman.enable to on')
			node.psql('copy', 'alter system set pg_pathman.enable to on')
			node.psql('initial', 'alter system set pg_pathman.override_copy to off')
			node.psql('copy', 'alter system set pg_pathman.override_copy to off')
			node.reload()

		# Test dump/restore from init database to copy functionality
		test_params = [
			(None,
			 None,
			 [node.get_bin_path("pg_dump"),
				"-p {}".format(node.port),
				"initial"],
			 [node.get_bin_path("psql"),
				 "-p {}".format(node.port),
				 "copy"],
			 cmp_full),     # dump as plain text and restore via COPY
			(turnoff_pathman,
			 turnon_pathman,
			 [node.get_bin_path("pg_dump"),
				"-p {}".format(node.port),
				"--inserts",
				"initial"],
			 [node.get_bin_path("psql"),
				 "-p {}".format(node.port),
				 "copy"],
			 cmp_full),   # dump as plain text and restore via INSERTs
			(None,
			 None,
			 [node.get_bin_path("pg_dump"),
				"-p {}".format(node.port),
				"--format=custom",
				"initial"],
			 [node.get_bin_path("pg_restore"),
				 "-p {}".format(node.port),
				 "--dbname=copy"],
			 cmp_full),     # dump in archive format
		]

		try:
			FNULL = open(os.devnull, 'w')

			for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params:

				dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params)))

				if (preproc != None):
					preproc(node)

				# transfer and restore data
				p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE)
				stdoutdata, _ = p1.communicate()
				p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE,
						stdout=FNULL, stderr=FNULL)
				p2.communicate(input=stdoutdata)

				if (postproc != None):
					postproc(node)

				# check validity of data
				with node.connect('initial') as con1, node.connect('copy') as con2:

					# compare plans and contents of initial and copy
					cmp_result = cmp_dbs(con1, con2)
					self.assertNotEqual(cmp_result, PLANS_MISMATCH,
							"mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd)
					self.assertNotEqual(cmp_result, CONTENTS_MISMATCH,
							"mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd)

					# compare enable_parent flag and callback function
					config_params_query = """
						select partrel, enable_parent, init_callback from pathman_config_params
					"""
					config_params_initial, config_params_copy = {}, {}
					for row in con1.execute(config_params_query):
						config_params_initial[row[0]] = row[1:]
					for row in con2.execute(config_params_query):
						config_params_copy[row[0]] = row[1:]
					self.assertEqual(config_params_initial, config_params_copy, \
							"mismatch in pathman_config_params under the command: %s" % dump_restore_cmd)

					# compare constraints on each partition
					constraints_query = """
						select r.relname, c.conname, c.consrc from
							pg_constraint c join pg_class r on c.conrelid=r.oid
							where relname similar to '(range|hash)_partitioned_\\d+'
					"""
					constraints_initial, constraints_copy = {}, {}
					for row in con1.execute(constraints_query):
						constraints_initial[row[0]] = row[1:]
					for row in con2.execute(constraints_query):
						constraints_copy[row[0]] = row[1:]
					self.assertEqual(constraints_initial, constraints_copy, \
							"mismatch in partitions' constraints under the command: %s" % dump_restore_cmd)

				# clear copy database
				node.psql('copy', 'drop schema public cascade')
				node.psql('copy', 'create schema public')
				node.psql('copy', 'drop extension pg_pathman cascade')

		finally:
			FNULL.close()

		# Stop instance and finish work
		node.stop()
		node.cleanup()
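Examples 36, 37 and 44 compare explain (format json) output through an ordered() helper that never appears on this page. A plausible sketch, including the skip_keys parameter used in Example 37, recursively sorts dicts and lists so plan trees compare order-insensitively (assuming values under identical keys have comparable types):

def ordered(obj, skip_keys=None):
    # Hypothetical sketch: normalize nested plan JSON for order-insensitive comparison
    if isinstance(obj, dict):
        return sorted((k, ordered(v, skip_keys))
                      for k, v in obj.items()
                      if skip_keys is None or k not in skip_keys)
    if isinstance(obj, list):
        return sorted(ordered(x, skip_keys) for x in obj)
    return obj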
Example 37
    def test_update_node_plan1(self):
        '''
        Test scan on all partitions when using update node.
        We can't use regression tests here because 9.5 and 9.6 give
        different plans
        '''

        with get_new_node('test_update_node') as node:
            node.init()
            node.append_conf('postgresql.conf', """
                shared_preload_libraries=\'pg_pathman\'
                pg_pathman.override_copy=false
                pg_pathman.enable_partitionrouter=on
            """)
            node.start()

            # Prepare test database
            node.psql('postgres', 'CREATE EXTENSION pg_pathman;')
            node.psql('postgres', 'CREATE SCHEMA test_update_node;')
            node.psql('postgres', 'CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT)')
            node.psql('postgres', 'INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i;')
            node.psql('postgres', "SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10);")

            node.psql('postgres', """
                create or replace function query_plan(query text) returns jsonb as $$
                declare
                        plan jsonb;
                begin
                        execute 'explain (costs off, format json)' || query into plan;
                        return plan;
                end;
                $$ language plpgsql;
            """)

            with node.connect() as con:
                test_query = "UPDATE test_update_node.test_range SET val = 14 WHERE comment=''15''"
                plan = con.execute('SELECT query_plan(\'%s\')' % test_query)[0][0]
                plan = plan[0]["Plan"]

                # PartitionOverseer
                self.assertEqual(plan["Node Type"], "Custom Scan")
                self.assertEqual(plan["Custom Plan Provider"], 'PartitionOverseer')

                # ModifyTable
                plan = plan["Plans"][0]
                self.assertEqual(plan["Node Type"], "ModifyTable")
                self.assertEqual(plan["Operation"], "Update")
                self.assertEqual(plan["Relation Name"], "test_range")
                self.assertEqual(len(plan["Target Tables"]), 11)

            expected_format = '''
                {
                    "Plans": [
                        {
                            "Plans": [
                                {
                                    "Filter": "(comment = '15'::text)",
                                    "Node Type": "Seq Scan",
                                    "Relation Name": "test_range%s",
                                    "Parent Relationship": "child"
                                }
                            ],
                            "Node Type": "Custom Scan",
                            "Parent Relationship": "child",
                            "Custom Plan Provider": "PartitionRouter"
                        }
                    ],
                    "Node Type": "Custom Scan",
                    "Parent Relationship": "Member",
                    "Custom Plan Provider": "PartitionFilter"
                }
            '''

            for i, f in enumerate([''] + list(map(str, range(1, 10)))):
                num = '_' + f if f else ''
                expected = json.loads(expected_format % num)
                p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias'])
                self.assertEqual(p, ordered(expected))

            node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;')
            node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;')
Example 38
	def test_locks(self):
		"""Test that a session trying to create new partitions waits for other
		sessions if they are doing the same"""

		import threading
		import time

		class Flag:
			def __init__(self, value):
				self.flag = value

			def set(self, value):
				self.flag = value

			def get(self):
				return self.flag

		# There is one flag per thread, showing whether that thread has done its work
		flags = [Flag(False) for i in range(3)]

		# All threads synchronize through this lock
		lock = threading.Lock()

		# Define thread function
		def add_partition(node, flag, query):
			""" We expect that this query will wait until another session
			commits or rolls back"""
			node.safe_psql('postgres', query)
			with lock:
				flag.set(True)

		# Initialize master server
		node = get_new_node('master')

		try:
			node.init()
			node.append_conf(
				'postgresql.conf',
				'shared_preload_libraries=\'pg_pathman\'\n')
			node.start()
			node.safe_psql(
				'postgres',
				'create extension pg_pathman; ' +
				'create table abc(id serial, t text); ' +
				'insert into abc select generate_series(1, 100000); ' +
				'select create_range_partitions(\'abc\', \'id\', 1, 50000);'
			)

			# Start transaction that will create partition
			con = node.connect()
			con.begin()
			con.execute('select append_range_partition(\'abc\')')

			# Start threads that are supposed to add new partitions, then wait
			# some time
			query = [
				'select prepend_range_partition(\'abc\')',
				'select append_range_partition(\'abc\')',
				'select add_range_partition(\'abc\', 500000, 550000)',
			]
			threads = []
			for i in range(3):
				thread = threading.Thread(
					target=add_partition,
					args=(node, flags[i], query[i]))
				threads.append(thread)
				thread.start()
			time.sleep(3)

			# These threads should wait until the current transaction finishes
			with lock:
				for i in range(3):
					self.assertEqual(flags[i].get(), False)

			# Commit transaction. Since then other sessions can create
			# partitions
			con.commit()

			# Now wait until each thread finishes
			for thread in threads:
				thread.join()

			# Check flags; all should be true, meaning the threads have
			# finished
			with lock:
				for i in range(3):
					self.assertEqual(flags[i].get(), True)

			# Check that all partitions are created
			self.assertEqual(
				node.safe_psql(
					'postgres',
					'select count(*) from pg_inherits where inhparent=\'abc\'::regclass'
				),
				b'6\n'
			)
		except Exception as e:
			self.printlog(node.logs_dir + '/postgresql.log')
			raise e
Example 39
	def test_replication(self):
		"""Tests how pg_pathman works with replication"""
		node = get_new_node('master')
		replica = get_new_node('repl')

		try:
			# initialize master server
			node = self.start_new_pathman_cluster(allows_streaming=True)
			node.backup('my_backup')

			# initialize replica from backup
			replica.init_from_backup(node, 'my_backup', has_streaming=True)
			replica.start()

			# initialize pg_pathman extension and some test data
			self.init_test_data(node)

			# wait until replica catches up
			self.catchup_replica(node, replica)

			# check that results are equal
			self.assertEqual(
				node.psql('postgres', 'explain (costs off) select * from abc'),
				replica.psql('postgres', 'explain (costs off) select * from abc')
			)

			# enable parent and see if it is enabled in replica
			node.psql('postgres', 'select enable_parent(\'abc\')')

			self.catchup_replica(node, replica)
			self.assertEqual(
				node.psql('postgres', 'explain (costs off) select * from abc'),
				replica.psql('postgres', 'explain (costs off) select * from abc')
			)
			self.assertEqual(
				node.psql('postgres', 'select * from abc'),
				replica.psql('postgres', 'select * from abc')
			)
			self.assertEqual(
				node.execute('postgres', 'select count(*) from abc')[0][0],
				300000
			)

			# check that direct UPDATE in pathman_config_params invalidates
			# cache
			node.psql(
				'postgres',
				'update pathman_config_params set enable_parent = false')
			self.catchup_replica(node, replica)
			self.assertEqual(
				node.psql('postgres', 'explain (costs off) select * from abc'),
				replica.psql('postgres', 'explain (costs off) select * from abc')
			)
			self.assertEqual(
				node.psql('postgres', 'select * from abc'),
				replica.psql('postgres', 'select * from abc')
			)
			self.assertEqual(
				node.execute('postgres', 'select count(*) from abc')[0][0],
				0
			)
		except Exception as e:
			self.printlog(node.logs_dir + '/postgresql.log')
			self.printlog(replica.logs_dir + '/postgresql.log')
			raise e
Example 40
	def test_foreign_table(self):
		"""Test foreign tables"""

		# Start master server
		master = get_new_node('test')
		master.init()
		master.append_conf(
			'postgresql.conf',
			'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n')
		master.start()
		master.psql('postgres', 'create extension pg_pathman')
		master.psql('postgres', 'create extension postgres_fdw')

		# RANGE partitioning test with FDW:
		#   - create range partitioned table in master
		#   - create foreign server
		#   - create foreign table and insert some data into it
		#   - attach foreign table to partitioned one
		#   - try inserting data into foreign partition via parent
		#   - drop partitions
		master.psql(
			'postgres',
			'''create table abc(id serial, name text);
			select create_range_partitions('abc', 'id', 0, 10, 2)''')

		# Current user name (needed for user mapping)
		username = master.execute('postgres', 'select current_user')[0][0]

		# Start foreign server
		fserv = get_new_node('fserv')
		fserv.init().start()
		fserv.safe_psql('postgres', 'create table ftable(id serial, name text)')
		fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')')

		# Create foreign table and attach it to partitioned table
		master.safe_psql(
			'postgres',
			'''create server fserv
			foreign data wrapper postgres_fdw
			options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port)
		)
		master.safe_psql(
			'postgres',
			'''create user mapping for {0}
			server fserv
			options (user '{0}')'''.format(username)
		)
		master.safe_psql(
			'postgres',
			'''import foreign schema public limit to (ftable)
			from server fserv into public'''
		)
		master.safe_psql(
			'postgres',
			'select attach_range_partition(\'abc\', \'ftable\', 20, 30)')

		# Check that the table is attached to the partitioned table
		self.assertEqual(
			master.safe_psql('postgres', 'select * from ftable'),
			b'25|foreign\n'
		)

		# Check that we can successfully insert new data into foreign partition
		master.safe_psql('postgres', 'insert into abc values (26, \'part\')')
		self.assertEqual(
			master.safe_psql('postgres', 'select * from ftable order by id'),
			b'25|foreign\n26|part\n'
		)

		# Testing drop partitions (including foreign partitions)
		master.safe_psql('postgres', 'select drop_partitions(\'abc\')')

		# HASH partitioning with FDW:
		#   - create hash partitioned table in master
		#   - create foreign table
		#   - replace local partition with foreign one
		#   - insert data
		#   - drop partitions
		master.psql(
			'postgres',
			'''create table hash_test(id serial, name text);
			select create_hash_partitions('hash_test', 'id', 2)''')
		fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)')

		master.safe_psql(
			'postgres',
			'''import foreign schema public limit to (f_hash_test)
			from server fserv into public'''
		)
		master.safe_psql(
			'postgres',
			'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')')
		master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)')

		self.assertEqual(
			master.safe_psql('postgres', 'select * from hash_test'),
			b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n'
		)
		master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')')
Example 41
    def test_locks(self):
        """
        Test that a session trying to create new partitions
        waits for other sessions if they are doing the same
        """

        class Flag:
            def __init__(self, value):
                self.flag = value

            def set(self, value):
                self.flag = value

            def get(self):
                return self.flag

        # There is one flag per thread, showing whether that thread has done its work
        flags = [Flag(False) for i in range(3)]

        # All threads synchronize through this lock
        lock = threading.Lock()

        # Define thread function
        def add_partition(node, flag, query):
            """
            We expect that this query will wait until
            another session commits or rolls back
            """
            node.safe_psql(query)
            with lock:
                flag.set(True)

        # Initialize master server
        with get_new_node() as node:
            node.init()
            node.append_conf("shared_preload_libraries='pg_pathman'")
            node.start()

            node.safe_psql("""
                create extension pg_pathman;
                create table abc(id serial, t text);
                insert into abc select generate_series(1, 100000);
                select create_range_partitions('abc', 'id', 1, 50000);
            """)

            # Start transaction that will create partition
            with node.connect() as con:
                con.begin()
                con.execute("select append_range_partition('abc')")

                # Start threads that are supposed to add new partitions, then
                # wait some time
                query = (
                    "select prepend_range_partition('abc')",
                    "select append_range_partition('abc')",
                    "select add_range_partition('abc', 500000, 550000)",
                )

                threads = []
                for i in range(3):
                    thread = threading.Thread(
                        target=add_partition, args=(node, flags[i], query[i]))
                    threads.append(thread)
                    thread.start()
                time.sleep(3)

                # These threads should wait until the current transaction finishes
                with lock:
                    for i in range(3):
                        self.assertEqual(flags[i].get(), False)

                # Commit transaction. Since then other sessions can create
                # partitions
                con.commit()

            # Now wait until each thread finishes
            for thread in threads:
                thread.join()

            # Check flags; all should be true, meaning the threads have
            # finished
            with lock:
                for i in range(3):
                    self.assertEqual(flags[i].get(), True)

            # Check that all partitions are created
            self.assertEqual(
                node.safe_psql(
                    "select count(*) from pg_inherits where inhparent='abc'::regclass"),
                b'6\n')
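The hand-rolled Flag class guarded by a Lock in Examples 38 and 41 reimplements what threading.Event already provides; the same signalling with the standard primitive would look roughly like this (illustrative names, not the original code):

import threading

done = [threading.Event() for _ in range(3)]

def add_partition(node, event, query):
    # safe_psql blocks until the competing transaction commits or rolls back
    node.safe_psql(query)
    event.set()  # Event.set()/is_set() are thread-safe, so no explicit lock is needed

# While the blocking transaction is open: assert not any(e.is_set() for e in done).
# After committing it, wait for completion with: for e in done: e.wait()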
Example 42
    def test_tablespace(self):
        """ Check tablespace support """

        def check_tablespace(node, tablename, tablespace):
            res = node.execute("select get_tablespace('{}')".format(tablename))
            if len(res) == 0:
                return False

            return res[0][0] == tablespace

        with get_new_node() as node:
            node.init()
            node.append_conf("shared_preload_libraries='pg_pathman'")
            node.start()
            node.psql('create extension pg_pathman')

            # create tablespace
            path = os.path.join(node.data_dir, 'test_space_location')
            os.mkdir(path)
            node.psql("create tablespace test_space location '{}'".format(path))

            # create table in this tablespace
            node.psql('create table abc(a serial, b int) tablespace test_space')

            # create three partitions. Expect that they will be created in the
            # same tablespace as the parent table
            node.psql("select create_range_partitions('abc', 'a', 1, 10, 3)")
            self.assertTrue(check_tablespace(node, 'abc', 'test_space'))

            # check tablespace for appended partition
            node.psql("select append_range_partition('abc', 'abc_appended')")
            self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space'))

            # check tablespace for prepended partition
            node.psql("select prepend_range_partition('abc', 'abc_prepended')")
            self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space'))

            # check tablespace for added partition
            node.psql("select add_range_partition('abc', 41, 51, 'abc_added')")
            self.assertTrue(check_tablespace(node, 'abc_added', 'test_space'))

            # check tablespace for split
            node.psql("select split_range_partition('abc_added', 45, 'abc_splitted')")
            self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space'))

            # now let's specify tablespace explicitly
            node.psql(
                "select append_range_partition('abc', 'abc_appended_2', 'pg_default')"
            )
            node.psql(
                "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')"
            )
            node.psql(
                "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')"
            )
            node.psql(
                "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')"
            )

            # yapf: disable
            self.assertTrue(check_tablespace(node, 'abc_appended_2',  'pg_default'))
            self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default'))
            self.assertTrue(check_tablespace(node, 'abc_added_2',     'pg_default'))
            self.assertTrue(check_tablespace(node, 'abc_splitted_2',  'pg_default'))
Example 43
	def test_tablespace(self):
		"""Check tablespace support"""

		def check_tablespace(node, tablename, tablespace):
			res = node.execute(
				'postgres',
				'select get_tablespace(\'{}\')'.format(tablename))
			if len(res) == 0:
				return False

			return res[0][0] == tablespace

		node = get_new_node('master')
		node.init()
		node.append_conf(
			'postgresql.conf',
			'shared_preload_libraries=\'pg_pathman\'\n')
		node.start()
		node.psql('postgres', 'create extension pg_pathman')

		# create tablespace
		path = os.path.join(node.data_dir, 'test_space_location')
		os.mkdir(path)
		node.psql(
			'postgres',
			'create tablespace test_space location \'{}\''.format(path))

		# create table in this tablespace
		node.psql(
			'postgres',
			'create table abc(a serial, b int) tablespace test_space')

		# create three partitions. Expect that they will be created in the
		# same tablespace as the parent table
		node.psql(
			'postgres',
			'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)')
		self.assertTrue(check_tablespace(node, 'abc', 'test_space'))

		# check tablespace for appended partition
		node.psql(
			'postgres',
			'select append_range_partition(\'abc\', \'abc_appended\')')
		self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space'))

		# check tablespace for prepended partition
		node.psql(
			'postgres',
			'select prepend_range_partition(\'abc\', \'abc_prepended\')')
		self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space'))

		# check tablespace for added partition
		node.psql(
			'postgres',
			'select add_range_partition(\'abc\', 41, 51, \'abc_added\')')
		self.assertTrue(check_tablespace(node, 'abc_added', 'test_space'))

		# check tablespace for split
		node.psql(
			'postgres',
			'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')')
		self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space'))

		# now let's specify tablespace explicitly
		node.psql(
			'postgres',
			'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')')
		node.psql(
			'postgres',
			'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')')
		node.psql(
			'postgres',
			'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')')
		node.psql(
			'postgres',
			'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')')
		self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default'))
		self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default'))
		self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default'))
		self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default'))
Example 44
	def test_parallel_nodes(self):
		"""Test parallel queries under partitions"""

		import json

		# Init and start postgres instance with preload pg_pathman module
		node = get_new_node('test')
		node.init()
		node.append_conf(
			'postgresql.conf',
			'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n')
		node.start()

		# Check version of postgres server
		# If version < 9.6 skip all tests for parallel queries
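		# NB: 'version' is assumed to be the numeric server version (e.g. 90600
		# for 9.6), defined elsewhere in the test module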
		if version < 90600:
			return

		# Prepare test database
		node.psql('postgres', 'create extension pg_pathman')
		node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i')
		node.psql('postgres', 'alter table range_partitioned alter column i set not null')
		node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)')
		node.psql('postgres', 'vacuum analyze range_partitioned')

		node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i')
		node.psql('postgres', 'alter table hash_partitioned alter column i set not null')
		node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)')
		node.psql('postgres', 'vacuum analyze hash_partitioned')

		node.psql('postgres', """
			create or replace function query_plan(query text) returns jsonb as $$
			declare
					plan jsonb;
			begin
					execute 'explain (costs off, format json)' || query into plan;
					return plan;
			end;
			$$ language plpgsql;
		""")

		# Test parallel select
		with node.connect() as con:
			con.execute('set max_parallel_workers_per_gather = 2')
			if version >= 100000:
				con.execute('set min_parallel_table_scan_size = 0')
			else:
				con.execute('set min_parallel_relation_size = 0')
			con.execute('set parallel_setup_cost = 0')
			con.execute('set parallel_tuple_cost = 0')

			# Check parallel aggregate plan
			test_query = 'select count(*) from range_partitioned where i < 1500'
			plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0]
			expected = json.loads("""
			[
			  {
				"Plan": {
				  "Node Type": "Aggregate",
				  "Strategy": "Plain",
				  "Partial Mode": "Finalize",
				  "Parallel Aware": false,
				  "Plans": [
					{
					  "Node Type": "Gather",
					  "Parent Relationship": "Outer",
					  "Parallel Aware": false,
					  "Workers Planned": 2,
					  "Single Copy": false,
					  "Plans": [
						{
						  "Node Type": "Aggregate",
						  "Strategy": "Plain",
						  "Partial Mode": "Partial",
						  "Parent Relationship": "Outer",
						  "Parallel Aware": false,
						  "Plans": [
							{
							  "Node Type": "Append",
							  "Parent Relationship": "Outer",
							  "Parallel Aware": false,
							  "Plans": [
								{
								  "Node Type": "Seq Scan",
								  "Parent Relationship": "Member",
								  "Parallel Aware": true,
								  "Relation Name": "range_partitioned_2",
								  "Alias": "range_partitioned_2",
								  "Filter": "(i < 1500)"
								},
								{
								  "Node Type": "Seq Scan",
								  "Parent Relationship": "Member",
								  "Parallel Aware": true,
								  "Relation Name": "range_partitioned_1",
								  "Alias": "range_partitioned_1"
								}
							  ]
							}
						  ]
						}
					  ]
					}
				  ]
				}
			  }
			]
			""")
			self.assertEqual(ordered(plan), ordered(expected))

			# Check count of returned tuples
			count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0]
			self.assertEqual(count, 1499)

			# Check simple parallel seq scan plan with limit
			test_query = 'select * from range_partitioned where i < 1500 limit 5'
			plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0]
			expected = json.loads("""
			[
			  {
				"Plan": {
				  "Node Type": "Limit",
				  "Parallel Aware": false,
				  "Plans": [
					{
					  "Node Type": "Gather",
					  "Parent Relationship": "Outer",
					  "Parallel Aware": false,
					  "Workers Planned": 2,
					  "Single Copy": false,
					  "Plans": [
						{
						  "Node Type": "Append",
						  "Parent Relationship": "Outer",
						  "Parallel Aware": false,
						  "Plans": [
							{
							  "Node Type": "Seq Scan",
							  "Parent Relationship": "Member",
							  "Parallel Aware": true,
							  "Relation Name": "range_partitioned_2",
							  "Alias": "range_partitioned_2",
							  "Filter": "(i < 1500)"
							},
							{
							  "Node Type": "Seq Scan",
							  "Parent Relationship": "Member",
							  "Parallel Aware": true,
							  "Relation Name": "range_partitioned_1",
							  "Alias": "range_partitioned_1"
							}
						  ]
						}
					  ]
					}
				  ]
				}
			  }
			]
			""")
			self.assertEqual(ordered(plan), ordered(expected))

			# Check tuples returned by query above
			res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5')
			res_tuples = sorted(map(lambda x: x[0], res_tuples))
			expected = [1, 2, 3, 4, 5]
			self.assertEqual(res_tuples, expected)

			# Check the case when no partition is selected in the resulting plan
			test_query = 'select * from range_partitioned where i < 1'
			plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0]
			expected = json.loads("""
			[
			   {
				 "Plan": {
				   "Node Type": "Result",
				   "Parallel Aware": false,
				   "One-Time Filter": "false"
				 }
			   }
			]
			""")
			self.assertEqual(ordered(plan), ordered(expected))

		# Remove all objects for testing
		node.psql('postgres', 'drop table range_partitioned cascade')
		node.psql('postgres', 'drop table hash_partitioned cascade')
		node.psql('postgres', 'drop extension pg_pathman cascade')

		# Stop instance and finish work
		node.stop()
		node.cleanup()