def test_ao_read_check_subtransaction(self):
    sql_file = os.path.join(self.sql_dir, 'sub_transaction.sql')
    ans_file = os.path.join(self.ans_dir, 'sub_transaction.ans')
    out_file = os.path.join(self.output_dir, 'sub_transaction.out')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    if not Gpdiff.are_files_equal(out_file, ans_file):
        raise Exception('Subtransaction tests failed!')

def test_outof_shmm_exit_slots(self):
    """
    The issue in MPP-19973 is that a shmem exit callback to reset a
    temporary namespace is not removed when the temporary namespace is
    reset.  In situations where a temporary namespace is reset multiple
    times because of an exception in a subtransaction, the callbacks
    use up all shmem_exit slots.
    """
    sql_setup_file = local_path('mpp19973_setup.sql')
    PSQL.run_sql_file(sql_file=sql_setup_file)
    # Test case setup verification
    self.assertTrue(table_exists("foo"))
    self.assertTrue(function_exists("testfn"))

    sql_file = local_path('mpp19973.sql')
    out_file = local_path('mpp19973.out')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file,
                      output_to_file=True)
    # There will be different error messages in the output, but
    # we should not run out of shmem exit slots.
    self.assertNotRegexpMatches(open(out_file).read(),
                                "out of on_shmem_exit slots",
                                "Database should not run out of shmem_exit slots")

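# The setup verification above relies on table_exists()/function_exists()
# helpers defined elsewhere in the suite.  A minimal sketch of how such
# checks could be written on top of PSQL.run_sql_command (a hypothetical
# illustration, not necessarily the suite's actual implementation):
def table_exists(table_name):
    # flags='-q -t' gives quiet, tuples-only output, so only the count
    # comes back.
    out = PSQL.run_sql_command(
        "SELECT count(*) FROM pg_class WHERE relname = '%s';" % table_name,
        flags='-q -t')
    return int(out.strip()) > 0

def function_exists(function_name):
    out = PSQL.run_sql_command(
        "SELECT count(*) FROM pg_proc WHERE proname = '%s';" % function_name,
        flags='-q -t')
    return int(out.strip()) > 0
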
def test_vacuum_appendonly(self):
    out_file = os.path.join(self.output_dir, 'vacuum_ao_co.out')
    ans_file = os.path.join(self.ans_dir, 'vacuum_ao_co.ans')
    sql_file = os.path.join(self.sql_dir, 'vacuum_ao_co.sql')
    PSQL.run_sql_file(sql_file, out_file=out_file)
    if not Gpdiff.are_files_equal(out_file, ans_file):
        raise Exception('Vacuum failed for append-only tables!')

def test_06_deletetable_visimap_for_uao_tables(self):
    tinctest.logger.info("-------------------------------")
    tinctest.logger.info("test_06 Verify that the visimap updates on delete of a row in a uao table")
    tinctest.logger.info("-------------------------------\n")
    out_file = os.path.join(self.outpath, "deletetablevisimapinfo_06.out")
    sql_file = os.path.join(self.sqlpath, "deletetablevisimapinfo_06.sql")
    ans_file = os.path.join(self.anspath, "deletetablevisimapinfo_06.ans")

    # Create the test table.
    sql_cmd1 = ("drop table if exists uao_visimap_test06; "
                "create table uao_visimap_test06 (i int, j varchar(20), k int) "
                "with (appendonly=true) DISTRIBUTED BY (i);\n")
    sql_out = PSQL.run_sql_command(sql_cmd=sql_cmd1)
    self.assertIsNotNone(re.search("CREATE TABLE", sql_out))

    # Look up the table's relfilenode to locate its visimap relation.
    sql_cmd2 = ("\\pset tuples_only\n\\pset footer off\n"
                "SELECT relfilenode FROM pg_class WHERE relname='uao_visimap_test06';\n")
    with open(sql_file, "w") as f:
        f.write(sql_cmd2)
    sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, flags="-q")
    with open(out_file, "r") as f:
        relid = f.read()

    # Insert and delete rows, inspecting the visimap before and after.
    aovisimap_cmd = ("select count(*) from gp_dist_random('pg_aoseg.pg_aovisimap_%s');\n"
                     % relid.strip())
    sql_cmd3 = ("select * from uao_visimap_test06;\n"
                + aovisimap_cmd
                + "insert into uao_visimap_test06 select i,'aa'||i,i+10 from generate_series(1,5) as i;\n"
                  "delete from uao_visimap_test06 where i=3;\n"
                  "select * from uao_visimap_test06;\n"
                + aovisimap_cmd)
    with open(sql_file, "w") as f:
        f.write(sql_cmd3)
    sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, flags="-q")
    assert Gpdiff.are_files_equal(out_file, ans_file)

def _generate_explain_analyze_output(self):
    """
    Execute explain analyze for a given query.
    """
    ea_sql_file = os.path.join(self.get_out_dir(),
                               os.path.basename(self.sql_file).replace('.sql', '_explain_analyze.sql'))
    with open(ea_sql_file, 'w') as o:
        with open(self.sql_file, 'r') as f:
            explain_write = False
            for line in f:
                if not line.startswith('--') and not explain_write:
                    # Keep all the GUCs inside an ignored block.
                    o.write('-- start_ignore\n')
                    for guc_string in self.gucs:
                        o.write("SET %s;" % guc_string)
                    o.write(line)
                    for orca_guc_string in self.orcagucs:
                        o.write("%s;\n" % orca_guc_string)
                    # Add gucs to print optimization time to the log.
                    o.write("SET optimizer_print_optimization_stats=on;\n")
                    o.write("SET client_min_messages='log';\n")
                    o.write('-- end_ignore\n')
                    o.write('explain analyze %s' % line)
                    explain_write = True
                else:
                    o.write(line)
    ea_out_file = ea_sql_file.replace('.sql', '.out')
    PSQL.run_sql_file(ea_sql_file, dbname=self.db_name, out_file=ea_out_file)
    with open(ea_out_file, 'r') as f:
        self._ea_output = f.read()

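# For reference, with gucs=['optimizer=on'] and no orca gucs, the rewrite
# above turns a query file whose first statement is "SELECT ..." into roughly
# the following (illustrative shape only):
#
#   -- start_ignore
#   SET optimizer=on;SELECT ...
#   SET optimizer_print_optimization_stats=on;
#   SET client_min_messages='log';
#   -- end_ignore
#   explain analyze SELECT ...
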
def do_test(self, timeout=0, sqlfile=None, host=None, port=None,
            username=None, password=None, flags='-a', usetemplate=False):
    """
    @summary: Run a PostGIS test case
    @param timeout: Number of seconds to run the sql file before timing out
    @param sqlfile: The path to the sql file (relative to TEST.py directory)
    @param host: The GPDB master host name to use to connect to the database
    @param port: The GPDB port used to make connections to the database
    @param username: The database username to use to connect to the database
    @param password: The password for the database user used to connect to the database
    """
    if sqlfile is None:
        testcase = inspect.stack()[1][3].split('test_')[1]
    else:
        testcase = sqlfile.split('.sql')[0]
    sql_file = local_path(testcase + ".sql")
    out_file = local_path(testcase + ".out")
    ans_file = local_path(testcase + ".ans")
    # Run psql on the file and check the result.
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))

def test_ao_co_diagnostics(self):
    base_dir = os.path.dirname(sys.modules[self.__class__.__module__].__file__)
    setup_file = os.path.join(base_dir, "gptoolkit_sql", "gptoolkit_setup.sql")
    PSQL.run_sql_file(setup_file)
    oid = self.get_oid('foo')
    oidcs = self.get_oid('foocs')
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aoseg_history(%s)' % oid))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aocsseg(%s)' % oidcs))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aocsseg_name('foocs')"))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aocsseg_history(%s)' % oidcs))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aoseg_history(%s)' % oid))
    self.assertTrue(self.has_zero_rows('SELECT * FROM gp_toolkit.__gp_aovisimap(%s)' % oid))
    self.assertTrue(self.has_zero_rows("SELECT * FROM gp_toolkit.__gp_aovisimap_name('foo')"))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aovisimap_hidden_info(%s)' % oid))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aovisimap_hidden_info_name('foo')"))
    self.assertTrue(self.has_zero_rows('SELECT * FROM gp_toolkit.__gp_aovisimap_entry(%s)' % oid))
    self.assertTrue(self.has_zero_rows("SELECT * FROM gp_toolkit.__gp_aovisimap_entry_name('foo')"))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aoseg_name('foo')"))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aoseg_history(%s)' % oid, True))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aocsseg(%s)' % oidcs, True))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aocsseg_name('foocs')", True))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aocsseg_history(%s)' % oidcs, True))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aoseg_history(%s)' % oid, True))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aovisimap(%s)' % oid, True))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aovisimap_name('foo')", True))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aovisimap_hidden_info(%s)' % oid, True))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aovisimap_hidden_info_name('foo')", True))
    self.assertTrue(self.has_rows('SELECT * FROM gp_toolkit.__gp_aovisimap_entry(%s)' % oid, True))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aovisimap_entry_name('foo')", True))
    self.assertTrue(self.has_rows("SELECT * FROM gp_toolkit.__gp_aoseg_name('foo')", True))

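# has_rows()/has_zero_rows() are small helpers defined on the test class.  A
# sketch of the kind of check they perform (hypothetical; the meaning of the
# second positional argument passed above is suite-specific and not modeled
# here):
def has_rows(self, sql):
    out = PSQL.run_sql_command(sql, flags='-q -t')
    return out.strip() != ''
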
def doTest(self, sql_filename):
    '''Run the file and compare oids in the out file.'''
    sql_file = local_path(sql_filename)
    out_file = local_path(sql_filename.split('.sql')[0] + '.out')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    isOk = self.compare_oids(out_file)
    self.assertTrue(isOk)

def doQuery(self, sqlfile, default=''):
    sql_file = local_path(sqlfile)
    filename_prefix = sqlfile.split('.sql')[0]
    out_file = local_path(filename_prefix + '.out')
    ans_file = local_path(filename_prefix + '.ans')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))

def do_test_fixture(self, fixture):
    """
    @summary: Runs a setup or teardown routine
    @param fixture: Either 'setup' or 'teardown'; determines the sql file suffix.
    """
    testcase = self.id().split(".")[2]
    init_file = local_path('init_file')
    init_file_list = [init_file]
    if fixture == 'setup':
        sqlfile = local_path(testcase + "_setup.sql")
        outfile = local_path(testcase + "_setup.out")
        ansfile = local_path(testcase + "_setup.ans")
    elif fixture == 'teardown':
        sqlfile = local_path(testcase + "_teardown.sql")
        outfile = local_path(testcase + "_teardown.out")
        ansfile = local_path(testcase + "_teardown.ans")
    else:
        raise Exception("do_test_fixture(): Invalid value for fixture. "
                        "Acceptable values are 'setup' or 'teardown'")
    # If the fixture sql file exists, run it and validate the result.
    if os.path.isfile(sqlfile):
        PSQL.run_sql_file(sql_file=sqlfile, out_file=outfile)
        Gpdiff.are_files_equal(outfile, ansfile, match_sub=init_file_list)

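# Typical usage wires the fixture runner into unittest's hooks, e.g.:
#
#     def setUp(self):
#         self.do_test_fixture('setup')
#
#     def tearDown(self):
#         self.do_test_fixture('teardown')
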
def setUpClass(cls):
    """
    @description: Create the databases and roles to be used in the test
    """
    PSQL.run_sql_file(local_path('setup.sql'), dbname='postgres')
    dsp = DspClass()
    dsp.add_user()

def run_sql_file(self, sql_file, out_file=None, out_dir=None, optimizer=None):
    """
    Given a sql file and an ans file, this adds the specified gucs
    (self.gucs) to the sql file, runs the sql against the test case
    database (self.db_name), and verifies the output with the ans file.
    If an 'init_file' exists in the same location as the sql_file, it
    will be used while doing gpdiff.
    """
    result = True
    self.test_artifacts.append(sql_file)
    if not out_file:
        out_file = os.path.join(self.get_out_dir(),
                                os.path.basename(sql_file).replace('.sql', '.out'))
    self.test_artifacts.append(out_file)
    tinctest.logger.info('running the sql testcase file:')
    tinctest.logger.info(sql_file)
    # This particular test reloads its dataset before running.
    if sql_file.find('hybrid_part_tbl_drop_col') >= 0:
        default_db = getpass.getuser()
        dbase = os.getenv('PGDATABASE', default_db)
        datasetobj = DataSetDatabase(database_name=dbase)
        tinctest.logger.info('--running dataset reload')
        datasetobj.reload_dataset()
    PSQL.run_sql_file(sql_file, dbname=self.db_name, out_file=out_file)
    return out_file

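# A typical caller pairs the returned out file with a matching .ans file and
# diffs the two, e.g. (a sketch using the Gpdiff helper seen elsewhere in
# this suite):
#
#     out_file = self.run_sql_file(local_path('query01.sql'))
#     self.assertTrue(Gpdiff.are_files_equal(out_file, local_path('query01.ans')))
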
def do_test(self, timeout=0, sqlfile=None, host=None, port=None,
            username=None, password=None, flags='-a', ans_version=False):
    """
    @summary: Run a test case
    @param timeout: Number of seconds to run the sql file before timing out
    @param sqlfile: The path to the sql file (relative to TEST.py directory)
    @param host: The GPDB master host name to use to connect to the database
    @param port: The GPDB port used to make connections to the database
    @param username: The database username to use to connect to the database
    @param password: The password for the database user used to connect to the database
    """
    if sqlfile is None:
        testcase = inspect.stack()[1][3]
        filename = testcase.split('test_')[1]
        sql_file = local_path(filename + ".sql")
        out_file = local_path(filename + ".out")
        ans_file = local_path(filename + ".ans")
    else:
        sql_file = local_path(sqlfile)
        out_file = local_path(sqlfile.split('.')[0] + '.out')
        ans_file = local_path(sqlfile.split('.')[0] + '.ans')
    # Use a version-specific answer file on 4.3.x when requested.
    if ans_version:
        (gpdb_version, _) = self.gpdb.GetGpdbVersion()
        if gpdb_version.startswith('4.3'):
            ans_file = ans_file + '.4.3'
    init_file = local_path('init_file')
    init_file_list = [init_file]
    # Run psql on the file and check the result.
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, timeout=timeout,
                      host=host, port=port, username=username,
                      password=password, flags=flags)
    self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file,
                                           match_sub=init_file_list))

def setUpClass(cls):
    super(EnhancedTableFunctionTest, cls).setUpClass()
    tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and create.sql")
    PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
    PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
    tinctest.logger.info("*** Starting the Enhanced table function test")

def test_gprecoverseg_rebalance(self):
    self.gprec.wait_till_insync_transition()
    if self.failover('primary'):
        PSQL.run_sql_file(local_path('mirror_failover_trigger.sql'))
        self.gprec.incremental()
        if self.gprec.wait_till_insync_transition():
            self.assertTrue(self.gprec.rebalance())

def test_06_deletetable_visimap_for_uao_tables(self):
    tinctest.logger.info("-------------------------------")
    tinctest.logger.info('test_06 Verify that the visimap updates on delete of a row in a uao table')
    tinctest.logger.info("-------------------------------\n")
    out_file = os.path.join(self.outpath, 'deletetablevisimapinfo_06.out')
    sql_file = os.path.join(self.sqlpath, 'deletetablevisimapinfo_06.sql')
    ans_file = os.path.join(self.anspath, 'deletetablevisimapinfo_06.ans')
    sql_cmd1 = ("drop table if exists uao_visimap_test06; "
                "create table uao_visimap_test06 (i int, j varchar(20), k int) "
                "with (appendonly=true) DISTRIBUTED BY (i);\n")
    sql_out = PSQL.run_sql_command(sql_cmd=sql_cmd1)
    self.assertIsNotNone(re.search('CREATE TABLE', sql_out))
    sql_cmd2 = ("\\pset tuples_only\n\\pset footer off\n"
                "SELECT relfilenode FROM pg_class WHERE relname='uao_visimap_test06';\n")
    with open(sql_file, 'w') as f:
        f.write(sql_cmd2)
    sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, flags='-q')
    with open(out_file, 'r') as f:
        relid = f.read()
    # This variant inspects the visimap rows themselves rather than a count.
    aovisimap_cmd = ("select * from gp_dist_random('pg_aoseg.pg_aovisimap_%s');\n"
                     % relid.strip())
    sql_cmd3 = ("select * from uao_visimap_test06;\n"
                + aovisimap_cmd
                + "insert into uao_visimap_test06 select i,'aa'||i,i+10 from generate_series(1,5) as i;\n"
                  "delete from uao_visimap_test06 where i=3;\n"
                  "select * from uao_visimap_test06;\n"
                + aovisimap_cmd)
    with open(sql_file, 'w') as f:
        f.write(sql_cmd3)
    sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, flags='-q')
    assert Gpdiff.are_files_equal(out_file, ans_file)

def test_01(self):
    "SPI: plpgsql"
    sql_file = local_path('query01.sql')
    out_file = local_path('query01.out')
    ans_file = local_path('query01.ans')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))

def test_run_sql_file_wth_username(self):
    sql_file = os.path.join(os.path.dirname(inspect.getfile(self.__class__)), 'test.sql')
    username = getpass.getuser()
    self.assertTrue(PSQL.run_sql_file(sql_file=sql_file, username=username))
    # Invalid username
    self.assertFalse(PSQL.run_sql_file(sql_file=sql_file, username='******'))

def _run_and_measure_sql_file(self, sql_file, iteration, ans_file=None):
    """
    Given a sql file and an ans file, this adds the specified gucs
    (self.gucs) to the sql file, runs the sql against the test case
    database (self.db_name), and verifies the output with the ans file.
    """
    result = True
    self.test_artifacts.append(sql_file)
    out_file = os.path.join(self.get_out_dir(),
                            os.path.basename(sql_file).replace(".sql", "_iter_%s.out" % iteration))
    self.test_artifacts.append(out_file)
    PSQL.run_sql_file(sql_file, dbname=self.db_name, out_file=out_file)
    if ans_file is not None:
        self.test_artifacts.append(ans_file)
        result = Gpdiff.are_files_equal(out_file, ans_file)
        if result == False:
            self.test_artifacts.append(out_file.replace('.out', '.diff'))
            self.fail('Diff failed between %s and %s' % (out_file, ans_file))
    return self._get_runtime(out_file)

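# _get_runtime() is assumed to pull the elapsed time out of the psql output,
# e.g. the "Time: <n> ms" lines that psql's \timing option appends after each
# statement.  A minimal sketch of such a parser (hypothetical; the real
# helper may aggregate differently):
def _get_runtime(self, out_file):
    total_ms = 0.0
    with open(out_file) as f:
        for line in f:
            m = re.match(r'Time: ([\d.]+) ms', line)
            if m:
                total_ms += float(m.group(1))
    return total_ms
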
def test_43_alter_table_with_oid(self):
    '''MPP-13870: Alter Table Set Without Oids fails in case of inheritance'''
    sql_file = local_path('alter_table_with_oid.sql')
    out_file = local_path('alter_table_with_oid.out')
    ans_file = local_path('alter_table_with_oid.ans')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))

def persistent_Rebuild(self, hostname=None, port=None, type='Master'):
    '''Rebuild persistent objects by connecting in utility mode.'''
    sql_file = local_path('persistent_Rebuild_%s.sql' % type)
    now = datetime.datetime.now()
    timestamp = '%s%s%s%s%s%s%s' % (now.year, now.month, now.day, now.hour,
                                    now.minute, now.second, now.microsecond)
    out_file = sql_file.replace('.sql', timestamp + '.out')
    PSQL.run_sql_file(sql_file=sql_file, PGOPTIONS='-c gp_session_role=utility',
                      host=hostname, port=port, out_file=out_file)

def test_scalar_consolidation_NEG_paramDiffType(self):
    """
    Scalar consolidation negative test: two parameters with different types.
    """
    self.runFunctionTest("scalar_consolidation", "NEG_paramDiffType")
    filename = local_path("c_functions/scalar_consolidation/NEG_paramDiffType_cleanup.sql")
    PSQL.run_sql_file(filename)

def test_with_concurrent_workload(self):
    """
    Add new mirrors while a concurrent workload is in progress, check that
    the mirrors are added and the running workload is not affected, and
    finally run checkmirrorseg.  Note that adding mirrors while running a
    workload has a checkmirrorseg issue (MPP-24311).
    """
    gprecover = GpRecover()
    self._setup_gpaddmirrors()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    sql_setup_file = local_path('sql/ao_heap_table_setup.sql')
    sql_file = local_path('sql/ao_heap_table.sql')
    pg_stat_activity = 'SELECT * FROM pg_stat_activity;'
    PSQL.run_sql_file(sql_setup_file)
    # Kick off the workload and gpaddmirrors concurrently.
    subprocess.Popen(["psql", "-f", sql_file])
    time.sleep(15)
    subprocess.Popen(["gpaddmirrors", "-ai", self.mirror_config_file, "-d", self.mdd])
    time.sleep(15)
    result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
    result = result.strip()
    rows = result.split('\n')
    self.assertTrue(len(rows) > 1)
    # Poll until only this session remains in pg_stat_activity.
    while len(rows) > 1:
        result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
        result = result.strip()
        rows = result.split('\n')
        time.sleep(3)
    gprecover.wait_till_insync_transition()
    self.verify_config_file_with_gp_config()

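# The poll above spins with no upper bound; if a backend never exits, the
# test hangs.  A bounded variant of the same pg_stat_activity poll (a sketch
# with a hypothetical timeout parameter, not the suite's actual helper):
def wait_till_idle(pg_stat_activity, timeout_secs=600, interval=3):
    waited = 0
    while waited < timeout_secs:
        result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t',
                                      dbname='template1').strip()
        # Only the polling session itself remains: done.
        if len(result.split('\n')) <= 1:
            return True
        time.sleep(interval)
        waited += interval
    return False
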
def setUpClass(cls):
    """
    Check whether the plperl package is installed; if not, install it.
    """
    super(MDTSQLTestCase, cls).setUpClass()
    mdt.pre_process_sql()
    mdt.pre_process_ans()
    mdt.setup_gpfdist()

    cmd = 'gpssh --version'
    res = {'rc': 0, 'stderr': '', 'stdout': ''}
    run_shell_command(cmd, 'check product version', res)
    gppkg = Gppkg()
    product_version = res['stdout']
    gppkg.gppkg_install(product_version, 'plperl')

    setup_user = '******'
    setup_db = 'create database mdt_db;'
    setup_sql = local_path('sql/setup/setup.sql')
    setup_output = local_path('output/setup/setup.out')
    PSQL.run_sql_command(sql_cmd=setup_user, dbname=os.environ.get('PGDATABASE'))
    PSQL.run_sql_command(sql_cmd=setup_db, dbname=os.environ.get('PGDATABASE'),
                         username='******')
    PSQL.run_sql_file(sql_file=setup_sql, out_file=setup_output,
                      dbname='mdt_db', username='******')

def run_explain_indexjoin_on(self, query, explain_file, explain_file_out,
                             planid_stmt, planid):
    syscmd = "rm -f " + explain_file
    os.popen(syscmd, 'r')
    sqlcontent = [
        "--start_ignore",
        "set optimizer=on;",
        "set optimizer_enable_indexjoin=on;",
        "set optimizer_enable_bitmapscan=on;",
        "select disable_xform('CXformInnerJoin2NLJoin');" if not planid_stmt else "",
        "select disable_xform('CXformInnerJoin2HashJoin');",
        planid_stmt,
        "set client_min_messages='log';",
        "--end_ignore",
        "EXPLAIN"
    ]
    for x in query:
        sqlcontent.append(x)
    self._add_tofile(explain_file, sqlcontent, 0)
    PSQL.run_sql_file(explain_file, dbname=self.db_name, out_file=explain_file_out)
    # Keep a per-plan copy of the explain output.
    if planid != 0:
        new_file = explain_file_out + str(planid)
        syscmd_cp = "cp %s %s" % (explain_file_out, new_file)
        os.popen(syscmd_cp, 'r')
    if self._look_for_string(explain_file_out, "Planner produced plan :1") == True:
        return False, "Unexpected fallback with force indexapply on |"
    elif (self._look_for_string(explain_file_out, "Index Cond", check_join_cond=True) == False
          and self.negativetest == 'False'):
        return False, "Index Scan in join not being used with force indexapply | " + explain_file_out
    elif (self._look_for_string(explain_file_out, "Index Cond", check_join_cond=True) == True
          and self.negativetest == 'True'):
        return False, "Index Scan in join is being used when it shouldn't | "
    else:
        return True, None

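# _look_for_string() scans the explain output for a marker such as
# "Index Cond" or the "Planner produced plan" fallback notice.  A simplified
# sketch of that kind of scan (hypothetical; the real helper also honors its
# check_join_cond flag, which is not modeled here):
def _look_for_string(self, out_file, pattern):
    with open(out_file) as f:
        return any(pattern in line for line in f)
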
def runFunctionTest(self, functiontype, file):
    filepath = os.path.join(local_path("c_functions"), functiontype, file)
    # Skip if the compiled function library is not available.
    so_loc = local_path('c_functions/functions.so')
    if not os.path.isfile(so_loc):
        self.skipTest()
    # Substitute the %funclib_path% token in the .sql.in template.
    input = open(filepath + '.sql.in')
    output = open(filepath + '.sql', 'w')
    for s in input.xreadlines():
        if string.find(s, '%funclib_path%') >= 0:
            if string.find(sys.platform, "OSX") == 0:
                output.write(s.replace('%funclib_path%',
                                       local_path("c_functions/functions.NOTSUREEXTNAME")))
            else:
                output.write(s.replace('%funclib_path%',
                                       local_path("c_functions/functions.so")))
        else:
            output.write(s)
    output.close()
    input.close()
    sqlfile = "%s.sql" % filepath
    PSQL.run_sql_file(sqlfile)
    self.doTest("%s.yml" % filepath)

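# The template rewrite above turns a line in <file>.sql.in such as
#
#     CREATE FUNCTION ... AS '%funclib_path%', 'funcname' LANGUAGE C;
#
# into the same line with the absolute path of c_functions/functions.so
# substituted in (the .sql.in line shown here is illustrative, not taken
# from the suite).
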
def run_sqls(self, test):
    '''
    @summary: Run the sql files.
    @param test: the sql file list
    '''
    tinctest.logger.info("[STLRTest] Running run_sqls")
    tinctest.logger.info("[STLRTest] Starting new thread to run sql %s" % (test))
    PSQL.run_sql_file(local_path(test))

def drop_runaway_udf(dbname=None):
    tinctest.logger.info('Removing Runaway Query Termination testing UDFs by running sql file %s'
                         % local_path('./udfs/uninstall_runaway_test.sql'))
    PSQL.run_sql_file(sql_file=local_path('./udfs/uninstall_runaway_test.sql'),
                      out_file=local_path('./udfs/uninstall_runaway_test.out'),
                      dbname=dbname)

def create_appendonly_tables(self, row=True):
    sql_file_name = 'create_ao_table' if row else 'create_co_table'
    sql_file = os.path.join(self.sql_dir, '%s.sql' % sql_file_name)
    out_file = os.path.join(self.output_dir, '%s.out' % sql_file_name)
    ans_file = os.path.join(self.ans_dir, '%s.ans' % sql_file_name)
    PSQL.run_sql_file(sql_file, out_file=out_file)
    if not Gpdiff.are_files_equal(out_file, ans_file):
        raise Exception('Unable to create tables')

def test_gpinitstandby_prompt_for_filespace(self):
    from mpp.lib.gpfilespace import Gpfilespace
    gpfile = Gpfilespace()
    gpfile.create_filespace('fs_walrepl_a')
    PSQL.run_sql_file(local_path('filespace.sql'), dbname=self.db_name)
    filespace_loc = self.gp.get_filespace_location()
    self.create_directory(filespace_loc)
    self.assertTrue(self.gp.init_with_prompt(filespace_loc))

def clean_files(self):
    tinctest.logger.info("[STLRTest] Running clean_files")
    PSQL.run_sql_file(local_path('drop.sql'))
    PSQL.run_sql_file(local_path('drop_filespace.sql'))
    tinctest.logger.info("[STLRTest] printing gp segment configuration")
    gp_seg_conf = PSQL.run_sql_command("select * from gp_segment_configuration order by dbid")
    tinctest.logger.info(gp_seg_conf)

def check_duplicate_entry(self):
    ans_file = local_path('check_duplicate_entry.ans')
    out_file = local_path('check_duplicate_entry.out')
    sql_file = local_path('check_duplicate_entry.sql')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    assert Gpdiff.are_files_equal(out_file, ans_file)

def test_zzz_functionsetup(self):
    """
    Run $GPHOME/share/postgresql/contrib/pgcrypto.sql to create all
    pgcrypto functions.
    """
    # Check whether the sql file exists.
    gphome = os.environ.get('GPHOME')
    file_loc = os.path.join(gphome, 'share/postgresql/contrib/pgcrypto.sql')
    if not os.path.exists(file_loc):
        self.skipTest("file does not exist: $GPHOME/share/postgresql/contrib/pgcrypto.sql")
    else:
        PSQL.run_sql_file(file_loc, dbname=os.environ.get('PGDATABASE'))

def test_uao_gpload(self):
    def create_yaml_file():
        database = os.environ.get("PGDATABASE", os.environ["USER"])
        user = os.environ.get("PGUSER", os.environ["USER"])
        port = os.environ.get("PGPORT", "5432")
        load_port = os.environ.get("TINC_UAO_LOAD_PORT", "8082")
        base_dir = os.path.dirname(sys.modules[self.__class__.__module__].__file__)
        load_dir = os.path.join(base_dir, "data")
        yaml_filename = os.path.join(base_dir, "output/gpload.yaml")
        yaml_file = open(yaml_filename, "w")
        yaml = """VERSION: 1.0.0.1
DATABASE: %s
USER: %s
HOST: localhost
PORT: %s
GPLOAD:
   INPUT:
    - SOURCE:
         LOCAL_HOSTNAME:
          - localhost
         PORT: %s
         FILE:
          - %s/*.txt
    - COLUMNS:
       - id: int
       - name: text
       - sponsor: text
    - FORMAT: text
    - DELIMITER: ';'
    - ESCAPE: 'OFF'
    - ERROR_LIMIT: 25
    - LOG_ERRORS: True
   OUTPUT:
    - TABLE: customer
    - MODE: INSERT
SQL:
""" % (database, user, port, load_port, load_dir)
        yaml_file.write(yaml)
        return yaml_filename

    (setup_file, setup_out_file) = self.get_sql_files("uao_gpload_setup")[0:2]
    (sql_file, out_file, ans_file) = self.get_sql_files("uao_gpload")
    yaml_filename = create_yaml_file()
    PSQL.run_sql_file(setup_file, out_file=setup_out_file)
    gphome = os.environ["GPHOME"]
    output_file = os.path.join(os.path.dirname(sys.modules[self.__class__.__module__].__file__),
                               "output/gpload.out")
    load_process = subprocess.Popen(["%s/bin/gpload" % gphome, "-f", yaml_filename],
                                    stderr=subprocess.STDOUT,
                                    stdout=open(output_file, "w"))
    load_process.wait()
    PSQL.run_sql_file(sql_file, out_file=out_file)
    result = Gpdiff.are_files_equal(out_file, ans_file)
    self.assertTrue(result)

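# load_process.wait() returns gpload's exit status, which the test above does
# not check; it relies on the subsequent Gpdiff comparison instead.  A
# stricter variant could also assert on the exit code (sketch):
#
#     rc = load_process.wait()
#     self.assertEqual(rc, 0, 'gpload exited with %s; see %s' % (rc, output_file))
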
def test_PLR0010(self):
    """Language: PL/R Examples from http://www.joeconway.com/plr/"""
    self.skipTest("Test not required. Ref: MPP-23940")
    if self.checkAPPHOMEandLIB("plr", "R_HOME"):
        sql_file = local_path("plr/plr-function.sql")
        PSQL.run_sql_file(sql_file=sql_file)
        self.doPLR(1, "plr/plr-test", default='')
    else:
        self.skipTest('skipped')

def doTest(self, sql_file=None, out_file=None, ans_file=None):
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    init_file = local_path('init_file')
    init_file_list = [init_file]
    self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file,
                                           match_sub=init_file_list))

def inject_and_resume_fault(self, fault_name, trans_state):
    self.check_fault_status(fault_name, 'triggered')
    self.filerep_fault(trans_state)
    if trans_state == 'failover_to_mirror':
        PSQL.run_sql_file(local_path('test_while_ct.sql'))
    self.resume_faults(fault_name, trans_state)
    tinctest.logger.info("[STLRTest] printing gp segment configuration")
    gp_seg_conf = PSQL.run_sql_command("select * from gp_segment_configuration order by dbid")
    tinctest.logger.info(gp_seg_conf)

def run_workload(self, dir, verify=False):
    tinctest.logger.info("Running workload ...")
    load_path = local_path(dir) + os.sep
    for file in os.listdir(load_path):
        if file.endswith(".sql"):
            out_file = file.replace(".sql", ".out")
            PSQL.run_sql_file(sql_file=load_path + file, dbname=self.dbname,
                              port=self.pgport, out_file=load_path + out_file)
    if verify == True:
        self.validate_sql_files(load_path)