def do_gpcheckcat(self, dbname=None, alldb=False, online=False, outputFile='checkcat.out', outdir=None):
    # Run a gpcheckcat catalog consistency check, then log the current
    # segment configuration for debugging context.
    #
    # NOTE(review): none of the parameters (dbname, alldb, online,
    # outputFile, outdir) are forwarded to check_catalog() below -- they
    # are accepted but silently ignored. TODO: confirm whether they should
    # be passed through (sibling code calls check_catalog with alldb,
    # dbname and outputFile keyword arguments).
    self.dbstate = DbStateClass('run_validation')
    tinctest.logger.info("[STLRTest] Running do_gpcheckcat")
    self.dbstate.check_catalog()
    # Capture and log the cluster layout alongside the checkcat run.
    (gp_seg_conf) = PSQL.run_sql_command("select * from gp_segment_configuration order by dbid")
    tinctest.logger.info(gp_seg_conf)
    # Always returns True; check_catalog is expected to raise on failure.
    return True
def __init__(self, methodName):
    """Set up the utility and cluster-management helpers for this test case.

    :param methodName: test method name, forwarded to the base test-case
        constructor.
    """
    self.pgport = os.getenv('PGPORT')
    self.util = Filerepe2e_Util()
    self.gpconfig = GpConfig()
    self.config = GPDBConfig()
    # Helpers that operate on the cluster configuration.
    self.gpr = GpRecover(self.config)
    self.dbstate = DbStateClass('run_validation', self.config)
    self.gpstart = GpStart()
    self.gpstop = GpStop()
    super(FilerepTestCase, self).__init__(methodName)
def __init__(self, methodName):
    """Initialize fault-injection, recovery and verification helpers.

    :param methodName: test method name, forwarded to the base test-case
        constructor.
    """
    self.filereputil = Filerepe2e_Util()
    self.config = GPDBConfig()
    # Recovery/verification helpers share the same cluster configuration.
    self.gprecover = GpRecover(self.config)
    self.gpstop = GpStop()
    self.gpstart = GpStart()
    self.gpverify = GpdbVerify(config=self.config)
    self.dbstate = DbStateClass('run_validation', self.config)
    self.port = os.environ.get('PGPORT')
    super(PgtwoPhaseClass, self).__init__(methodName)
def do_gpcheckcat(self, dbname=None, alldb=False, online=False, outputFile='checkcat.out', outdir=None):
    # Run a gpcheckcat catalog consistency check.
    #
    # NOTE(review): none of the parameters (dbname, alldb, online,
    # outputFile, outdir) are forwarded to check_catalog() below -- they
    # are accepted but silently ignored. TODO: confirm whether they should
    # be passed through (sibling code calls check_catalog with alldb,
    # dbname and outputFile keyword arguments).
    self.dbstate = DbStateClass('run_validation')
    tinctest.logger.info("[STLRTest] Running do_gpcheckcat")
    self.dbstate.check_catalog()
    # Always returns True; check_catalog is expected to raise on failure.
    return True
def __init__(self, methodName):
    """Create the helper objects used across the crash-recovery suite.

    :param methodName: test method name, forwarded to the base test-case
        constructor.
    """
    self.fileutil = Filerepe2e_Util()
    self.config = GPDBConfig()
    # Configuration-aware helpers.
    self.gprecover = GpRecover(self.config)
    self.gpstart = GpStart()
    self.gpstop = GpStop()
    self.gpfile = Gpfilespace(self.config)
    self.dbstate = DbStateClass('run_validation', self.config)
    self.port = os.environ.get('PGPORT')
    self.base = GPDBStorageBaseTestCase()
    super(SuspendCheckpointCrashRecovery, self).__init__(methodName)
def __init__(self, config=None):
    """Build cluster helpers around an optional caller-supplied config.

    :param config: existing GPDBConfig to reuse; a fresh one is created
        when omitted.
    """
    # Reuse the caller's configuration when given, otherwise create one.
    self.config = config if config is not None else GPDBConfig()
    self.filereputil = Filerepe2e_Util()
    self.gprecover = GpRecover(self.config)
    self.gpstop = GpStop()
    self.gpstart = GpStart()
    self.gpverify = GpdbVerify(config=self.config)
    self.dbstate = DbStateClass('run_validation', self.config)
    self.port = os.environ.get('PGPORT')
def _validation(self):
    """Validate cluster health: run gpcheckcat and the mirror integrity check.

    The gpcheckcat report is written to ``subt_checkcat.out`` under the
    local test path.
    """
    # Fix: removed dead commented-out CHECKPOINT/sleep code and repaired
    # the malformed docstring; the executed logic is unchanged.
    self.dbstate = DbStateClass('run_validation')
    tinctest.logger.info("[STLRTest] Running _validation")
    outfile = local_path("subt_checkcat.out")
    self.dbstate.check_catalog(outputFile=outfile)
    self.dbstate.check_mirrorintegrity()
def validate_test_CatalogCheck(self, action, storage):
    """Validate an action/storage test's .out file against its .ans file,
    then run gpcheckcat to verify catalog consistency.

    :param action: test action name; combined with ``storage`` to build
        the sql/expected file names.
    :param storage: storage type; for 'multisegfiles' additionally asserts
        that multi_segfile_tab has more than one segment file per column.
    """
    file_name = action + '_' + storage
    # Fix: build paths with os.path.join instead of string concatenation.
    out_file = os.path.join(self.base_dir, 'sql', file_name + '.out')
    ans_file = os.path.join(self.base_dir, 'expected', file_name + '.ans')
    tinctest.logger.info('out-file == %s \n' % out_file)
    tinctest.logger.info('ans-file == %s \n' % ans_file)
    # Validate the output against the expected answer file.
    self.validate_sql(ans_file, out_file)
    if storage == 'multisegfiles':
        # Ask the owning segment (in utility mode) whether
        # multi_segfile_tab really has multiple segfiles per column.
        tablename = 'multi_segfile_tab'
        relid = self.get_relid(file_name=tablename)
        utilitymodeinfo = self.get_utilitymode_conn_info(relid=relid)
        u_port = utilitymodeinfo[0]
        u_host = utilitymodeinfo[1]
        assert (1 < int(self.get_segment_cnt(relid=relid, host=u_host, port=u_port)))
    # Check correctness of the catalog; report goes to a timestamped file.
    # Fix: datetime.now() replaces the redundant
    # datetime.fromtimestamp(time.time()) round-trip (same result).
    self.dbstate = DbStateClass('run_validation')
    outfile = local_path('gpcheckcat_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.out')
    self.dbstate.check_catalog(outputFile=outfile)
def test_mpp23395(self):
    """
    @description Test MPP-20964, uncleaned lock table by pg_terminate_backend
    @product_version gpdb: [4.3.3.1-],[4.2.8.5-4.2.99.99]
    """
    self.util = Filerepe2e_Util()

    # Clear any leftover fault on the master (seg_id=1) before starting.
    (ok, out) = self.util.inject_fault(f='dtm_broadcast_commit_prepared', y='reset', seg_id=1)
    if not ok:
        raise Exception(
            "Failed to reset the fault dtm_broadcast_commit_prepared")

    # setup
    PSQL.run_sql_command(""" DROP TABLE IF EXISTS mpp23395; """)

    # Scenario 1: FAULT during Create Table on master
    sql = ''' CREATE TABLE mpp23395(a int); '''
    self.run_sequence(sql, 'dtm_broadcast_commit_prepared', 'fatal', 1)

    # Scenario 2: FAULT during Drop Table on master, COMMIT case
    sql = ''' DROP TABLE mpp23395; '''
    self.run_sequence(sql, 'dtm_broadcast_commit_prepared', 'fatal', 1)

    # Reset the master fault again before the segment-level scenarios.
    (ok, out) = self.util.inject_fault(f='dtm_broadcast_commit_prepared', y='reset', seg_id=1)
    if not ok:
        raise Exception(
            "Failed to reset the fault dtm_broadcast_commit_prepared")

    # Scenario 3: FAULT during Create Table on segment, COMMIT case
    sql = ''' SET debug_dtm_action_target = "protocol"; SET debug_dtm_action_protocol = "commit_prepared"; SET debug_dtm_action_segment = 0; SET debug_dtm_action = "fail_begin_command"; CREATE TABLE mpp23395(a int); '''
    self.run_sequence(sql, 'twophase_transaction_commit_prepared', 'error', 2)

    # Scenario 4: FAULT during Drop Table on segment, COMMIT case
    sql = ''' SET debug_dtm_action_target = "protocol"; SET debug_dtm_action_protocol = "commit_prepared"; SET debug_dtm_action_segment = 0; SET debug_dtm_action = "fail_begin_command"; DROP TABLE mpp23395; '''
    self.run_sequence(sql, 'twophase_transaction_commit_prepared', 'error', 2)

    # Scenario 5: FAULT during Create Table on master, ABORT case
    (ok, out) = self.util.inject_fault(
        f='transaction_abort_after_distributed_prepared', y='error', seg_id=1)
    if not ok:
        raise Exception(
            "Failed to set the error fault for transaction_abort_after_distributed_prepared"
        )

    sql = ''' CREATE TABLE mpp23395(a int); '''
    self.run_sequence(sql, 'dtm_broadcast_abort_prepared', 'fatal', 1)

    (ok, out) = self.util.inject_fault(
        f='transaction_abort_after_distributed_prepared', y='reset', seg_id=1)
    if not ok:
        raise Exception(
            "Failed to reset the fault transaction_abort_after_distributed_prepared"
        )

    # Re-create the table so the DROP scenario below has something to drop.
    PSQL.run_sql_command(""" CREATE TABLE mpp23395(a int); """)

    # Scenario 6: FAULT during Drop Table on master, ABORT case
    (ok, out) = self.util.inject_fault(
        f='transaction_abort_after_distributed_prepared', y='error', seg_id=1)
    if not ok:
        raise Exception(
            "Failed to set the error fault for transaction_abort_after_distributed_prepared"
        )

    sql = ''' DROP TABLE mpp23395; '''
    self.run_sequence(sql, 'dtm_broadcast_abort_prepared', 'fatal', 1)

    (ok, out) = self.util.inject_fault(
        f='transaction_abort_after_distributed_prepared', y='reset', seg_id=1)
    if not ok:
        raise Exception(
            "Failed to reset the fault transaction_abort_after_distributed_prepared"
        )

    PSQL.run_sql_command(""" DROP TABLE mpp23395; """)

    # Final catalog consistency check after all scenarios.
    dbstate = DbStateClass('run_validation')
    dbstate.check_catalog()
def check_mirror_seg(self, master=True):
    """Verify data integrity between primary and mirror segments.

    :param master: include the master/standby pair in the integrity check.
        Defaults to True, matching the previous hard-coded behavior, so
        existing no-argument callers are unaffected.
    """
    # Generalized: master was previously hard-coded to True; it is now a
    # backward-compatible keyword parameter (consistent with the other
    # check_mirror_seg helper in this suite).
    dbstate = DbStateClass('run_validation')
    dbstate.check_mirrorintegrity(master=master)
def test_verify_catalog(self):
    """Run gpcheckcat against the catalog_consistency test database only."""
    checker = DbStateClass('run_validation')
    checker.check_catalog(alldb=False, dbname=catalog_consistency.dbname)
def test_gpcheckcat(self):
    """Run gpcheckcat on the suite database to check catalog consistency."""
    tinctest.logger.info('Run Checkcat to verify persistent table consistency')
    state = DbStateClass('run_validation')
    state.check_catalog(alldb=False, dbname=Steps.dbname)
def test_mpp23395(self):
    """
    @description Test MPP-20964, uncleaned lock table by pg_terminate_backend
    @product_version gpdb: [4.3.3.1-],[4.2.8.5-4.2.99.99]
    """
    self.util = Filerepe2e_Util()

    # Clear any leftover fault on the master (seg_id=1) before starting.
    (ok,out) = self.util.inject_fault(f='dtm_broadcast_commit_prepared', y='reset', seg_id=1);
    if not ok:
        raise Exception("Failed to reset the fault dtm_broadcast_commit_prepared")

    # setup
    PSQL.run_sql_command(""" DROP TABLE IF EXISTS mpp23395; """)

    # Scenario 1: FAULT during Create Table on master
    sql = ''' CREATE TABLE mpp23395(a int); '''
    self.run_sequence(sql, 'dtm_broadcast_commit_prepared', 'fatal', 1);

    # Scenario 2: FAULT during Drop Table on master, COMMIT case
    sql = ''' DROP TABLE mpp23395; '''
    self.run_sequence(sql, 'dtm_broadcast_commit_prepared', 'fatal', 1);

    # Reset the master fault again before the segment-level scenarios.
    (ok,out) = self.util.inject_fault(f='dtm_broadcast_commit_prepared', y='reset', seg_id=1);
    if not ok:
        raise Exception("Failed to reset the fault dtm_broadcast_commit_prepared")

    # Scenario 3: FAULT during Create Table on segment, COMMIT case
    sql = ''' SET dtx_phase2_retry_count = 1; SET debug_dtm_action_target = "protocol"; SET debug_dtm_action_protocol = "commit_prepared"; SET debug_dtm_action_segment = 0; SET debug_dtm_action = "fail_begin_command"; CREATE TABLE mpp23395(a int); '''
    self.run_sequence(sql, 'twophase_transaction_commit_prepared', 'error', 2);

    # Scenario 4: FAULT during Drop Table on segment, COMMIT case
    sql = ''' SET dtx_phase2_retry_count = 1; SET debug_dtm_action_target = "protocol"; SET debug_dtm_action_protocol = "commit_prepared"; SET debug_dtm_action_segment = 0; SET debug_dtm_action = "fail_begin_command"; DROP TABLE mpp23395; '''
    self.run_sequence(sql, 'twophase_transaction_commit_prepared', 'error', 2);

    # Scenario 5: FAULT during Create Table on master, ABORT case
    (ok,out) = self.util.inject_fault(f='transaction_abort_after_distributed_prepared', y='error', seg_id=1);
    if not ok:
        raise Exception("Failed to set the error fault for transaction_abort_after_distributed_prepared")

    sql = ''' CREATE TABLE mpp23395(a int); '''
    self.run_sequence(sql, 'dtm_broadcast_abort_prepared', 'fatal', 1);

    (ok,out) = self.util.inject_fault(f='transaction_abort_after_distributed_prepared', y='reset', seg_id=1);
    if not ok:
        raise Exception("Failed to reset the fault transaction_abort_after_distributed_prepared")

    # Re-create the table so the DROP scenario below has something to drop.
    PSQL.run_sql_command(""" CREATE TABLE mpp23395(a int); """)

    # Scenario 6: FAULT during Drop Table on master, ABORT case
    (ok,out) = self.util.inject_fault(f='transaction_abort_after_distributed_prepared', y='error', seg_id=1);
    if not ok:
        raise Exception("Failed to set the error fault for transaction_abort_after_distributed_prepared")

    sql = ''' DROP TABLE mpp23395; '''
    self.run_sequence(sql, 'dtm_broadcast_abort_prepared', 'fatal', 1);

    (ok,out) = self.util.inject_fault(f='transaction_abort_after_distributed_prepared', y='reset', seg_id=1);
    if not ok:
        raise Exception("Failed to reset the fault transaction_abort_after_distributed_prepared")

    PSQL.run_sql_command(""" DROP TABLE mpp23395; """)

    # Scenario 7: FAULT during Create Table on segment, COMMIT case, succeeds on second retry
    sql = ''' DROP TABLE IF EXISTS mpp23395; SET debug_dtm_action_target = "protocol"; SET debug_dtm_action_protocol = "commit_prepared"; SET debug_dtm_action_segment = 0; SET debug_dtm_action = "fail_begin_command"; CREATE TABLE mpp23395(a int); '''
    self.run_sequence(sql, 'finish_prepared_after_record_commit_prepared', 'error', 2, False);

    # Scenario 8: QE panics after writing prepare xlog record. This should
    # cause master to broadcast abort but QEs handle the abort in
    # DTX_CONTEXT_LOCAL_ONLY context.
    sql = ''' DROP TABLE IF EXISTS mpp23395; CREATE TABLE mpp23395(a int); INSERT INTO mpp23395 VALUES(1), (2), (3); SET debug_abort_after_segment_prepared = true; DELETE FROM mpp23395; '''

    # No prepared transactions should remain lingering
    PSQL.run_sql_command(sql)
    self.check_no_dangling_prepared_transaction()

    # Final catalog consistency check after all scenarios.
    dbstate = DbStateClass('run_validation')
    dbstate.check_catalog()
def check_mirror_seg(self, master=False):
    """Run the primary/mirror data-integrity check.

    :param master: when True, also include the master/standby pair in the
        check.
    """
    tinctest.logger.info("running check mirror")
    verifier = DbStateClass('run_validation')
    verifier.check_mirrorintegrity(master=master)
def test_gpcheckcat(self):
    """Run gpcheckcat against the Verification suite database only."""
    state = DbStateClass('run_validation')
    state.check_catalog(alldb=False, dbname=Verification.dbname)
def run_validation(self):
    """Verify data integrity between primary and mirror segments."""
    # Fix: corrected the typo "Veriy" -> "Verify" in the log message.
    tinctest.logger.info('Verify the integrity between primary and mirror ...')
    self.dbstate = DbStateClass('run_validation')
    self.dbstate.check_mirrorintegrity()