def crash_system_after_checkpoint(self, mode):
    '''Issue checkpoint followed by gpstop -ai immediately.

    mode -- 'InSync' restarts from an in-sync cluster; any other value first
            drops the cluster into Change Tracking mode before the checkpoint.
    '''
    newfault = Fault()

    # Both the InSync and the CT path start with a 10-15 min random wait so
    # concurrent workload can generate in-flight transactions. The original
    # duplicated this sleep block verbatim in both branches; it is hoisted
    # out here — the observable behavior is unchanged.
    ntime = randint(600, 900)
    tinctest.logger.info('sleep for %s sec...' % ntime)
    sleep(ntime)

    if mode != 'InSync':
        # Put the system into Change Tracking mode.
        is_ct_mode_enabled = self.do_changetracking()
        if not is_ct_mode_enabled:
            self.fail("Change Tracking mode not enabled!!")
        # Second 10-15 min wait while the cluster runs in CT mode.
        ntime = randint(600, 900)
        tinctest.logger.info('sleep for %s sec...' % ntime)
        sleep(ntime)

    # Force a checkpoint, then bounce the database immediately.
    newfault.issue_checkpoint()
    newfault.restart_db()
def test_changetracking_kill_primary_reboot(self):
    '''Enter Change Tracking mode under load, then reboot the cluster.'''
    # Give the concurrent workload time to create some in-flight transactions.
    sleep(30)
    if not self.do_changetracking():
        self.fail("Change Tracking mode not enabled!!")
    # Let more in-flight transactions accumulate while running in CT mode.
    sleep(100)
    # Reboot so that concurrent sessions will end.
    Fault().restart_db()
def test_failover_to_mirror_during_transactions(self):
    '''Kill primary segment processes under load to force failover to mirrors.'''
    tinctest.logger.debug('Fault Injection Tests - starting the failover to mirror scenario')
    # Give the concurrently running generate_sqls time to build up real load.
    tinctest.logger.info('Sleep introduced of 120 sec')
    sleep(120)
    fault = Fault()
    fault.kill_processes_with_role('p')
    fault.restart_db()
    tinctest.logger.debug('Fault Injection Tests - failover to mirror completed')
def test_partition_table_scenario(self):
    '''Force a checkpoint and restart the database for the partition-table scenario.'''
    tinctest.logger.debug('Starting the Partition tables scenario')
    fault = Fault()
    if not fault.issue_checkpoint():
        # Checkpoint could not be forced; log and abort the test.
        tinctest.logger.info('Unable to force checkpoint - Is the server running?')
        self.fail('Unable to force checkpoint - Check the server')
    else:
        tinctest.logger.info('Checkpoint forced..')
    fault.restart_db()
    tinctest.logger.debug('Partition tables scenario completed')
def post_master_reset_during_transaction(self, kill_all=True):
    '''Kill segment postmasters under load, then restart until gpstop succeeds.

    kill_all -- when False it is passed explicitly to
                Fault.kill_all_segment_processes(); otherwise that helper's
                own default is used (its default is not visible here).
    '''
    tinctest.logger.debug('Fault Injection Tests - starting postmaster reset scenario')
    # Give the concurrently running generate_sqls time to build up real load.
    tinctest.logger.info('Sleep introduced of 120 sec')
    sleep(120)
    fault = Fault()
    if kill_all:
        fault.kill_all_segment_processes()
    else:
        fault.kill_all_segment_processes(False)
    # Retry the restart until it succeeds: it can fail while the cluster is
    # still recovering from the postmaster reset.
    while True:
        result = fault.restart_db()
        if result.rc == 0:
            break
        tinctest.logger.debug("gpstop -air failed - cluster restarting!!")
        tinctest.logger.debug("executing gpstop -air again...")
    tinctest.logger.debug('Fault Injection Tests - postmaster reset scenario completed')
def test_kill_segments_periodically(self):
    '''Create system faults by killing segments randomly.'''
    fault = Fault()
    # 1. Kill mirror segment processes.
    fault.kill_segment_processes_randomly('m')
    # 2. Wait 10-15 minutes before moving on to the primaries.
    pause = randint(600, 900)
    tinctest.logger.info('sleep for %s sec. before start killing primary process' % pause)
    sleep(pause)
    # 3. Kill primary segment processes.
    fault.kill_segment_processes_randomly('p')
    # 4. Wait another 10-15 minutes before rebooting.
    pause = randint(600, 900)
    tinctest.logger.info('sleep for %s sec. befor reboot' % pause)
    sleep(pause)
    # 5. Restart the cluster.
    fault.restart_db()
def gprestart_db(self):
    '''Restarts the Database.'''
    # Short settle delay before issuing the restart.
    sleep(5)
    Fault().restart_db()
# NOTE(review): this is a byte-for-byte duplicate of the gprestart_db defined
# immediately above; in Python the later definition silently replaces the
# earlier one in the class namespace. Consider removing one copy.
def gprestart_db(self):
    ''' Restarts the Database '''
    # Short settle delay before issuing the restart.
    sleep(5)
    newfault = Fault()
    newfault.restart_db()
def test_oom_scenario(self):
    '''Let the system run long enough to hit OOM, then restart the database.'''
    tinctest.logger.debug('Restarting the db in 40 mins for hitting OOM')
    # Wait 40 minutes so the OOM scenario is reached before the restart.
    sleep(2400)
    Fault().restart_db()