Example #1
    def check_segment_roles(self):
        """
        @summary: Checks if the segments are in preferred roles or not.
                    If not, rebalances the cluster.
        
        @return: None
        """

        newfault = Fault()
        # If the segments are not in preferred roles, go for rebalancing the cluster
        if newfault.check_if_not_in_preferred_role():
            tinctest.logger.warn(
                "***** Segments not in their preferred roles : rebalancing the segments..."
            )
            # If rebalancing downs the segments, go for incremental recovery - this is observed sometimes
            if not self.recover_segments('-r', 10):
                tinctest.logger.warn(
                    "***** Segments down after rebalance : Tests cannot proceed further!!"
                )
            # If rebalancing passes proceed for tests
            else:
                tinctest.logger.info(
                    "***** Segments successfully rebalanced : Proceeding with the tests"
                )
        # If segments in preferred roles, proceed for the tests
        else:
            tinctest.logger.info(
                "***** Segments in preferred roles : Proceeding with the tests"
            )
Example #2
    def test_wait_till_segments_in_change_tracking(self):
        """
        [feature]: Wait until segments go into change tracking

        """
        newfault = Fault()
        rtrycnt = 0
        while not newfault.is_changetracking():
            tinctest.logger.info("Waiting [%s] for DB to go in CT mode" % rtrycnt)
            rtrycnt = rtrycnt + 1
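Note: this loop, like the similar waits in later examples, polls is_changetracking() with no delay and no retry limit. A minimal bounded sketch of the same wait, assuming the tinctest logger and Fault API used above and picking an arbitrary cap and sleep interval, could look like this:

import time
import tinctest

def wait_for_changetracking(fault, max_retries=300, interval_sec=5):
    # Illustrative helper, not part of the original suite: max_retries and
    # interval_sec are assumed values; fault is expected to expose the same
    # is_changetracking() method used in the examples above.
    for attempt in range(max_retries):
        if fault.is_changetracking():
            return True
        tinctest.logger.info("Waiting [%s] for DB to go in CT mode" % attempt)
        time.sleep(interval_sec)
    return False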
Example #3
    def test_create_symlink_for_seg(self):
        """
        [feature]: Creates a symlink to the data directory for a given segment

        """

        newfault = Fault()
        seginfo = newfault.get_seginfo(preferred_role='m', content=1)
        newfault.create_remote_symlink(seginfo.getSegmentHostName(), seginfo.getSegmentDataDirectory())
        tinctest.logger.info('Created symlink for seg %s on host %s' % (seginfo.getSegmentDataDirectory(), seginfo.getSegmentHostName()))
Example #4
    def test_corrupt_persistent_tables(self):
        """
        [feature]: Corrupts persistent tables (PT) for a segment that has been marked down

        """

        newfault = Fault()
        seginfo = newfault.get_seginfo(preferred_role='p', content=1)
        pt = PTTestCase('corrupt_persistent_table')
        pt.corrupt_persistent_table(seginfo.getSegmentHostName(), seginfo.getSegmentPort())
        tinctest.logger.info('Finished corruption of PT tables')
Example #5
    def test_remove_symlink_for_seg(self):
        """
        [feature]: Removes the symlink for the data directory and restores the original directory
                   for a given segment.
        
        """

        newfault = Fault()
        seginfo = newfault.get_seginfo(preferred_role='m', content=1)
        newfault.remove_remote_symlink(seginfo.getSegmentHostName(), seginfo.getSegmentDataDirectory())
        tinctest.logger.info('Removed symlinks for seg %s on host %s' % (seginfo.getSegmentDataDirectory(), seginfo.getSegmentHostName()))
Example #6
    def test_shared_mem_is_cleaned(self):
        """
        [feature]: Check if the shared memory is cleaned up

        """
        newfault = Fault()
        seginfo = newfault.get_seginfo(preferred_role='p', content=0)
        cmd = Command('check for shared memory', cmdStr="ipcs -a", ctxt=REMOTE, remoteHost=seginfo.getSegmentHostName())
        cmd.run(validateAfter=True)
        result = cmd.get_results().stdout.split('\n')
        # A row whose last column is 0 indicates a shared memory segment with no
        # attached processes, i.e. memory that was not cleaned up.
        for r in result:
            if r and r.split()[-1] == '0':
                raise Exception('Shared memory not cleaned up for %s' % r)
Example #7
    def test_kill_mirror(self):
        """
        [feature]: Kills mirror gp0 segment 
        
        """

        newfault = Fault()
        hosts = newfault.get_segment_host(preferred_role='m',content=0)
        if not newfault.kill_mirror_gp0(hosts):
            self.fail("Could not the kill the mirror process, cannot proceed further!")
        rtrycnt = 0
        while not newfault.is_changetracking():
            tinctest.logger.info("Waiting [%s] for DB to go in CT mode" %rtrycnt)
            rtrycnt = rtrycnt + 1
Example #8
    def test_drop_pg_dirs_on_primary(self):
        """
        [feature]: Drops primary gp0 folder 
        
        """

        newfault = Fault()
        (host, fileLoc) = newfault.get_segment_host_fileLoc()
        newfault.drop_pg_dirs_on_primary(host, fileLoc)
        rtrycnt = 0
        max_rtrycnt = 300
        # Bound the wait with max_rtrycnt so the test cannot spin forever
        while rtrycnt < max_rtrycnt and not newfault.is_changetracking():
            tinctest.logger.info("Waiting [%s] for DB to go into CT mode" % rtrycnt)
            rtrycnt = rtrycnt + 1
Example #9
    def test_use_gpfaultinjector_to_mark_segment_down(self):
        """
        [feature]: Use gpfaultinjector to mark a segment down in the configuration while the
        segment process is still running.
        
        """

        newfault = Fault()
        seginfo = newfault.get_seginfo(preferred_role='m', content=1)
        newfault.inject_using_gpfaultinjector(fault_name='filerep_consumer', fault_mode='async', fault_type='fault', segdbid=seginfo.getSegmentDbId())
        rtrycnt = 0
        while (not newfault.is_changetracking()):
            tinctest.logger.info("Waiting [%s] for DB to go into CT mode" % rtrycnt)
            rtrycnt += 1
Example #10
    def test_kill_primary_group(self):
        """
        [feature]: Kill a group of primary segments
        
        """

        newfault = Fault()
        seglist = newfault.get_seginfo_for_primaries()
        # Kill the first half (rounded up) of the primaries
        seglist = seglist[:(len(seglist) + 1) / 2]
        for seg in seglist:
            tinctest.logger.info('Killing segment %s' % seg.getSegmentDataDirectory())
            newfault.kill_primary(seg.getSegmentHostName(), seg.getSegmentDataDirectory(), seg.getSegmentPort())
        rtrycnt = 0
        while (not newfault.is_changetracking()):
            tinctest.logger.info('Waiting [%s] for DB to go in CT mode' % rtrycnt)
            rtrycnt += 1
Example #11
    def test_recovery_with_new_loc(self):
        """
        [feature]: Performs recovery by creating a configuration file with new segment locations 
        
        """

        newfault = Fault()
        config = GPDBConfig()
        hosts = newfault.get_segment_host()
        newfault.create_new_loc_config(hosts, orig_filename='recovery.conf', new_filename='recovery_new.conf')
        if not newfault.run_recovery_with_config(filename='recovery_new.conf'):
            self.fail("*** Incremental recovery with config file recovery_new.conf failed")
        rtrycnt = 0
        while (not config.is_not_insync_segments()):
            tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
            rtrycnt = rtrycnt + 1
Example #12
    def check_if_not_in_preferred_role(self):
        ''' Checks if the segments are in preferred role or not '''
        newfault = Fault()
        result = newfault.check_if_not_in_preferred_role()
        if result:
            self.fail("Segments are not in preferred roles!!!")
Example #13
    def gprestartdb(self):
        ''' Restarts the database '''
        newfault = Fault()
        newfault.stop_db()
        newfault.start_db()
        sleep(30)