Code example #1
File: test_run_workload.py Project: 50wu/gpdb
 def get_substitutions(self):
     subst = {}
     config = GPDBConfig()
     host, _ = config.get_hostandport_of_segment(0)
     subst['@host@'] = host  # host of segment 0, looked up above
     subst['@script@'] = os.path.join(self.get_sql_dir(), 'datagen.py')
     return subst
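
A typical consumer of such a map is a simple search-and-replace over a SQL or script template. The helper below is an illustrative sketch, not part of the TINC framework shown here; the template path and the @key@ placeholder convention are assumptions based on the keys built above.

 def apply_substitutions(template_path, subst):
     # Read a template and replace each @key@ placeholder with its value.
     with open(template_path) as f:
         text = f.read()
     for key, value in subst.items():
         text = text.replace(key, value)
     return text

 # Hypothetical usage:
 # sql = apply_substitutions('datagen.sql.t', self.get_substitutions())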
Code example #2
    def wait_for_shutdown_before_commit(self):
        self.check_system()

        config = GPDBConfig()
        db_id = config.get_dbid(-1,'p')

        test_case_list0 = []
        test_case_list0.append(('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.set_faults', ['fts_wait_for_shutdown', 'infinite_loop'], {'seg_id': db_id}))
        self.test_case_scenario.append(test_case_list0)

        test_case_list1 = []
        test_case_list1.append(('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.set_faults', ['filerep_consumer', 'fault', 'primary']))
        self.test_case_scenario.append(test_case_list1)

        test_case_list2 = []
        test_case_list2.append(('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.check_fault_status', ['fts_wait_for_shutdown'], {'seg_id': db_id}))
        self.test_case_scenario.append(test_case_list2)

        test_case_list3 = []
        test_case_list3.append('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.restart_db_with_no_rc_check')
        self.test_case_scenario.append(test_case_list3)
    
        test_case_list4 = []
        test_case_list4.append('mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.cluster_state')
        self.test_case_scenario.append(test_case_list4)
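
Each scenario entry above is either a bare dotted test name or a tuple of (dotted name, positional args, keyword args). A small normalizer for that convention might look like the following sketch (a hypothetical helper, not part of the framework shown):

 def normalize_scenario_entry(entry):
     # Accept 'pkg.mod.Class.method' or (name, args) or (name, args, kwargs)
     # and return a uniform (name, args, kwargs) triple.
     if isinstance(entry, tuple):
         name = entry[0]
         args = list(entry[1]) if len(entry) > 1 else []
         kwargs = dict(entry[2]) if len(entry) > 2 else {}
     else:
         name, args, kwargs = entry, [], {}
     return name, args, kwargs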
Code example #3
File: genFault.py Project: zts-myankovskiy/gpdb
 def rebalance_cluster(self):
     config = GPDBConfig()
     self.run_recovery('r')
     rtrycnt = 0
     while not config.is_not_insync_segments() and rtrycnt <= 5:
         tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
         sleep(10)
         rtrycnt += 1
     # gprecoverseg -ar has often been observed to mark segments down,
     # so fall back to an incremental recovery if the cluster is still not in sync.
     if config.is_not_insync_segments():
         return True
     else:
         self.run_recovery()
         rtrycnt = 0
         max_rtrycnt = 10
         while not config.is_not_insync_segments() and rtrycnt < max_rtrycnt:
             tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
             sleep(10)
             rtrycnt += 1
         if rtrycnt < max_rtrycnt:
             return True
         else:
             tinctest.logger.error("Segments not up after incremental recovery!!")
             return False
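
Both loops above are instances of the same poll-with-retry pattern. A reusable helper keeps that intent in one place; this is an illustrative refactoring sketch, not code from the repository:

 from time import sleep

 def poll_until(predicate, attempts, interval=10, log=None):
     # Invoke predicate() up to `attempts` times, sleeping between tries;
     # re-check once at the end so callers get the final state.
     for attempt in range(attempts):
         if predicate():
             return True
         if log:
             log("Waiting [%s] for DB to recover" % attempt)
         sleep(interval)
     return predicate()

 # Hypothetical usage:
 # poll_until(config.is_not_insync_segments, 5, log=tinctest.logger.info)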
Code example #4
File: dbstate.py Project: PengJi/gpdb-comments
 def __init__(self, methodName, config=None):
     if config is not None:
         self.config = config
     else:
         self.config = GPDBConfig()
     self.gpverify = GpdbVerify(config=self.config)
     super(DbStateClass, self).__init__(methodName)
Code example #5
    def setUpClass(cls):
        # we need an empty db to run the tests
        tinctest.logger.info("recreate database wet using dropdb/createdb")
        cmd = Command('recreatedb', 'dropdb wet; createdb wet')
        cmd.run(validateAfter=False)

        cls.drop_roles()

        super(LegacyWETTestCase, cls).setUpClass()

        source_dir = cls.get_source_dir()
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        port = mppUtil.getOpenPort(8080)
        tinctest.logger.info("gpfdist host = {0}, port = {1}".format(host, port))

        cls.config = config

        data_dir = os.path.join(source_dir, 'data')
        cls.gpfdist = GPFDIST(port, host, directory=data_dir)
        cls.gpfdist.startGpfdist()

        # WET writes into this directory.
        data_out_dir = os.path.join(cls.gpfdist.getdir(), 'output')
        shutil.rmtree(data_out_dir, ignore_errors=True)
        os.mkdir(data_out_dir)
Code example #6
File: gprecoverseg.py Project: zts-myankovskiy/gpdb
class GpRecover(GpRecoverseg):
    '''Class for gprecoverseg utility methods '''

    MAX_COUNTER = 400

    def __init__(self, config=None):
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.gphome = os.environ.get('GPHOME')

    def incremental(self, workerPool=False):
        '''Incremental Recoverseg '''
        tinctest.logger.info('Running Incremental gprecoverseg...')
        if workerPool:
            return self.run_using_workerpool()
        else:
            return self.run()

    def full(self):
        '''Full Recoverseg '''
        tinctest.logger.info('Running Full gprecoverseg...')
        return self.run(option='-F')

    def rebalance(self):
        '''Run gprecoverseg to rebalance the cluster '''
        tinctest.logger.info('Running gprecoverseg rebalance...')
        return self.run(option='-r')

    def wait_till_insync_transition(self):
        '''
            Poll till all the segments transition to insync state. 
            Number of trials set to MAX_COUNTER
        '''
        counter = 1
        while not self.config.is_not_insync_segments():
            if counter > self.MAX_COUNTER:
                raise Exception('Segments did not come insync after 20 minutes')
            else:
                counter += 1
                time.sleep(3)  # wait 3 secs before polling again
        tinctest.logger.info('Segments are synchronized ...')
        return True
        
    def recover_rebalance_segs(self):
        if not self.config.is_balanced_segments():
            # recover
            if not self.incremental():
                raise Exception('gprecoverseg failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments recovered and back in sync')

            # rebalance
            if not self.rebalance():
                raise Exception('gprecoverseg -r failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments rebalanced and back in sync')
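
Callers typically chain these methods: run a recovery, then block until the cluster reports in-sync. A minimal usage sketch, assuming a running cluster and the imports this module already uses:

 recoverseg = GpRecover()
 if not recoverseg.incremental():
     raise Exception('gprecoverseg failed')
 recoverseg.wait_till_insync_transition()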
Code example #7
    def test_insert_commit_before_truncate(self):
        '''
        @description We suspend vacuum on the master after the first
                     transaction, connect to a segment, modify the
                     relation being vacuumed, and commit the segment-local
                     transaction before the truncate transaction starts.
        '''
        fault_name = 'vacuum_relation_end_of_first_round'

        gpdbconfig = GPDBConfig()
        seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
        filereputil = Filerepe2e_Util()
        filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')

        # run vacuum in background, it'll be blocked.
        sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
        psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
        thread1 = threading.Thread(target=self.run_psql, args=(psql1,))
        thread1.start()

        self.check_fault_triggered(fault_name)

        sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
        # utility to seg0
        psql2 = PSQL(sql_file=sql_file2, out_file=out_file2,
                     host=seghost, port=segport,
                     PGOPTIONS='-c gp_session_role=utility')
        self.run_psql(psql2)

        # resume vacuum
        filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')
        thread1.join()
        self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
        self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
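
The run_psql helper is not shown in this excerpt; judging from its call sites, a plausible minimal implementation just executes the prepared PSQL object, e.g.:

        def run_psql(self, psql):
            # Hypothetical sketch: execute the prepared PSQL command object.
            # The real method in this suite may also record status or output.
            psql.run(validateAfter=False)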
Code example #8
    def __init__(self, methodName):
        self.config = GPDBConfig()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.seg_prefix = os.path.basename(self.mdd).split('-')[0]
        self.master_host = self.config.get_masterhost()
        self.gpinitconfig_template = local_path(
            'configs/gpinitconfig_template')
        self.datadir_config_file = local_path('configs/datadir_config_file')
        self.mirror_config_file = local_path('configs/mirror_config_file')
        self.gpinitconfig_file = local_path('configs/gpinitconfig')
        self.host_file = local_path('configs/hosts')
        self.hosts = self.config.get_hosts(segments=True)

        self.port_base = '40000'
        self.master_port = os.environ.get('PGPORT', '5432')
        self.primary_data_dir = self.config.get_host_and_datadir_of_segment(
            dbid=2)[1]
        # initially set the mirror data dir same to primary's
        self.mirror_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(self.primary_data_dir)), 'mirror')
        self.gpinitsystem = True
        self.number_of_segments = self.config.get_countprimarysegments()
        self.number_of_segments_per_host = self.number_of_segments / len(
            self.hosts)
        self.standby_enabled = False
        self.number_of_parallelism = 4
        self.fs_location = []

        super(GPAddmirrorsTestCase, self).__init__(methodName)
Code example #9
File: test_ctas.py Project: 50wu/gpdb
def setUpFilespaceForCTAS(isForHawq):
    config = GPDBConfig()
    if isForHawq:
        filespace = HAWQGpfilespace()
    else:
        filespace = Gpfilespace()
    if config.is_not_insync_segments():
        filespace.create_filespace('tincrepo_qp_ddl_ctas')
Code example #10
 def setUp(self):
     super(GpCheckcatTests, self).setUp()
     self.config = GPDBConfig()
     self.gpcheckcat_test_dir = local_path('gpcheckcat_dir')
     if not os.path.exists(self.gpcheckcat_test_dir):
         os.makedirs(self.gpcheckcat_test_dir, 0777)
     else:
         os.chmod(self.gpcheckcat_test_dir, 0777)
Code example #11
File: __init__.py Project: PengJi/gpdb-comments
 def __init__(self):
     self.stdby = StandbyVerify()
     self.runmixin = StandbyRunMixin()
     self.runmixin.createdb(dbname='walrepl')
     self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
     self.config = GPDBConfig()
     self.pgutil = GpUtility()
     self.host = socket.gethostname()
Code example #12
File: __init__.py Project: 50wu/gpdb
 def __init__(self, methodName):
     self.gp = GpactivateStandby()
     self.verify = StandbyVerify()
     self.config = GPDBConfig()
     self.disk = Disk()
     self.sdby_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
     self.pgport = os.environ.get('PGPORT')
     super(OODClass, self).__init__(methodName)
Code example #13
File: test_crossexec.py Project: zlyswjtu/gpdb
    def test_insert_unlock_before_truncate(self):
        '''
        @description This is rather complicated.  We suspend the vacuum on
                     the master after the first transaction, connect to a
                     segment, modify the relation in question, and release
                     the lock while keeping the transaction open.  To release
                     the lock, we need a special UDF.  Vacuum is supposed to
                     skip the truncate if it sees such an in-progress
                     transaction.  Usually this should not happen; it rather
                     simulates catalog DDL.
        '''
        fault_name = 'vacuum_relation_end_of_first_round'

        gpdbconfig = GPDBConfig()
        seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
        filereputil = Filerepe2e_Util()
        filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')

        PSQL.run_sql_command(
            sql_cmd=
            'drop table if exists sync_table; create table sync_table(a int)')
        # Use pygresql to keep the connection open and issue commands separately.
        # thread2 will wait on sync_table before finishing its work, so we
        # can keep the transaction open until the vacuum completes its work.
        conn = pygresql.pg.connect(host=seghost,
                                   port=int(segport),
                                   opt='-c gp_session_role=utility')
        conn.query('begin')
        conn.query('lock sync_table in access exclusive mode')

        # run vacuum in the background; it'll be blocked.
        sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
        psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
        thread1 = threading.Thread(target=self.run_psql, args=(psql1, ))
        thread1.start()

        self.check_fault_triggered(fault_name)

        sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
        # utility to seg0
        psql2 = PSQL(sql_file=sql_file2,
                     out_file=out_file2,
                     host=seghost,
                     port=segport,
                     PGOPTIONS='-c gp_session_role=utility')
        thread2 = threading.Thread(target=self.run_psql, args=(psql2, ))
        thread2.start()

        # resume vacuum
        filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')

        # Once thread1 finishes, we can now release the lock on sync_table,
        # so that thread2 can proceed.
        thread1.join()
        conn.query('commit')
        thread2.join()

        self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
        self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
Code example #14
    def get_host_and_db_path(self, dbname, contentid=0):
        ''' Get the host and database path for the given content id '''
        config = GPDBConfig()
        db_oid = PSQL.run_sql_command("select oid from pg_database where datname='%s'" % dbname, flags='-q -t', dbname='postgres')
        dbid = PSQL.run_sql_command("select dbid from gp_segment_configuration where content=%s and role='p'" % contentid, flags='-q -t', dbname='postgres')
        (host, datadir) = config.get_host_and_datadir_of_segment(dbid=dbid.strip())

        db_path = os.path.join(datadir, 'base', db_oid.strip())
        return (host.strip(), db_path)
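
A usage sketch (the database name here is hypothetical):

        # Locate the data files of database 'gptest' on content 0
        host, db_path = self.get_host_and_db_path('gptest')
        tinctest.logger.info('database files live at %s:%s' % (host, db_path))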
Code example #15
File: common_utils.py Project: zts-myankovskiy/gpdb
def copy_files_to_master(filename, location):
    config = GPDBConfig()
    host = config.get_masterhost()
    cmd = 'gpssh -h %s -e "scp %s %s:%s/" ' % (host, filename, host, location)
    tinctest.logger.debug(cmd)
    res = {'rc': 0, 'stderr': '', 'stdout': ''}
    run_shell_command(cmd, 'run scp', res)
    if res['rc'] > 0:
        raise Exception('Copying to host %s failed' % host)
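
A usage sketch (both paths are hypothetical):

    # Stage a script in /tmp on the master host
    copy_files_to_master('/home/gpadmin/datagen.py', '/tmp')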
Code example #16
File: __init__.py Project: LJoNe/gpdb
 def __init__(self, methodName):
     self.pgport = os.environ.get('PGPORT')
     self.fileutil = Filerepe2e_Util()
     self.gpconfig = GPDBConfig()
     self.gprecover = GpRecover(self.gpconfig)
     self.gpstate = Gpstate()
     self.gpprimarymirror = Gpprimarymirror()
     self.base = GPDBStorageBaseTestCase(self.gpconfig)
     super(FtsTransitions, self).__init__(methodName)
Code example #17
    def is_changetracking(self):
        """
        @summary: return true if system is in change tracking mode
        
        @return: Boolean value representing the whether the cluster is insync or not
        """

        config = GPDBConfig()
        return not config.is_not_insync_segments()
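
A usage sketch, assuming a test method on the same class:

        # Hypothetical usage: react to a cluster that is resynchronizing
        if self.is_changetracking():
            tinctest.logger.info('Cluster is in change tracking mode; skipping check')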
Code example #18
File: __init__.py Project: 50wu/gpdb
 def __init__(self, methodName):
     self.pgport = os.environ.get('PGPORT')
     self.util = Filerepe2e_Util()
     self.gpconfig = GpConfig()
     self.config = GPDBConfig()
     self.gpr = GpRecover(self.config)
     self.dbstate = DbStateClass('run_validation', self.config)
     self.gpstart = GpStart()
     self.gpstop = GpStop()
     super(FilerepTestCase, self).__init__(methodName)
Code example #19
File: test_externalpartition.py Project: 50wu/gpdb
 def get_substitutions(self):
     """
     Returns sustitution variables.
     """
     config = GPDBConfig()
     host, _ = config.get_hostandport_of_segment(0)
     variables = {
             'HOST': host, 
             }
     return variables
Code example #20
 def __init__(self, methodName):
     self.filereputil = Filerepe2e_Util()
     self.config = GPDBConfig()
     self.gprecover = GpRecover(self.config)
     self.gpstop = GpStop()
     self.gpstart = GpStart()
     self.gpverify = GpdbVerify(config=self.config)
     self.dbstate = DbStateClass('run_validation', self.config)
     self.port = os.getenv('PGPORT')
     super(PgtwoPhaseClass, self).__init__(methodName)
Code example #21
 def __init__(self,methodName):
     self.fileutil = Filerepe2e_Util()
     self.config = GPDBConfig()
     self.gprecover = GpRecover(self.config)
     self.gpstart = GpStart()
     self.gpstop = GpStop()
     self.gpfile = Gpfilespace(self.config)
     self.dbstate = DbStateClass('run_validation', self.config)
     self.port = os.getenv('PGPORT')
     self.base = GPDBStorageBaseTestCase()
     super(SuspendCheckpointCrashRecovery,self).__init__(methodName)
Code example #22
File: __init__.py Project: yanchaomars/gpdb
 def kill_first_mirror(self):
     mirror_data_loc = self.get_default_fs_loc(role='m',content=0)
     gpconfig = GPDBConfig()
     (host, port) = gpconfig.get_hostandport_of_segment(psegmentNumber=0, pRole='m')
     cmdString = 'ps -ef|grep -v grep|grep \'%s\'|awk \'{print $2}\'|xargs kill -9' % mirror_data_loc
     remote = Command(name='kill first mirror', cmdStr=cmdString, ctxt=2, remoteHost=host)
     remote.run()
     tinctest.logger.info('run command %s' % cmdString)
     rc = remote.get_results().rc
     result = remote.get_results().stdout
     tinctest.logger.info('Command returned rc: %s, result: %s' % (rc, result))
Code example #23
File: test_ao_read_check.py Project: 50wu/gpdb
 def test_pg_aocsseg_corruption(self):
     self.create_appendonly_tables(row=False)
     config = GPDBConfig()
     host, port = config.get_hostandport_of_segment() 
     self.transform_sql_file(os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql.t'), 'co1')
     out_file = os.path.join(self.output_dir, 'corrupt_pg_aocsseg.out')
     ans_file = os.path.join(self.ans_dir, 'corrupt_pg_aocsseg.ans')
     sql_file = os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql')
     PSQL.run_sql_file_utility_mode(sql_file, out_file=out_file, host=host,
                                    port=port, dbname=os.environ['PGDATABASE'])
     if not Gpdiff.are_files_equal(out_file, ans_file, match_sub=[local_path('sql/init_file')]):
         raise Exception('Corruption test of pg_aocsseg failed for appendonly tables !')
Code example #24
File: common_utils.py Project: kaknikhil/gpdb
def check_logs(search_string_list):
    """
    Check all the segment logs (master/primary/mirror) for keywords in the
    search_string_list
    """
    dbid_list = PSQL.run_sql_command("select dbid from gp_segment_configuration;", flags="-q -t", dbname="postgres")
    dbid_list = dbid_list.split()
    config = GPDBConfig()
    for dbid in dbid_list:
        (host, data_dir) = config.get_host_and_datadir_of_segment(dbid.strip())
        (rc, msg) = search_string(host, search_string_list, data_dir)
        if rc:
            return (False, msg)
    return (True, "No Issues found")
Code example #25
File: fault.py Project: stefanieqiang/gpdb-1
    def test_do_full_recovery(self):
        """
        [feature]: Performs Full Recovery
        """

        config = GPDBConfig()
        recoverseg = GpRecoverseg()
        tinctest.logger.info('Running Full gprecoverseg...')
        recoverseg.run(option='-F')
        rtrycnt = 0
        while not config.is_not_insync_segments():
            tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
            sleep(10)  # avoid a busy loop while polling
            rtrycnt += 1
Code example #26
File: test_run_workload.py Project: 50wu/gpdb
 def setUpClass(cls):
     super(PreExpansionWorkloadTests, cls).setUpClass()
     # gpscp the script required for external table in create_base_workload
     scp_file = os.path.join(cls.get_sql_dir(), 'datagen.py')
     gpconfig = GPDBConfig()
     hosts = gpconfig.get_hosts()
     hosts_file = os.path.join(cls.get_out_dir(), 'hostfile')
     with open(hosts_file, 'w') as f:
         f.write('\n'.join(hosts))
     
     res = {'rc': 0, 'stderr': '', 'stdout': ''}
     run_shell_command("gpscp -f %s %s =:$GPHOME/bin" % (hosts_file, scp_file), 'gpscp script', res)
     if res['rc'] > 0:
         tinctest.logger.warning("Failed to gpscp the required script to all the segments for external table queries. The script might already exist!")
Code example #27
File: fault.py Project: stefanieqiang/gpdb-1
    def test_full_recovery_skip_persistent_tables_check(self):
        """
        [feature]: Run full recovery and verify that the persistent tables check is skipped
        """

        config = GPDBConfig()
        recoverseg = GpRecoverseg()
        tinctest.logger.info('Running gprecoverseg...')
        recoverseg.run(option='-F')
        self.assertNotIn('Performing persistent table check', recoverseg.stdout)
        rtrycnt = 0
        while not config.is_not_insync_segments():
            tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
            sleep(10)  # avoid a busy loop while polling
            rtrycnt += 1
Code example #28
File: test_runsqls.py Project: 50wu/gpdb
    def setUpClass(cls):
        super(other_tests, cls).setUpClass()
        source_dir = cls.get_source_dir()
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        port = mppUtil.getOpenPort(8080)
        tinctest.logger.info("gpfdist host = {0}, port = {1}".format(host, port))

        data_dir = os.path.join(source_dir, 'data')
        cls.gpfdist = GPFDIST(port, host, directory=data_dir)
        cls.gpfdist.startGpfdist()

        data_out_dir = os.path.join(data_dir, 'output')
        shutil.rmtree(data_out_dir, ignore_errors=True)
        os.mkdir(data_out_dir)
Code example #29
    def check_cluster_health(self, doFullRecovery=False):
        """
        @summary: Checks for the cluster health, tries to recover and rebalance the cluster, 
                    fails the test if not able to do so 
        
        @param doFullRecovery: Boolean value which decides whether to go for full 
                                recovery or not
        @return: None
        """

        tinctest.logger.info(
            "***** Checking the cluster health before starting tests")
        config = GPDBConfig()
        # If the segments are not up, go for recovery
        if not config.is_not_insync_segments():
            tinctest.logger.info("***** Starting the recovery process")
            # if incremental didn't work, go for full recovery
            if not self.recover_segments(' ', 10):
                tinctest.logger.warn(
                    "***** Segments not recovered after incremental recovery")
                if doFullRecovery:
                    # if full also fails, the tests cannot proceed, so fail it
                    if not self.recover_segments('-F', 20):
                        tinctest.logger.error(
                            "***** Segments not recovered even after full recovery - Tests cannot proceed further!!"
                        )
                        self.fail(
                            "Segments are down - Tests cannot proceed further!!"
                        )
                    # if full recovery passes, check for rebalancing the cluster
                    else:
                        tinctest.logger.info(
                            "***** Segments up after full recovery : validating their roles..."
                        )
                        self.check_segment_roles()
                else:
                    self.fail("Segments are down - Tests cannot proceed!!")
            # if incremental recovery passes, check for rebalancing the cluster
            else:
                tinctest.logger.info(
                    "***** Segments up after incremental recovery : validating their roles..."
                )
                self.check_segment_roles()
        # If the segments are up, check for rebalancing the cluster
        else:
            tinctest.logger.info(
                "***** Segments are up : validating their roles...")
            self.check_segment_roles()
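
recover_segments is referenced above but not shown. Judging from its call sites, it plausibly runs gprecoverseg with the given option and polls a bounded number of times for the segments to come back in sync. A hypothetical sketch:

    def recover_segments(self, option, max_tries):
        # Hypothetical sketch: run gprecoverseg with the given option and
        # poll a bounded number of times for the segments to come in sync.
        recoverseg = GpRecoverseg()
        recoverseg.run(option=option)
        config = GPDBConfig()
        for attempt in range(max_tries):
            if config.is_not_insync_segments():
                return True
            sleep(10)
        return False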
Code example #30
 def setUpClass(cls):
     super(GPFilespaceTablespaceTest, cls).setUpClass()
     tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and setup.sql")
     PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
     # separate the dropping of filespaces
     PSQL.run_sql_file(local_path('sqls/setup/drop_filespaces.sql'))
     PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
     tinctest.logger.info("Starting the Filespace Tablespace test.. ")
     config = GPDBConfig()
     filespace = Gpfilespace()       
     filespace_name = 'cdbfast_fs_'
     if config.is_not_insync_segments():
         tinctest.logger.info("***** Creating filespaces...")
         filespace.create_filespace(filespace_name+'sch1')
         filespace.create_filespace(filespace_name+'sch2')
         filespace.create_filespace(filespace_name+'sch3')
Code example #31
File: fault.py Project: stefanieqiang/gpdb-1
    def test_recovery_with_new_loc(self):
        """
        [feature]: Performs recovery by creating a configuration file with new segment locations 
        
        """

        newfault = Fault()
        config = GPDBConfig()
        hosts = newfault.get_segment_host()
        newfault.create_new_loc_config(hosts, orig_filename='recovery.conf', new_filename='recovery_new.conf')
        if not newfault.run_recovery_with_config(filename='recovery_new.conf'):
            self.fail("*** Incremental recovery with config file recovery_new.conf failed")
        rtrycnt = 0
        while not config.is_not_insync_segments():
            tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
            sleep(10)  # avoid a busy loop while polling
            rtrycnt += 1
Code example #32
File: fault.py Project: stefanieqiang/gpdb-1
 def test_invalid_state_recoverseg(self):
     """
     [feature]: Sets the ENV_VAR and runs the incremental recoverseg
     """
     # setting the ENV_VAR
     os.environ[ENV_VAR] = '1'
     recoverseg = GpRecoverseg()
     config = GPDBConfig()
     tinctest.logger.info('Running Incremental gprecoverseg...')
     recoverseg.run()
     rtrycnt = 0
     while not config.is_not_insync_segments():
         tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
         sleep(10)  # avoid a busy loop while polling
         rtrycnt += 1
Code example #33
File: genFault.py Project: LJoNe/gpdb
 def get_host_port_mapping(self,role):
     """ 
     Returns a dictionary having key as hostname and value as a list of port nos.
     For e.g {'vm9':['22001','22000'] , 'vm10':{'42000','42001'}...}
     """        
     config = GPDBConfig()
     no_of_segments = config.get_countprimarysegments()
     hosts_dict = {}
     counter = 0
     while counter < no_of_segments:
         (host, port) = config.get_hostandport_of_segment(counter, role)
         hosts_dict.setdefault(host, []).append(port)
         counter += 1
     return hosts_dict
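
A usage sketch that builds the primary-segment map and logs it:

 # Hypothetical usage:
 hosts_dict = self.get_host_port_mapping('p')
 for host, ports in hosts_dict.items():
     tinctest.logger.info('%s -> %s' % (host, ports))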
Code example #34
File: test_read.py Project: HaozhouWang/gpdb
    def setUpClass(cls):
        super(LegacyRETTestCase, cls).setUpClass()

        cls.split_tbl()

        source_dir = cls.get_source_dir()
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        port = mppUtil.getOpenPort(8080)
        tinctest.logger.info("gpfdist host = {0}, port = {1}".format(host, port))

        data_dir = os.path.join(source_dir, 'data')
        cls.gpfdist = GPFDIST(port, host, directory=data_dir)
        cls.gpfdist.startGpfdist()

        # Some tests write data to disk temporarily.
        data_out_dir = os.path.join(data_dir, 'output')
        shutil.rmtree(data_out_dir, ignore_errors=True)
        os.mkdir(data_out_dir)
Code example #35
class GPAddmirrorsTestCase(MPPTestCase):

    def __init__(self, methodName):
        self.config = GPDBConfig()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.seg_prefix = os.path.basename(self.mdd).split('-')[0]
        self.master_host = self.config.get_masterhost()
        self.gpinitconfig_template = local_path('configs/gpinitconfig_template')
        self.datadir_config_file = local_path('configs/datadir_config_file') 
        self.mirror_config_file = local_path('configs/mirror_config_file')
        self.gpinitconfig_file = local_path('configs/gpinitconfig')
        self.host_file = local_path('configs/hosts')
        self.hosts = self.config.get_hosts(segments = True)

        self.port_base = '40000'
        self.master_port = os.environ.get('PGPORT', '5432')
        self.primary_data_dir = self.config.get_host_and_datadir_of_segment(dbid = 2)[1]
        # initially set the mirror data dir same to primary's
        self.mirror_data_dir = os.path.join(os.path.dirname(os.path.dirname(self.primary_data_dir)), 'mirror')
        self.gpinitsystem = True
        self.number_of_segments = self.config.get_countprimarysegments()
        self.number_of_segments_per_host = self.number_of_segments / len(self.hosts)
        self.standby_enabled = False
        self.number_of_parallelism = 4
        self.fs_location = []

        super(GPAddmirrorsTestCase, self).__init__(methodName)

    def setUp(self):
        super(GPAddmirrorsTestCase, self).setUp()

    def _setup_gpaddmirrors(self, port_offset=1000):
        """
        Takes care of creating all the directories required for gpaddmirrors
        and generating input files for gpaddmirrors
        """
        # Generate gpaddmirrors config files
        try:
            self._generate_gpaddmirrors_input_files(port_offset)
        except Exception, e:
            tinctest.logger.exception("Encountered exception during generation of input files: %s" % e)
            raise