Example #1
 def __init__(self, methodName):
     self.gp = GpactivateStandby()
     self.verify = StandbyVerify()
     self.config = GPDBConfig()
     self.disk = Disk()
     self.sdby_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
     self.pgport = os.environ.get('PGPORT')
     super(OODClass, self).__init__(methodName)
Example #2
 def __init__(self):
     self.stdby = StandbyVerify()
     self.runmixin = StandbyRunMixin()
     self.runmixin.createdb(dbname='walrepl')
     self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
     self.config = GPDBConfig()
     self.pgutil = GpUtility()
     self.host = socket.gethostname()
Example #3
 def __init__(self, methodName):
     self.gphome = os.environ.get('GPHOME')
     self.pgport = os.environ.get('PGPORT')
     self.pgdatabase = os.environ.get('PGDATABASE')
     self.stdby_host = 'localhost'
     self.master_dd = os.environ.get('MASTER_DATA_DIRECTORY')
     self.pgutil = GpUtility()
     self.stdby = StandbyVerify()
     super(WalReplKillProcessTestCase, self).__init__(methodName)
Example #4
File: __init__.py  Project: xuegang/gpdb
 def __init__(self):
     self.gpinit = GpinitStandby()
     self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
     self.config = GPDBConfig()
     self.pgutil = GpUtility()
     self.verify = StandbyVerify()
     self.host = socket.gethostname()
     self.standby_loc = os.path.join(
         os.path.split(self.mdd)[0], self.standby_dirname)
     self.standby = Standby(self.standby_loc, self.standby_port)
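
All of the constructor fragments above read their settings from environment variables rather than taking arguments. As a stdlib-only illustration (not part of the source), a test session could verify those variables up front; the variable names below are exactly the ones read in the snippets, while the check itself is hypothetical.

import os

# Hypothetical preflight check; the variable names come from the snippets above.
REQUIRED_ENV = ('GPHOME', 'PGPORT', 'PGDATABASE', 'MASTER_DATA_DIRECTORY')
missing = [name for name in REQUIRED_ENV if not os.environ.get(name)]
if missing:
    raise RuntimeError('missing environment variables: %s' % ', '.join(missing))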
Example #5
class OODClass(MPPTestCase):
    def __init__(self, methodName):
        self.gp = GpactivateStandby()
        self.verify = StandbyVerify()
        self.config = GPDBConfig()
        self.disk = Disk()
        self.sdby_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.pgport = os.environ.get('PGPORT')
        super(OODClass, self).__init__(methodName)

    def initiate_standby(self):
        self.gp.create_standby(local='no')

    def check_standby(self):
        self.assertFalse(self.verify.check_standby_processes())

    def get_standby_dbid(self):
        std_sql = "select dbid from gp_segment_configuration where content='-1' and role='m';"
        standby_dbid = PSQL.run_sql_command(std_sql,
                                            flags='-q -t',
                                            dbname='template1')
        return standby_dbid.strip()

    def restart_standby(self):
        sdby_host = self.config.get_master_standbyhost()
        stdby_dbid = self.get_standby_dbid()
        cmd = "pg_ctl -D %s -o '-p %s -b %s -z 2 --silent-mode=true -i -M master -C -1 -x 0 -E' start &" % (
            self.sdby_mdd, self.pgport, stdby_dbid)
        self.assertTrue(
            self.gp.run_remote(sdby_host, cmd, self.pgport, self.sdby_mdd))
        self.assertTrue(self.verify.check_standby_processes())

    def check_diskusage(self, host):  # This now checks for only /data
        (rc, result) = self.disk.get_disk_usage(host, '/data')
        if rc != 0:
            raise Exception(
                "The specified mount /data is not present for the device")
        else:
            available_usage = result
            return available_usage

    def _fill(self, filename, host):
        cmd_prefix = "ssh " + host + " \""
        cmd_postfix = "\""
        location = '/data'
        if not os.path.isdir('%s/diskfill/' % location):
            os.makedirs('%s/diskfill/' % location)
        cmd_str = cmd_prefix + "dd if=/dev/zero bs=16384K count=2000 of=" + location + "/diskfill/" + filename + cmd_postfix
        cmd = Command(name='Fill Disk', cmdStr=cmd_str)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            tinctest.logger.error("disk fill not working. It's already full")

    def filldisk(self):
        host = self.config.get_master_standbyhost()
        disk_usage = self.check_diskusage(host)
        i = 0
        while (int(disk_usage.strip()) > 1000000):
            filename = 'new_space_%s' % i
            self._fill(filename, host)
            i += 1
            disk_usage = self.check_diskusage(host)

    def remove_fillfiles(self, filename, host):
        location = '/data'
        cmd_str = "ssh %s rm %s/diskfill/%s*" % (host, location, filename)
        cmd = Command(name='Remove fill files', cmdStr=cmd_str)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            raise Exception('Unable to delete the fill files')
        return

    def cleanup(self):
        host = self.config.get_master_standbyhost()
        self.remove_fillfiles('new_space', host)
        # Recover segments in case segments and standby were on the same host
        cmd = Command(name='gprecoverseg', cmdStr='gprecoverseg -a')
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            raise Exception('gprecoverseg failed')
        while (self.config.is_not_insync_segments() == False):
            tinctest.logger.info('Waiting for DB to be in sync')
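
For orientation, here is a hypothetical flow (not taken from the source) showing how the OODClass helpers above might be chained in an out-of-disk standby scenario; the ordering is inferred from the method semantics, and ood is assumed to be an already-constructed OODClass instance.

def out_of_disk_scenario(ood):
    ood.initiate_standby()   # create a remote standby master
    ood.filldisk()           # fill /data on the standby host until little space is left
    ood.check_standby()      # the standby processes should have died
    ood.cleanup()            # remove the fill files, run gprecoverseg, wait for sync
    ood.restart_standby()    # the standby should come back up and be verified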
Example #6
File: test_gpstart.py  Project: 50wu/gpdb
class GpstartTestCase(MPPTestCase):
    '''
    testcase for gpstart

    gpstart may return status code 1 as well as 0 in the success case.  The
    difference is whether it produces WARNING or not, but here we don't care.
    '''
    origin_mdd = os.environ.get('MASTER_DATA_DIRECTORY')

    def __init__(self,methodName):
        self.gputil = GpUtility()
        self.stdby = StandbyVerify()
        super(GpstartTestCase,self).__init__(methodName)

    def setUp(self):
        self.gputil.check_and_start_gpdb()
        stdby_presence = self.gputil.check_standby_presence()
        # We should forcibly recreate the standby, as it might have been promoted.
        if stdby_presence:
            self.gputil.remove_standby()
        self.gputil.install_standby()

    def tearDown(self):
        self.gputil.remove_standby()
        
    """
    Gpstart test cases in recovery mode
    """

    def test_gpstart_from_master(self):
        """
        tag
        """
        self.gputil.check_and_stop_gpdb()
        (rc, stdout) = self.gputil.run('gpstart -a ')
        self.assertIn(rc, (0, 1))
        self.assertTrue(self.gputil.gpstart_and_verify())
        sleep(2)
        self.assertTrue(self.stdby.check_gp_segment_config(), 'standby master not configured')
        self.assertTrue(self.stdby.check_pg_stat_replication(),'standby not in replication status')
        self.assertTrue(self.stdby.check_standby_processes(), 'standby processes not running')
        (rc, output) = self.gputil.run(command = 'ps -ef|grep "wal sender "|grep -v grep')
        self.assertIsNotNone(output)

    def test_gpstart_master_only(self):
        """
        tag
        """
        self.gputil.check_and_stop_gpdb()
        (rc, stdout) = self.gputil.run('export GPSTART_INTERNAL_MASTER_ONLY=1; '
                                   'gpstart -a -m ')
        self.assertIn(rc, (0, 1))
        self.assertTrue(self.gputil.gpstart_and_verify())
        (rc,output) = self.gputil.run('PGDATABASE=template1 '
                                  "PGOPTIONS='-c gp_session_role=utility' "
                                  'psql')
        self.assertEqual(rc, 0)
        (rc, output) = self.gputil.run('psql template1')
        # should fail due to master only mode
        self.assertEqual(rc, 2)
        self.gputil.run('gpstop -a -m')
        self.gputil.run('gpstart -a')


    def test_gpstart_restricted_mode_master(self):
        """Test -R option with standby."""

        self.gputil.check_and_stop_gpdb()
        (rc, stdout) = self.gputil.run('gpstart -a -R')
        self.assertIn(rc, (0, 1))
        self.assertTrue(self.gputil.gpstart_and_verify())
        (rc,output) = self.gputil.run(command = 'psql template1')
        self.assertIn(rc, (0, 1))
        self.gputil.run('gpstop -ar')


    def test_gpstart_master_w_timeout(self):
        """Test -t option with standby."""

        self.gputil.check_and_stop_gpdb()
        (rc, output) = self.gputil.run('gpstart -a -t 30')
        self.assertIn(rc, (0, 1))
        self.assertTrue(self.gputil.gpstart_and_verify())
        self.gputil.run('gpstop -ar')

    def test_gpstart_no_standby(self):
        """Test -y with standby configured."""

        self.gputil.check_and_stop_gpdb()
        (rc, stdout) = self.gputil.run('gpstart -a -y')
        self.assertIn(rc, (0, 1))
        self.assertTrue(self.gputil.gpstart_and_verify())
        self.assertFalse(self.stdby.check_standby_processes(),
                         'gpstart without standby failed, standby was running')
        self.gputil.run('gpstop -ar')

    def test_gpstart_wo_standby(self):
        """Test -y without standby configured."""

        self.gputil.remove_standby()
        self.gputil.check_and_stop_gpdb()
        (rc, stdout) = self.gputil.run('gpstart -a -y')
        self.assertIn(rc, (0, 1))
        self.assertTrue(self.gputil.gpstart_and_verify())
        self.assertFalse(self.stdby.check_standby_processes(), 'standby processes presented')
        self.gputil.run('gpstop -ar')

    """
    Gpstart, test case in failover mode
    """

    def test_gpstart_master_only_after_failover(self):
        """
        for test purpose, failing back to old master should
              remove standby from primary after activate standby
        """
        tinctest.logger.info("start master only with -m option after failover")
        activatestdby = GpactivateStandby()
        standby_host = activatestdby.get_current_standby()
        standby_mdd = activatestdby.get_standby_dd()
        standby_port = activatestdby.get_standby_port()
        activatestdby.activate()
        self.stdby._run_remote_command(standby_host,command = 'gpstop -a')
        stdout = self.stdby._run_remote_command(standby_host,command = 'export  GPSTART_INTERNAL_MASTER_ONLY=1; gpstart -a -m')
        self.assertNotRegexpMatches(stdout,"ERROR","Start master only after failover failed")
        self.assertTrue(self.gputil.gpstart_and_verify(master_dd = standby_mdd, host = standby_host))
        self.stdby._run_remote_command(standby_host,command = 'gpstop -a -m')
        self.gputil.run(command = 'gpstop -ar')
        self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port)

    def test_gpstart_master_after_failover(self):
        """
        failover, start from new master, then recover the cluster back to
        have the old master active.
        """
        tinctest.logger.info("failover, and run gpstart master test")
        self.gputil.check_and_start_gpdb()
        activatestdby = GpactivateStandby()
        standby_host = activatestdby.get_current_standby()
        standby_mdd = activatestdby.get_standby_dd()
        standby_port = activatestdby.get_standby_port()
        activatestdby.activate()
        self.stdby._run_remote_command(standby_host, command = 'gpstop -a')
        stdout = self.stdby._run_remote_command(standby_host,command = 'gpstart -a')
        self.assertNotRegexpMatches(stdout,"FATAL","ERROR")
        self.assertTrue(self.gputil.gpstart_and_verify(master_dd = standby_mdd, host = standby_host))
        self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port)

    def test_gpstart_original_master_after_promote(self):
        """
        failover, start from new master, then recover the cluster back to
        have the old master active.
        """
        tinctest.logger.info("activate and run gpstart for original master")
        activatestdby = GpactivateStandby()
        standby_host = activatestdby.get_current_standby()
        standby_mdd = activatestdby.get_standby_dd()
        standby_port = activatestdby.get_standby_port()
        activatestdby.activate()
        (rc, stdout) = self.gputil.run('gpstart -a -v')
        self.gputil.run('pg_controldata %s' % self.origin_mdd)
        self.stdby._run_remote_command(standby_host, command = 'pg_controldata %s' % standby_mdd)
        self.assertNotEqual(rc, 0)
        # The error message below comes from the gpstart product code (if it is modified, change it here as well).
        self.assertRegexpMatches(stdout,"Standby activated, this node no more can act as master.")
        self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port)
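
The class docstring above notes that gpstart may exit with 0 or 1 on success, which is why these tests assert rc in (0, 1). A stdlib-only sketch of that pattern (the helper name and the use of subprocess are illustrative, not from the source) is:

import subprocess

def run_accepting_warning(cmd, ok_codes=(0, 1)):
    # Run a shell command and accept any return code in ok_codes,
    # mirroring the assertIn(rc, (0, 1)) checks in GpstartTestCase.
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode not in ok_codes:
        raise AssertionError('%r failed with rc=%d:\n%s' % (cmd, proc.returncode, out))
    return proc.returncode, out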
Example #7
class WalReplKillProcessTestCase(TINCTestCase):

    # These are placeholder defaults, not hard-coded values; they are updated at runtime.
    stdby_host = 'localhost'
    stdby_port = '5432'

    def __init__(self, methodName):
        self.gphome = os.environ.get('GPHOME')
        self.pgport = os.environ.get('PGPORT')
        self.pgdatabase = os.environ.get('PGDATABASE')
        self.stdby_host = 'localhost'
        self.master_dd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.pgutil = GpUtility()
        self.stdby = StandbyVerify()
        super(WalReplKillProcessTestCase, self).__init__(methodName)

    def killProcess_byPid(self, signal=9, pid_toKill=[], host="localhost"):
        pid_list = ""
        for pid in pid_toKill:
            pid_list = pid_list + " " + str(pid)

        kill_cmd = "%s/bin/gpssh -h %s -e 'kill -%s  %s'" % (
            os.environ.get('GPHOME'), host, signal, pid_list)
        (rc, result) = self.pgutil.run(kill_cmd)
        if rc == 0:
            tinctest.logger.info("Process killed, %s" % result)
            return True
        else:
            tinctest.logger.error(
                "Killing process error, Status Code non zero, cmd: %s\n" %
                kill_cmd)
            return False

    def kill_walstartup(self):
        gpstdby = GpinitStandby()
        stdby_host = gpstdby.get_standbyhost()
        activate_stdby = GpactivateStandby()
        stdby_port = activate_stdby.get_standby_port()
        pid_list = []
        startup_pid = self.pgutil.get_pid_by_keyword(host=stdby_host,
                                                     pgport=stdby_port,
                                                     keyword="startup process")
        if int(startup_pid) == -1:
            tinctest.logger.error("error:startup process does not exist!")
            return False
        else:
            pid_list.append(startup_pid)
            self.killProcess_byPid(pid_toKill=pid_list, host=stdby_host)

    def kill_walreceiver(self):
        gpstdby = GpinitStandby()
        stdby_host = gpstdby.get_standbyhost()
        activate_stdby = GpactivateStandby()
        stdby_port = activate_stdby.get_standby_port()
        pid_list = []
        walreceiver_pid = self.pgutil.get_pid_by_keyword(
            host=stdby_host, pgport=stdby_port, keyword="wal receiver process")
        if int(walreceiver_pid) == -1:
            tinctest.logger.error(
                "error: wal receiver process does not exist!")
            return False
        else:
            pid_list.append(walreceiver_pid)
            self.killProcess_byPid(pid_toKill=pid_list, host=stdby_host)

    def kill_walsender_check_postmaster_reset(self):
        pid_list = []
        walsender_old_pid = self.pgutil.get_pid_by_keyword(
            pgport=self.pgport, keyword="wal sender process")
        if int(walsender_old_pid) == -1:
            tinctest.logger.error(
                "error: process wal sender does not exist on host")
            return False
        else:
            pid_list.append(walsender_old_pid)
            self.killProcess_byPid(pid_toKill=pid_list)
        sleep(2)
        walsender_new_pid = self.pgutil.get_pid_by_keyword(
            pgport=self.pgport, keyword="wal sender process")
        if walsender_old_pid == walsender_new_pid:
            raise Exception(
                "Killing walsender failed to force postmaster reset")
        else:
            return True

    def kill_transc_backend_check_reset(self):
        dict_process = {
            'stats collector process': -1,
            'writer process': -1,
            'checkpointer process': -1,
            'seqserver process': -1,
            'ftsprobe process': -1,
            'sweeper process': -1,
            'wal sender process': -1
        }
        for process in dict_process:
            pid = self.pgutil.get_pid_by_keyword(pgport=self.pgport,
                                                 keyword=process)
            dict_process[process] = pid
        self.kill_transc_backend()
        for process in dict_process:
            pid = self.pgutil.get_pid_by_keyword(pgport=self.pgport,
                                                 keyword=process)
            delay = 1
            while dict_process.get(process) == pid and delay < 5:
                pid = self.pgutil.get_pid_by_keyword(pgport=self.pgport,
                                                     keyword=process)
                sleep(1)
                delay = delay + 1
            if delay == 5:
                tinctest.logger.error(
                    "Killing transaction backend process failed to force postmaster reset: %s"
                    % process)
                raise Exception(
                    "Killing transaction backend process failed to force postmaster reset child process"
                )

    def kill_transc_backend(self):
        pid_list = []
        sql = "SELECT procpid FROM pg_stat_activity WHERE datname='{0}' AND current_query like 'INSERT INTO%'".format(
            self.pgdatabase)
        tinctest.logger.info(
            "running sql command to get transaction backend process: ---  %s" %
            sql)
        procid = PSQL.run_sql_command(sql,
                                      flags='-q -t',
                                      dbname=self.pgdatabase)
        count = 1
        while not procid.strip() and count < 5:
            sleep(1)
            count += 1
            procid = PSQL.run_sql_command(sql,
                                          flags='-q -t',
                                          dbname=self.pgdatabase)
        if procid.strip():
            tinctest.logger.info("got procid to kill: %s " % procid)
            pid_list.append(procid)
            self.killProcess_byPid(pid_toKill=pid_list)
        else:
            tinctest.logger.error("There is no active backend process")

    def check_stdby_stop(self):
        gpstdby = GpinitStandby()
        stdby_host = gpstdby.get_standbyhost()
        activate_stdby = GpactivateStandby()
        stdby_port = activate_stdby.get_standby_port()
        master_pid = self.pgutil.get_pid_by_keyword(host=stdby_host,
                                                    pgport=stdby_port,
                                                    keyword="master",
                                                    option="bin")
        if int(master_pid) != -1:
            raise Exception("standby should stop but failed!")

    def start_stdby(self):
        gpstdby = GpinitStandby()
        stdby_host = gpstdby.get_standbyhost()
        stdby_dbid = self.get_standby_dbid()
        activate_stdby = GpactivateStandby()
        stdby_mdd = activate_stdby.get_standby_dd()
        stdby_port = activate_stdby.get_standby_port()
        cmd = "pg_ctl -D %s -o '-p %s --gp_dbid=%s --gp_num_contents_in_cluster=2 --silent-mode=true -i -M master --gp_contentid=-1 -E' start &" % (
            stdby_mdd, stdby_port, stdby_dbid)
        self.run_remote(stdby_host, cmd, stdby_port, stdby_mdd)

    def run_remote(self, standbyhost, rmt_cmd, pgport='', standbydd=''):
        '''Runs remote command and returns rc, result '''
        export_cmd = "source %s/greenplum_path.sh;export PGPORT=%s;export MASTER_DATA_DIRECTORY=%s" % (
            self.gphome, pgport, standbydd)
        remote_cmd = "gpssh -h %s -e \"%s; %s\"" % (standbyhost, export_cmd,
                                                    rmt_cmd)
        cmd = Command(name='Running Remote command', cmdStr='%s' % remote_cmd)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        return result.rc, result.stdout

    def check_mirror_seg(self):
        dbstate = DbStateClass('run_validation')
        dbstate.check_mirrorintegrity(master=True)

    def get_standby_dbid(self):
        std_sql = "select dbid from gp_segment_configuration where content='-1' and role='m';"
        standby_dbid = PSQL.run_sql_command(std_sql,
                                            flags='-q -t',
                                            dbname='template1')
        return standby_dbid.strip()

    def run_transaction_backend(self):
        tinctest.logger.info("local path for backend.sql is %s" %
                             local_path(''))
        for file in os.listdir(local_path('')):
            if fnmatch.fnmatch(file, 'backend.sql'):
                PSQL.run_sql_file(local_path(file))

    def get_down_segment(self):
        query = 'select * from gp_segment_configuration where mode <> \'s\' and status <>\'u\''
        result = PSQL.run_sql_command(query, flags='-q -t', dbname='template1')
        return result.strip()

    def check_gpdb_status(self):
        down_segments = self.get_down_segment()
        self.assertEqual(down_segments, '')

    def gpstart_helper(self):
        '''helper method to run in scenario test'''
        (rc, result) = self.pgutil.run('gpstart -a')
        self.assertIn(rc, (0, 1))

    def gpstop_helper(self):
        '''helper method to run in scenario test'''
        cmd = Command('run gpstop', cmdStr='gpstop -a')
        cmd.run(validateAfter=True)

    def gpinitstandby_helper(self):
        '''helper method to create a new standby'''
        self.pgutil.install_standby()

    def removestandby_helper(self):
        ''' helper method to remove standby'''
        self.pgutil.remove_standby()

    def verify_standby_sync(self):
        if (self.stdby.check_gp_segment_config()) and (
                self.stdby.check_pg_stat_replication()) and (
                    self.stdby.check_standby_processes()):
            return True
        else:
            raise Exception('standby and master out of sync!')

    def kill_standby_postmaster(self):
        pid_list = []
        delay = 0
        postmaster_pid = self.pgutil.get_pid_by_keyword(
            host=WalReplKillProcessTestCase.stdby_host,
            pgport=WalReplKillProcessTestCase.stdby_port,
            keyword="master",
            option="bin")
        while int(postmaster_pid) == -1 and delay < 20:
            sleep(1)
            delay = delay + 1
            postmaster_pid = self.pgutil.get_pid_by_keyword(
                host=WalReplKillProcessTestCase.stdby_host,
                pgport=WalReplKillProcessTestCase.stdby_port,
                keyword="master",
                option="bin")
        if int(postmaster_pid) == -1 or delay == 20:
            tinctest.logger.error(
                "error: standby postmaster process does not exist!")
            return False
        else:
            pid_list.append(postmaster_pid)
            return self.killProcess_byPid(
                pid_toKill=pid_list,
                host=WalReplKillProcessTestCase.stdby_host)

    def initial_setup(self):
        keyword = 'rh55-qavm65'
        config = GPDBConfig()
        (seg_host,
         seg_port) = config.get_hostandport_of_segment(psegmentNumber=0,
                                                       pRole='p')
        cur_path = local_path('')
        dir1 = os.path.join(cur_path, 'dml', 'sql',
                            'insert_from_external.sql.in')
        dir2 = os.path.join(cur_path, 'dml', 'sql', 'insert_from_external.sql')
        dir3 = os.path.join(cur_path, 'dml', 'expected',
                            'insert_from_external.ans.in')
        dir4 = os.path.join(cur_path, 'dml', 'expected',
                            'insert_from_external.ans')

        f1 = open(dir1, 'r')
        f2 = open(dir2, 'w')
        f3 = open(dir3, 'r')
        f4 = open(dir4, 'w')
        for line in f1:
            f2.write(line.replace(keyword, seg_host))
        f1.close()
        f2.close()

        for line in f3:
            f4.write(line.replace(keyword, seg_host))
        f3.close()
        f4.close()

        dir5 = os.path.join(cur_path, 'dml', 'sql',
                            'insert_with_gpload.sql.in')
        dir6 = os.path.join(cur_path, 'dml', 'sql', 'insert_with_gpload.sql')
        yaml_path = local_path('dml/sql/config/gpl.yaml')
        f5 = open(dir5, 'r')
        f6 = open(dir6, 'w')
        for line in f5:
            f6.write(line.replace('gpl.yaml', yaml_path))
        f5.close()
        f6.close()

        dir7 = os.path.join(cur_path, 'dml', 'sql', 'config', 'gpl.yaml.in')
        dir8 = os.path.join(cur_path, 'dml', 'sql', 'config', 'gpl.yaml')
        f7 = open(dir7, 'r')
        f8 = open(dir8, 'w')
        for line in f7:
            if 'DATABASE' in line:
                f8.write(line.replace('tangp3', os.environ.get('PGDATABASE')))
            elif 'USER' in line:
                f8.write(line.replace('tangp3', os.environ.get('USER')))
            elif 'HOST' in line:
                f8.write(line.replace('rh55-qavm61', socket.gethostname()))
            elif 'PORT' in line and '5432' in line:
                f8.write(line.replace('5432', os.environ.get('PGPORT')))
            elif 'mydata' in line:
                f8.write(
                    line.replace('mydata',
                                 local_path('dml/sql/gpload/mydata')))
            else:
                f8.write(line)
        f7.close()
        f8.close()

        dir9 = os.path.join(cur_path, 'dml', 'expected',
                            'insert_with_gpload.ans.in')
        dir10 = os.path.join(cur_path, 'dml', 'expected',
                             'insert_with_gpload.ans')
        f9 = open(dir9, 'r')
        f10 = open(dir10, 'w')
        for line in f9:
            f10.write(line.replace('gpl.yaml', yaml_path))
        f9.close()
        f10.close()

        dir11 = os.path.join(cur_path, 'dml', 'sql',
                             'select_from_copy_table.sql.in')
        dir12 = os.path.join(cur_path, 'dml', 'sql',
                             'select_from_copy_table.sql')
        f11 = open(dir11, 'r')
        f12 = open(dir12, 'w')
        for line in f11:
            if 'tenk.data' in line:
                f12.write(
                    line.replace('tenk.data',
                                 local_path('dml/sql/_data/tenk.data')))
            else:
                f12.write(line)
        f11.close()
        f12.close()

        dir13 = os.path.join(cur_path, 'dml', 'expected',
                             'select_from_copy_table.ans.in')
        dir14 = os.path.join(cur_path, 'dml', 'expected',
                             'select_from_copy_table.ans')
        f13 = open(dir13, 'r')
        f14 = open(dir14, 'w')
        for line in f13:
            if 'tenk.data' in line:
                f14.write(
                    line.replace('tenk.data',
                                 local_path('dml/sql/_data/tenk.data')))
            else:
                f14.write(line)
        f13.close()
        f14.close()

        external_table = local_path('dml/sql/_data/quote.csv')
        clean_file = 'rm -rf /tmp/quote.csv'
        rmt_cmd = "gpssh -h %s -e '%s' " % (seg_host, clean_file)
        cmd = Command(name='Running a remote command', cmdStr=rmt_cmd)
        cmd.run(validateAfter=False)
        command = 'scp %s %s:/tmp' % (external_table, seg_host)
        cmd = Command(name='run %s' % command, cmdStr='%s' % command)
        try:
            cmd.run(validateAfter=True)
        except Exception, e:
            tinctest.logger.error("Error running command %s\n" % e)
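
Several of the kill_* helpers above poll for a pid until it changes or a retry budget runs out. A generic, stdlib-only version of that wait loop (illustrative only; not part of the source) could be written as:

from time import sleep, time

def wait_until_changed(get_value, old_value, timeout=5, interval=1):
    # Poll get_value() until it differs from old_value or the timeout expires,
    # mirroring the retry loops in kill_walsender_check_postmaster_reset and
    # kill_transc_backend_check_reset.
    deadline = time() + timeout
    while time() < deadline:
        if get_value() != old_value:
            return True
        sleep(interval)
    return False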
Example #8
File: __init__.py  Project: 50wu/gpdb
class OODClass(MPPTestCase):
    

    def __init__(self,methodName):
        self.gp = GpactivateStandby()
        self.verify = StandbyVerify()
        self.config = GPDBConfig()
        self.disk = Disk()
        self.sdby_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.pgport = os.environ.get('PGPORT')
        super(OODClass,self).__init__(methodName)

    def initiate_standby(self):
        self.gp.create_standby(local='no')


    def check_standby(self):
        self.assertFalse(self.verify.check_standby_processes())

    def get_standby_dbid(self):
       std_sql = "select dbid from gp_segment_configuration where content='-1' and role='m';"
       standby_dbid = PSQL.run_sql_command(std_sql, flags = '-q -t', dbname= 'template1')
       return standby_dbid.strip()

    def restart_standby(self):
        sdby_host = self.config.get_master_standbyhost()
        stdby_dbid = self.get_standby_dbid()
        cmd = "pg_ctl -D %s -o '-p %s --gp_dbid=%s --gp_num_contents_in_cluster=2 --silent-mode=true -i -M master --gp_contentid=-1 -x 0 -E' start &" % (self.sdby_mdd, self.pgport, stdby_dbid)
        self.assertTrue(self.gp.run_remote(sdby_host, cmd, self.pgport, self.sdby_mdd))
        self.assertTrue(self.verify.check_standby_processes())

    def check_diskusage(self, host):  # This now checks for only /data
        (rc, result) = self.disk.get_disk_usage(host, '/data')
        if rc != 0:
            raise Exception ("The specified mount /data is not present for the device")
        else:
            available_usage = result
            return available_usage

    def _fill(self, filename, host):
        cmd_prefix = "ssh " +host+ " \""
        cmd_postfix = "\""
        location = '/data'
        if not os.path.isdir('%s/diskfill/' % location):
            os.makedirs('%s/diskfill/' % location)
        cmd_str = cmd_prefix + "dd if=/dev/zero bs=16384K count=2000 of=" +location+ "/diskfill/" + filename +cmd_postfix
        cmd = Command(name='Fill Disk', cmdStr=cmd_str)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            tinctest.logger.error("disk fill not working. It's already full")

    
    def filldisk(self):
        host =  self.config.get_master_standbyhost()
        disk_usage = self.check_diskusage(host)
        i = 0
        while int(disk_usage.strip()) > 1000000:
            filename = 'new_space_%s' % i
            self._fill(filename, host)
            i += 1
            disk_usage = self.check_diskusage(host)

    def remove_fillfiles(self, filename, host):
        location = '/data'
        cmd_str = "ssh %s rm %s/diskfill/%s*" % (host,location, filename)
        cmd = Command(name='Remove fill files', cmdStr=cmd_str)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc !=0:
            raise Exception('Unable to delete the fill files')
        return

        
    def cleanup(self):
        host = self.config.get_master_standbyhost()
        self.remove_fillfiles('new_space', host)
        # Recover segments in case segments and standby were on the same host
        cmd = Command(name='gprecoverseg', cmdStr='gprecoverseg -a')
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            raise Exception('gprecoverseg failed')
        while self.config.is_not_insync_segments() == False:
            tinctest.logger.info('Waiting for DB to be in sync')
Example #9
class GpinitStandby(object):
    '''Class for gpinitstandby operations 
       Disclaimer: Some of these may repeat with the mpp/lib version'''
    def __init__(self):
        self.stdby = StandbyVerify()
        self.runmixin = StandbyRunMixin()
        self.runmixin.createdb(dbname='walrepl')
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.config = GPDBConfig()
        self.pgutil = GpUtility()
        self.host = socket.gethostname()

    def run(self, option=''):
        '''Runs gpinitstandby and returns True if successful'''
        gpinitstandby_cmd = 'gpinitstandby -a %s' % option
        cmd = Command(name='Running Gpinitstandby',
                      cmdStr="%s" % gpinitstandby_cmd)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            return False
        return True

    def verify_gpinitstandby(self, primary_pid):
        '''Verify the presence of standby in recovery mode '''
        if (self.stdby.check_gp_segment_config()) and (
                self.stdby.check_pg_stat_replication()) and (
                    self.stdby.check_standby_processes()
                ) and self.compare_primary_pid(primary_pid):
            return True
        return False

    def get_masterhost(self):
        std_sql = "select hostname from gp_segment_configuration where content=-1 and role='p';"
        master_host = PSQL.run_sql_command(std_sql,
                                           flags='-q -t',
                                           dbname='postgres')
        return master_host.strip()

    def get_standbyhost(self):
        std_sql = "select hostname from gp_segment_configuration where content='-1' and role='m';"
        standby_host = PSQL.run_sql_command(std_sql,
                                            flags='-q -t',
                                            dbname='postgres')
        return standby_host.strip()

    def get_filespace_location(self):
        fs_sql = "select fselocation from pg_filespace_entry where fselocation like '%fs_walrepl_a%' and fsedbid=1;"
        filespace_loc = PSQL.run_sql_command(fs_sql,
                                             flags='-q -t',
                                             dbname='postgres')
        return filespace_loc.strip()

    def get_standbyhostnode(self):
        '''
        Obtain the hostname of one of the segment nodes in order to use it as the standby master node.
        @return: the hostname of the segment node which can be used as the standby master node
        '''
        hostlist = self.config.get_hosts()
        standby = ''
        for host in hostlist:
            if host.strip() != self.host:
                standby = host.strip()
        if len(standby) > 0:
            return standby
        else:
            tinctest.logger.error(
                'No segment host other than master available to have remote standby'
            )

    def get_primary_pid(self):
        pid = self.pgutil.get_pid_by_keyword(pgport=os.environ.get('PGPORT'),
                                             keyword=self.mdd)
        if int(pid) == -1:
            raise WalReplException(
                'Unable to get pid of primary master process')
        else:
            return int(pid)

    def compare_primary_pid(self, initial_pid):
        final_pid = self.get_primary_pid()
        if initial_pid == final_pid:
            return True
        return False

    def create_dir_on_standby(self, standby, location):
        fs_cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s' " % (
            standby, location, location)
        cmd = Command(
            name='Make directory on standby before running the command',
            cmdStr=fs_cmd)
        tinctest.logger.info('%s' % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0:
            raise WalReplException('Unable to create directory on standby')
        else:
            return True

    def initstand_by_with_default(self):
        master_host = self.get_masterhost()
        gp_cmd = "/bin/bash -c 'gpinitstandby -s %s'" % (master_host)
        cmd = Command(name='Running the command', cmdStr=gp_cmd)
        tinctest.logger.info('%s' % cmd)
        cmd.run(validateAfter=False)
        sleep(2)
        result = cmd.get_results()
        lines = result.stdout.splitlines()
        for line in lines:
            if 'Data directory already exists' in line:
                return True
        return False

    def init_with_prompt(self, filespace_loc):
        standby = self.get_standbyhostnode()
        gp_cmd = "/bin/bash -c 'gpinitstandby -s %s -a'" % (standby)
        logfile = open(local_path('install2.log'), 'w')

        child = pexpect.spawn(gp_cmd, timeout=400)
        child.logfile = logfile
        sleep(5)
        check = child.expect(
            ['.* Enter standby filespace location for filespace.*', ' '])
        child.sendline(filespace_loc)

        sleep(10)
        check = child.expect(['.*Successfully created standby master.*'])
        if check != 0:
            tinctest.logger.error('gpinitstandby failed')
            return False
        child.close()
        return True
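
get_standbyhostnode() above simply picks any configured host other than the current one. A stdlib-only sketch of the same selection (the function name is illustrative; the host list would come from GPDBConfig.get_hosts()) is:

import socket

def pick_remote_host(hostlist, local=None):
    # Return the first host in hostlist that is not the local machine, or None.
    local = local or socket.gethostname()
    for host in hostlist:
        if host.strip() and host.strip() != local:
            return host.strip()
    return None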
Example #10
File: __init__.py  Project: 50wu/gpdb
class OOMClass(object):
    '''Class for methods required for OOM testcase'''

    standby_port = '5433'
    standby_dirname = 'newstandby'

    def __init__(self):
        self.gpinit = GpinitStandby()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.config = GPDBConfig()
        self.pgutil = GpUtility()
        self.verify = StandbyVerify()
        self.host = socket.gethostname()
        self.standby_loc = os.path.join(os.path.split(self.mdd)[0],
                                        self.standby_dirname)
        self.standby = Standby(self.standby_loc, self.standby_port)

    def create_standby(self):
        self.pgutil.clean_dir(self.host,self.standby_loc)
        self.gpinit.run(option = '-P %s -s %s -F pg_system:%s' % (self.standby_port, self.host, self.standby_loc))

    def setup_oom(self):
        # Build it before testing.
        thisdir = os.path.dirname(__file__)
        builddir = os.path.join(thisdir, 'lib')
        subprocess.check_call(['make', '-C', builddir, 'install'])

        #Copy oom_malloc.so and wrapper.sh to all the segment nodes
        for host in config.get_hosts() :
            if host.strip() == self.host :
                continue
            cmd = "gpssh -h %s -e 'mkdir -p %s'; scp %s/oom_malloc.so %s:%s/; scp %s/wrapper.sh %s:%s/" % (host.strip(), builddir, builddir, host.strip(), builddir, builddir, host.strip(), builddir)
            self.pgutil.run(cmd)

    def touch_malloc(self):
        # Touch file oom_malloc in standby directory
        cmd = 'touch %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)

    def startdb(self):
        (rc, result) = self.pgutil.run('gpstart -a --wrapper %s' % (local_path('lib/wrapper.sh')))
        if rc != 0 and 'Could not start standby master' in result :
            return False
        return True

    def restartdb(self):
        # Remove file oom_malloc from standby
        cmd = 'rm %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)
        (rc, result) = self.pgutil.run('gpstop -ar')
        if rc == 0 and (self.verify.check_pg_stat_replication()):
            return True
        return False

    def psql_and_oom(self):
        #Touch oom_malloc in standby_dir and issue PSQL : Check if processes are gone
        self.touch_malloc()
        PSQL.run_sql_command('Drop table if exists wal_oomt1;Create table wal_oomt1(a1 int, a2 text) with(appendonly=true);')
        sleep(2)
        if not (self.verify.check_standby_processes()):
            return True
        return False 

    def start_standby(self):
        # Remove oom_malloc and start standby : Check if all processes are back
        cmd = 'rm %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)
        res = self.standby.start()
        sleep(2)
        if (self.verify.check_standby_processes()) :
            return True
        return False
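
OOMClass drives its fault injection with a sentinel file: touch_malloc() creates oom_malloc in the standby directory, and the preloaded oom_malloc.so (built in setup_oom and loaded through the gpstart --wrapper script) appears to fail allocations while that file exists. A small stdlib sketch of that toggle (hypothetical helper, not part of the source) is:

import os

def set_oom_injection(standby_loc, enabled):
    # Create or remove the oom_malloc sentinel file, matching what
    # touch_malloc() and restartdb()/start_standby() do above.
    sentinel = os.path.join(standby_loc, 'oom_malloc')
    if enabled:
        open(sentinel, 'a').close()   # same effect as 'touch'
    elif os.path.exists(sentinel):
        os.remove(sentinel)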
Example #11
class WalReplKillProcessTestCase(TINCTestCase):
  
   # These are placeholder defaults, not hard-coded values; they are updated at runtime.
   stdby_host = 'localhost'
   stdby_port = '5432'
 
   def __init__(self,methodName):
       self.gphome = os.environ.get('GPHOME')
       self.pgport = os.environ.get('PGPORT')
       self.pgdatabase = os.environ.get('PGDATABASE')
       self.stdby_host = 'localhost'
       self.master_dd = os.environ.get('MASTER_DATA_DIRECTORY')
       self.pgutil = GpUtility()
       self.stdby = StandbyVerify()
       super(WalReplKillProcessTestCase,self).__init__(methodName)


   def killProcess_byPid(self, signal=9, pid_toKill=[], host="localhost"):
       pid_list = ""
       for pid in pid_toKill:
           pid_list = pid_list + " " + str(pid)

       kill_cmd = "%s/bin/gpssh -h %s -e 'kill -%s  %s'" % (os.environ.get('GPHOME'), host, signal, pid_list)
       (rc, result) = self.pgutil.run(kill_cmd)
       if rc == 0:
           tinctest.logger.info("Process killed, %s" % result)
           return True
       else:
           tinctest.logger.error("Killing process error, Status Code non zero, cmd: %s\n"%kill_cmd)
           return False

   def kill_walstartup(self):
       gpstdby = GpinitStandby()
       stdby_host = gpstdby.get_standbyhost()
       activate_stdby = GpactivateStandby()
       stdby_port = activate_stdby.get_standby_port()
       pid_list = []
       startup_pid = self.pgutil.get_pid_by_keyword(host=stdby_host, pgport=stdby_port, keyword="startup process")
       if int(startup_pid) == -1:
           tinctest.logger.error("error:startup process does not exist!")
           return False
       else:
           pid_list.append(startup_pid)
           self.killProcess_byPid(pid_toKill=pid_list, host=stdby_host)

   def kill_walreceiver(self):
       gpstdby = GpinitStandby()
       stdby_host = gpstdby.get_standbyhost()
       activate_stdby = GpactivateStandby()
       stdby_port = activate_stdby.get_standby_port()
       pid_list = []
       walreceiver_pid = self.pgutil.get_pid_by_keyword(host=stdby_host, pgport=stdby_port, keyword="wal receiver process")
       if int(walreceiver_pid) == -1:
           tinctest.logger.error("error: wal receiver process does not exist!")
           return False
       else:  
           pid_list.append(walreceiver_pid)
           self.killProcess_byPid(pid_toKill=pid_list, host=stdby_host)


   def kill_walsender_check_postmaster_reset(self):
       pid_list = []
       walsender_old_pid=self.pgutil.get_pid_by_keyword(pgport=self.pgport,keyword="wal sender process")
       if int(walsender_old_pid) == -1:
           tinctest.logger.error("error: process wal sender does not exist on host")
           return False
       else:
           pid_list.append(walsender_old_pid)
           self.killProcess_byPid(pid_toKill=pid_list)
       sleep(2)
       walsender_new_pid=self.pgutil.get_pid_by_keyword(pgport=self.pgport,keyword="wal sender process")
       if walsender_old_pid == walsender_new_pid:
          raise Exception("Killing walsender failed to force postmaster reset")
       else:
          return True         

   def kill_transc_backend_check_reset(self):
       dict_process = { 'stats collector process': -1, 'writer process': -1,
                       'checkpointer process': -1,'seqserver process': -1,
                       'ftsprobe process': -1,'sweeper process': -1,'wal sender process': -1}
       for process in dict_process:
           pid = self.pgutil.get_pid_by_keyword(pgport=self.pgport,keyword=process)
           dict_process[process] = pid 
       self.kill_transc_backend()
       for process in dict_process:
           pid = self.pgutil.get_pid_by_keyword(pgport=self.pgport,keyword=process)
           delay = 1
           while dict_process.get(process) == pid and delay < 5:
                pid = self.pgutil.get_pid_by_keyword(pgport=self.pgport,keyword=process)
                sleep(1)
                delay = delay +1
           if delay == 5:
                tinctest.logger.error("Killing transaction backend process failed to force postmaster reset: %s"%process)
                raise Exception("Killing transaction backend process failed to force postmaster reset child process") 
 
   def kill_transc_backend(self):
       pid_list = []
       sql = "SELECT procpid FROM pg_stat_activity WHERE datname='{0}' AND current_query like 'INSERT INTO%'".format(self.pgdatabase)    
       tinctest.logger.info("running sql command to get transaction backend process: ---  %s"%sql)
       procid = PSQL.run_sql_command(sql, flags = '-q -t', dbname= self.pgdatabase)
       count = 1
       while not procid.strip() and  count < 5:
             sleep(1)
             count += 1
             procid = PSQL.run_sql_command(sql, flags = '-q -t', dbname= self.pgdatabase)
       if procid.strip():
             tinctest.logger.info("got procid to kill: %s " % procid)
             pid_list.append(procid)
             self.killProcess_byPid(pid_toKill = pid_list)
       else:
             tinctest.logger.error("There is no active backend process")   

    
   def check_stdby_stop(self):
       gpstdby = GpinitStandby()
       stdby_host = gpstdby.get_standbyhost()
       activate_stdby = GpactivateStandby()
       stdby_port = activate_stdby.get_standby_port()     
       master_pid = self.pgutil.get_pid_by_keyword(host=stdby_host, pgport=stdby_port, keyword="master", option = "bin")
       if int(master_pid) != -1:
           raise Exception("standby should stop but failed!")        


   def start_stdby(self):
       gpstdby = GpinitStandby()
       stdby_host = gpstdby.get_standbyhost()
       stdby_dbid = self.get_standby_dbid()
       activate_stdby = GpactivateStandby()
       stdby_mdd = activate_stdby.get_standby_dd()
       stdby_port = activate_stdby.get_standby_port()
       cmd="pg_ctl -D %s -o '-p %s --gp_dbid=%s --gp_num_contents_in_cluster=2 --silent-mode=true -i -M master --gp_contentid=-1 -x 0 -E' start &"%(stdby_mdd, stdby_port, stdby_dbid)
       self.run_remote(stdby_host,cmd,stdby_port,stdby_mdd)
       

   def run_remote(self, standbyhost, rmt_cmd, pgport = '', standbydd = ''):
       '''Runs remote command and returns rc, result '''
       export_cmd = "source %s/greenplum_path.sh;export PGPORT=%s;export MASTER_DATA_DIRECTORY=%s" % (self.gphome, pgport, standbydd) 
       remote_cmd = "gpssh -h %s -e \"%s; %s\"" % (standbyhost, export_cmd, rmt_cmd)
       cmd = Command(name='Running Remote command', cmdStr='%s' % remote_cmd)
       tinctest.logger.info(" %s" % cmd)
       cmd.run(validateAfter=False)
       result = cmd.get_results()
       return result.rc,result.stdout


   def check_mirror_seg(self):
       dbstate = DbStateClass('run_validation')
       dbstate.check_mirrorintegrity(master=True)


   def get_standby_dbid(self):
       std_sql = "select dbid from gp_segment_configuration where content='-1' and role='m';"
       standby_dbid = PSQL.run_sql_command(std_sql, flags = '-q -t', dbname= 'template1')
       return standby_dbid.strip()


   def run_transaction_backend(self):
       tinctest.logger.info("local path for backend.sql is %s"%local_path(''))
       for file in os.listdir(local_path('')):
           if fnmatch.fnmatch(file,'backend.sql'):
                PSQL.run_sql_file(local_path(file))
                

   def get_down_segment(self):
       query = 'select * from gp_segment_configuration where mode <> \'s\' and status <>\'u\''
       result = PSQL.run_sql_command(query, flags = '-q -t', dbname='template1')
       return result.strip()


   def check_gpdb_status(self):
       down_segments = self.get_down_segment()
       self.assertEqual(down_segments,'')


   def gpstart_helper(self):
       '''helper method to run in scenario test'''
       (rc, result) = self.pgutil.run('gpstart -a')
       self.assertIn(rc,(0,1))

   def gpstop_helper(self):
       '''helper method to run in scenario test'''
       cmd = Command('run gpstop', cmdStr = 'gpstop -a')
       cmd.run(validateAfter=True)

   def gpinitstandby_helper(self):
       '''helper method to create a new standby'''
       self.pgutil.install_standby()
 
   def removestandby_helper(self):
       ''' helper method to remove standby'''
       self.pgutil.remove_standby()

   def verify_standby_sync(self):
       if (self.stdby.check_gp_segment_config()) and (self.stdby.check_pg_stat_replication()) and (self.stdby.check_standby_processes()):   
           return True
       else:
           raise Exception('standby and master out of sync!')

   def kill_standby_postmaster(self):
       pid_list = []
       delay = 0
       postmaster_pid = self.pgutil.get_pid_by_keyword(host=WalReplKillProcessTestCase.stdby_host, pgport=WalReplKillProcessTestCase.stdby_port, keyword="master", option="bin")
       while int(postmaster_pid) == -1 and delay < 20:
           sleep(1)
           delay = delay + 1
           postmaster_pid = self.pgutil.get_pid_by_keyword(host=WalReplKillProcessTestCase.stdby_host, pgport=WalReplKillProcessTestCase.stdby_port, keyword="master", option="bin")
       if int(postmaster_pid) == -1 or delay == 20:
           tinctest.logger.error("error: standby postmaster process does not exist!")
           return False
       else:
           pid_list.append(postmaster_pid)
           return self.killProcess_byPid(pid_toKill=pid_list, host=WalReplKillProcessTestCase.stdby_host)                                                                                            

   def initial_setup(self):
       keyword = 'rh55-qavm65'
       config = GPDBConfig()
       (seg_host,seg_port) = config.get_hostandport_of_segment(psegmentNumber = 0, pRole = 'p')
       cur_path = local_path('')
       dir1 = os.path.join(cur_path, 'dml', 'sql','insert_from_external.sql.in')
       dir2 = os.path.join(cur_path, 'dml', 'sql','insert_from_external.sql')
       dir3 = os.path.join(cur_path, 'dml', 'expected','insert_from_external.ans.in')
       dir4 = os.path.join(cur_path, 'dml', 'expected','insert_from_external.ans')

       f1 = open(dir1,'r')
       f2 = open(dir2,'w')
       f3 = open(dir3,'r')
       f4 = open(dir4,'w')
       for line in f1:
           f2.write(line.replace(keyword,seg_host))
       f1.close()
       f2.close()

       for line in f3:
           f4.write(line.replace(keyword,seg_host))
       f3.close()
       f4.close()

       dir5 = os.path.join(cur_path, 'dml', 'sql','insert_with_gpload.sql.in')
       dir6 = os.path.join(cur_path, 'dml', 'sql','insert_with_gpload.sql')
       yaml_path = local_path('dml/sql/config/gpl.yaml')
       f5 = open(dir5,'r')
       f6 = open(dir6,'w')
       for line in f5:
           f6.write(line.replace('gpl.yaml',yaml_path))
       f5.close()
       f6.close()

       dir7 = os.path.join(cur_path,'dml','sql','config','gpl.yaml.in')
       dir8 = os.path.join(cur_path,'dml','sql','config','gpl.yaml')
       f7 = open(dir7,'r')
       f8 = open(dir8,'w')
       for line in f7:
           if 'DATABASE' in line:
               f8.write(line.replace('tangp3',os.environ.get('PGDATABASE')))
           elif 'USER' in line:
               f8.write(line.replace('tangp3',os.environ.get('USER')))
           elif 'HOST' in line:
               f8.write(line.replace('rh55-qavm61',socket.gethostname()))
           elif 'PORT' in line and '5432' in line:
               f8.write(line.replace('5432',os.environ.get('PGPORT')))
           elif 'mydata' in line:
               f8.write(line.replace('mydata',local_path('dml/sql/gpload/mydata')))
           else:
               f8.write(line)
       f7.close()
       f8.close()

       dir9 = os.path.join(cur_path,'dml','expected','insert_with_gpload.ans.in')
       dir10 = os.path.join(cur_path,'dml','expected','insert_with_gpload.ans')
       f9 = open(dir9,'r')
       f10 = open(dir10,'w')
       for line in f9:
           f10.write(line.replace('gpl.yaml',yaml_path))
       f9.close()
       f10.close()

       dir11 = os.path.join(cur_path,'dml','sql','select_from_copy_table.sql.in')
       dir12 = os.path.join(cur_path,'dml','sql','select_from_copy_table.sql')
       f11 = open(dir11,'r')
       f12 = open(dir12,'w')
       for line in f11:
           if 'tenk.data' in line:
               f12.write(line.replace('tenk.data',local_path('dml/sql/_data/tenk.data')))
           else:
               f12.write(line)
       f11.close()
       f12.close()

       dir13 = os.path.join(cur_path,'dml','expected','select_from_copy_table.ans.in')
       dir14 = os.path.join(cur_path,'dml','expected','select_from_copy_table.ans')
       f13 = open(dir13,'r')
       f14 = open(dir14,'w')
       for line in f13:
           if 'tenk.data' in line:
               f14.write(line.replace('tenk.data',local_path('dml/sql/_data/tenk.data')))
           else:
               f14.write(line)
       f13.close()
       f14.close()


       external_table = local_path('dml/sql/_data/quote.csv')
       clean_file = 'rm -rf /tmp/quote.csv'
       rmt_cmd = "gpssh -h %s -e '%s' " % (seg_host, clean_file)
       cmd = Command(name='Running a remote command', cmdStr = rmt_cmd)
       cmd.run(validateAfter=False)
       command = 'scp %s %s:/tmp'%(external_table,seg_host)
       cmd = Command(name='run %s'%command, cmdStr = '%s' % command)
       try:
           cmd.run(validateAfter=True)
       except Exception, e:
           tinctest.logger.error("Error running command %s\n" % e)
Example #12
class GpinitStandby(object):
    '''Class for gpinitstandby operations 
       Disclaimer: Some of these may repeat with the mpp/lib version'''
    def __init__(self):
        self.stdby = StandbyVerify()
        self.runmixin = StandbyRunMixin()
        self.runmixin.createdb(dbname='walrepl')        
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.config = GPDBConfig()
        self.pgutil = GpUtility()
        self.host = socket.gethostname()
 
    def run(self, option = ''):
        '''Runs gpinitstandby and returns True if successful'''
        gpinitstandby_cmd = 'gpinitstandby -a %s' % option
        cmd = Command(name='Running Gpinitstandby', cmdStr="%s" % gpinitstandby_cmd)
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc != 0:
            return False
        return True

    def verify_gpinitstandby(self, primary_pid):  
        '''Verify the presence of standby in recovery mode '''
        if (self.stdby.check_gp_segment_config()) and (self.stdby.check_pg_stat_replication()) and (self.stdby.check_standby_processes())and self.compare_primary_pid(primary_pid) :
            return True
        return False

    def get_masterhost(self):
        std_sql = "select hostname from gp_segment_configuration where content=-1 and role='p';"
        master_host = PSQL.run_sql_command(std_sql, flags = '-q -t', dbname= 'postgres')
        return master_host.strip()

    def get_standbyhost(self):
        std_sql = "select hostname from gp_segment_configuration where content='-1' and role='m';"
        standby_host = PSQL.run_sql_command(std_sql, flags = '-q -t', dbname= 'postgres')
        return standby_host.strip()

    def get_filespace_location(self):
        fs_sql = "select fselocation from pg_filespace_entry where fselocation like '%fs_walrepl_a%' and fsedbid=1;"
        filespace_loc = PSQL.run_sql_command(fs_sql, flags = '-q -t', dbname= 'postgres')
        return filespace_loc.strip()

    def get_standbyhostnode(self):
        '''
        Obtain the hostname of one of the segment nodes in order to use it as the standby master node.
        @return: the hostname of the segment node which can be used as the standby master node
        '''
        hostlist = self.config.get_hosts()
        standby = ''
        for host in hostlist:
            if host.strip() != self.host:
                standby = host.strip()
        if len(standby) > 0 :
            return standby
        else:
            tinctest.logger.error('No segment host other than master available to have remote standby')

    def get_primary_pid(self):
        pid = self.pgutil.get_pid_by_keyword(pgport=os.environ.get('PGPORT'), keyword=self.mdd)
        if int(pid) == -1:
            raise WalReplException('Unable to get pid of primary master process')
        else:
            return int(pid)

    def compare_primary_pid(self, initial_pid):
        final_pid = self.get_primary_pid()
        if initial_pid == final_pid :
            return True
        return False

    def create_dir_on_standby(self, standby, location):
        fs_cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s' " % (standby, location, location)
        cmd = Command(name='Make directory on standby before running the command', cmdStr = fs_cmd)
        tinctest.logger.info('%s' % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0:
            raise WalReplException('Unable to create directory on standby')
        else:
            return True
      
    def initstand_by_with_default(self):
        master_host = self.get_masterhost()
        gp_cmd =  "/bin/bash -c 'gpinitstandby -s %s'" % (master_host)
        logfile = open(local_path('install.log'),'w')

        child = pexpect.spawn(gp_cmd, timeout=400)
        child.logfile = logfile
        sleep(2)
        check = child.expect(['.* Enter standby filespace location for filespace pg_system .*', ' '])
        if check != 0:
            child.close()

        l_file = open(local_path('install.log'),'r')
        lines = l_file.readlines()
        for line in lines:
            if 'default: NA' in line:
                return True
        return False

    def init_with_prompt(self,filespace_loc):
        standby = self.get_standbyhostnode() 
        gp_cmd =  "/bin/bash -c 'gpinitstandby -s %s -a'" % (standby)
        logfile = open(local_path('install2.log'),'w')

        child = pexpect.spawn(gp_cmd, timeout=400)
        child.logfile = logfile
        sleep(5)
        check = child.expect(['.* Enter standby filespace location for filespace.*', ' '])
        child.sendline(filespace_loc)

        sleep(10)
        check = child.expect(['.*Successfully created standby master.*'])
        if check != 0:
            tinctest.logger.error('gpinitstandby failed')
            return False
        child.close()
        return True
Example #13
File: __init__.py  Project: xuegang/gpdb
class OOMClass(object):
    '''Class for methods required for OOM testcase'''

    standby_port = '5433'
    standby_dirname = 'newstandby'

    def __init__(self):
        self.gpinit = GpinitStandby()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.config = GPDBConfig()
        self.pgutil = GpUtility()
        self.verify = StandbyVerify()
        self.host = socket.gethostname()
        self.standby_loc = os.path.join(
            os.path.split(self.mdd)[0], self.standby_dirname)
        self.standby = Standby(self.standby_loc, self.standby_port)

    def create_standby(self):
        self.pgutil.clean_dir(self.host, self.standby_loc)
        self.gpinit.run(option='-P %s -s %s -F pg_system:%s' %
                        (self.standby_port, self.host, self.standby_loc))

    def setup_oom(self):
        # Build it before testing.
        thisdir = os.path.dirname(__file__)
        builddir = os.path.join(thisdir, 'lib')
        subprocess.check_call(['make', '-C', builddir, 'install'])

        #Copy oom_malloc.so and wrapper.sh to all the segment nodes
        for host in config.get_hosts():
            if host.strip() == self.host:
                continue
            cmd = "gpssh -h %s -e 'mkdir -p %s; scp %s/oom_malloc.so %s:%s/;scp %s/wrapper.sh %s:%s/'" % (
                host.strip(), builddir, builddir, host.strip(), builddir,
                builddir, host.strip(), builddir)
            self.pgutil.run(cmd)

    def touch_malloc(self):
        # Touch file oom_malloc in standby directory
        cmd = 'touch %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)

    def startdb(self):
        (rc, result) = self.pgutil.run('gpstart -a --wrapper %s' %
                                       (local_path('lib/wrapper.sh')))
        if rc != 0 and 'Could not start standby master' in result:
            return False
        return True

    def restartdb(self):
        # Remove file oom_malloc from standby
        cmd = 'rm %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)
        (rc, result) = self.pgutil.run('gpstop -ar')
        if rc == 0 and (self.verify.check_pg_stat_replication()):
            return True
        return False

    def psql_and_oom(self):
        #Touch oom_malloc in standby_dir and issue PSQL : Check if processes are gone
        self.touch_malloc()
        PSQL.run_sql_command(
            'Drop table if exists wal_oomt1;Create table wal_oomt1(a1 int, a2 text) with(appendonly=true);'
        )
        sleep(2)
        if not (self.verify.check_standby_processes()):
            return True
        return False

    def start_standby(self):
        # Remove oom_malloc and start standby : Check if all processes are back
        cmd = 'rm %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)
        res = self.standby.start()
        sleep(2)
        if (self.verify.check_standby_processes()):
            return True
        return False