Example #1
 def test_LocalExecutionContext_uses_ampersand_multiple(self):
     self.subject = LocalExecutionContext(None)
     cmd = Command('test', cmdStr='ls /tmp')
     cmd.propagate_env_map['foo'] = 1
     cmd.propagate_env_map['bar'] = 1
     self.subject.execute(cmd)
     self.assertEquals("bar=1 && foo=1 && ls /tmp", cmd.cmdStr)
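The assertion above depends on the propagated env map being rendered in sorted key order ("bar" before "foo"). A minimal stand-alone sketch of that rendering step (a hypothetical helper, not gppylib's actual implementation):

def prepend_env(env_map, cmd_str):
    # Sort keys so the generated command line is deterministic.
    prefix = ' && '.join('%s=%s' % (k, env_map[k]) for k in sorted(env_map))
    return '%s && %s' % (prefix, cmd_str) if prefix else cmd_str

assert prepend_env({'foo': 1, 'bar': 1}, 'ls /tmp') == 'bar=1 && foo=1 && ls /tmp'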
Example #2
    def drop_database(dbname, retries=5, sleep_interval=5):
        """
        Execute dropdb against the given database.

        @type dbname: string
        @param dbname: Name of the database to be deleted

        @type retries: integer
        @param retries: Number of attempts to drop the database.

        @type sleep_interval: integer
        @param sleep_interval: Time in seconds between retry attempts

        @rtype: boolean
        @return: True if successful, False otherwise

        @raise PSQLException: When the database does not exist
        """
        # TBD: Use shell when available
        if not PSQL.database_exists(dbname):
            tinctest.logger.error("Database %s does not exist." % dbname)
            raise PSQLException("Database %s does not exist" % dbname)
        cmd = Command(name="drop database", cmdStr="dropdb %s" % (dbname))
        tinctest.logger.debug("Dropping database: %s" % cmd)
        count = 0
        while count < retries:
            cmd.run(validateAfter=False)
            result = cmd.get_results()
            tinctest.logger.debug("Output - %s" % result)
            if result.rc == 0 and not result.stderr:
                return True
            time.sleep(sleep_interval)
            count += 1
        return False
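The same retry loop can be sketched with only the standard library, replacing the Command wrapper with subprocess (assumes dropdb is on PATH; the names here are illustrative):

import subprocess
import time

def drop_database_sketch(dbname, retries=5, sleep_interval=5):
    for _ in range(retries):
        proc = subprocess.Popen(['dropdb', dbname],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, stderr = proc.communicate()
        # Mirror the check above: success means rc == 0 and an empty stderr.
        if proc.returncode == 0 and not stderr:
            return True
        time.sleep(sleep_interval)
    return False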
Example #3
 def test_hawq_gpinitsystem(self):
     try:
         self.gpdbinit.run()
         cmd = Command(name='run gpstop', cmdStr='export MASTER_DATA_DIRECTORY=%s; gpstop -a' % (self.mdd))
         cmd.run()
     except Exception:
         self.fail("Gpinitsystem Failed")
Example #4
def get_latest_full_ts_with_nbu(context):
    if context.dump_prefix:
        get_rpt_files_cmd = (
            "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*.rpt"
            % (context.netbackup_service_host, context.dump_prefix)
        )
    else:
        get_rpt_files_cmd = (
            "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*.rpt"
            % context.netbackup_service_host
        )

    cmd = Command("Query NetBackup server to get the list of report files backed up", get_rpt_files_cmd)
    cmd.run(validateAfter=True)
    files_list = cmd.get_results().stdout.strip().split("\n")

    for line in files_list:
        fname = line.strip()
        if fname == "":
            continue
        if context.backup_dir is not None and context.backup_dir not in fname:
            continue
        if ("No object matched the specified predicate" in fname) or ("No objects of the format" in fname):
            return None
        restore_file_with_nbu(context, path=fname)
        timestamp = get_full_ts_from_report_file(context, report_file=fname)
        logger.debug("Timestamp = %s" % timestamp)
        if timestamp is not None:
            return timestamp

    raise Exception("No full backup found for given incremental on the specified NetBackup server")
Example #5
def impl(context, directory):
    names = ["Name", "Data", "Data for Name"]
    types = ["TABLE", "TABLE DATA", "EXTERNAL TABLE", "ACL", "CONSTRAINT", "COMMENT", "PROCEDURAL LANGUAGE", "SCHEMA", "AOSTORAGEOPTS"]
    master_dump_dir = directory if len(directory.strip()) != 0 else master_data_dir
    timestamp = context.backup_timestamp
    metadata_file = '%s/db_dumps/%s/gp_dump_1_1_%s.gz' % (master_dump_dir, timestamp[0:8], timestamp)
    tmp_metadata_file = '/tmp/behave_metadata_file'

    cmd = Command(name="Unzip conetnts of metadata dump file to temp file", cmdStr='zcat %s > %s' % (metadata_file, tmp_metadata_file))
    cmd.run(validateAfter=True)

    try:
        with open(tmp_metadata_file, 'r') as fd:
            lines = fd.readlines() 
            if len(lines) == 0:
                raise Exception('Metadata file has no data')
            for line in lines:
                if (line[:3] == comment_start_expr):
                    if (line.startswith(comment_expr) or line.startswith(comment_data_expr_a) or line.startswith(comment_data_expr_b)):
                        name_k, type_k, schema_k = get_comment_keys(line)
                        if (name_k not in names and type_k != "Type" and schema_k != "Schema"):
                            raise Exception("Unknown key in the comment line of the metdata_file '%s'. Please check and confirm if the key is correct" % (metadata_file))
                        name_v, type_v, schema_v = get_comment_values(line)
                        if (type_v not in types):
                            raise Exception("Value of Type in the comment line '%s' of the metadata_file '%s' does not fall under the expected list %s. Please check if the value is correct" %(type_v, metadata_file, types))
    finally:
        if os.path.exists(tmp_metadata_file):
            os.remove(tmp_metadata_file)
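The helpers get_comment_keys and get_comment_values are not shown here. pg_dump metadata comments follow the pattern "-- Name: <n>; Type: <t>; Schema: <s>; ...", so a hedged sketch of such a parser could look like this:

import re

# Hypothetical parser for pg_dump comment lines such as
# "-- Name: t1; Type: TABLE; Schema: public; Owner: gpadmin".
COMMENT_RE = re.compile(r'^-- Name: (.*?); Type: (.*?); Schema: (.*?);')

def parse_dump_comment(line):
    m = COMMENT_RE.match(line)
    return m.groups() if m else None

assert parse_dump_comment('-- Name: t1; Type: TABLE; Schema: public; Owner: x') == ('t1', 'TABLE', 'public')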
Example #6
 def _run_remote_command(self, host, command):
     rmt_cmd = "gpssh -h %s -e '%s' " % (host, command)
     cmd = Command(name='Running a remote command', cmdStr = rmt_cmd)
     cmd.run(validateAfter=False)
     result = cmd.get_results()
     tinctest.logger.info('%s\n%s' %(rmt_cmd, result.stdout))
     return result.stdout
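Where gpssh is not available, the same remote-command pattern can be sketched with plain ssh and subprocess (host and command are placeholders; assumes passwordless ssh, as gpssh does):

import subprocess

def run_remote_sketch(host, command):
    # Run `command` on `host` over ssh and return its stdout.
    proc = subprocess.Popen(['ssh', host, command],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    return out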
Example #7
    def create_filespace(self, filespace):
        '''
        @param filespace: Filespace Name
        '''
        if self.exists(filespace) is True:
            tinctest.logger.info('Filespace %s exists' % filespace)
            return

        file1 = local_path(filespace)
        f1 = open(file1+".fs.config","w")
        f1.write('filespace:%s\n' % filespace)
        f1.write('fsysname:hdfs\n')
        fsrep = PSQL.run_sql_command("select fsrep from pg_filespace where fsname='dfs_system';", flags = '-t -q', dbname='postgres')
        f1.write('fsreplica:%s\n' % fsrep.strip())

        dfs_loc_cmd = "SELECT substring(fselocation from length('hdfs:// ') for (position('/' in substring(fselocation from length('hdfs:// ')))-1)::int) FROM pg_filespace pgfs, pg_filespace_entry pgfse  WHERE pgfs.fsname = 'dfs_system' AND fsedbid = 2 AND pgfse.fsefsoid=pgfs.oid ;"
        dfs_loc = PSQL.run_sql_command(dfs_loc_cmd,flags = '-t -q', dbname='postgres')
        for record in self.config.record:
            if record.content == -1:
                fileloc = '%s/hdfs_%s' % (os.path.split(record.datadir)[0], filespace)
                f1.write("%s:%s:%s/%s\n" % (record.hostname, record.dbid, fileloc, os.path.split(record.datadir)[1]))
                cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'"  % (record.hostname, fileloc, fileloc)
                run_shell_command(cmd)
            else:
                f1.write("%s:%s:[%s/%s/%s]\n" % (record.hostname, record.dbid, dfs_loc.strip(), filespace, os.path.split(record.datadir)[1]))
        f1.close()
        filespace_cmd = '%s/bin/gpfilespace -c %s' % (self.gphome, f1.name)
        cmd = Command(name='Gpfilespace command', cmdStr="%s" % (filespace_cmd))
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0:
            raise GPfilespaceException('"gpfilespace creation filespace FAILED".  Output = %s ' % result.stdout)
Example #8
def get_latest_full_ts_with_nbu(dbname, backup_dir, dump_prefix, netbackup_service_host, netbackup_block_size):
    if dump_prefix:
        get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*.rpt" % (netbackup_service_host, dump_prefix)
    else:
        get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*.rpt" % netbackup_service_host

    cmd = Command("Query NetBackup server to get the list of report files backed up", get_rpt_files_cmd)
    cmd.run(validateAfter=True)
    files_list = cmd.get_results().stdout.split('\n')

    for line in files_list:
        fname = line.strip()
        if fname == '':
            continue
        if backup_dir not in fname:
            continue
        if ("No object matched the specified predicate" in fname) or ("No objects of the format" in fname):
            return None
        restore_file_with_nbu(netbackup_service_host, netbackup_block_size, fname)
        timestamp = get_full_ts_from_report_file(dbname, fname, dump_prefix, netbackup_service_host=netbackup_service_host, netbackup_block_size=netbackup_block_size)
        logger.debug('Timestamp = %s' % timestamp)
        if timestamp is not None:
            return timestamp

    raise Exception('No full backup found for given incremental on the specified NetBackup server')
Example #9
    def execute(self):
        entry = ValidateVerificationEntry(token = self.token).run()
        if not entry['verdone']:   
            raise WrongStateError("Only finished verification tasks may be cleaned up.")

        path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
        Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
        #RemoveTree(path).run()

        to_clean = ValidateVerification(content = entry['vercontent'],
                                        primaries_only = False).run()
        pool = WorkerPool(min(len(to_clean), self.batch_default))
        for seg in to_clean:
            host = seg.getSegmentHostName()
            path = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify', "*%s*" % self.token)
            cmd = Command('cleanup', 'rm -f %s' % path, remoteHost=host)
            pool.addCommand(cmd)

        logger.info('Waiting for clean commands to complete...')
        pool.wait_and_printdots(len(to_clean))

        for cmd in pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send cleanup on %s' % cmd.host)
                logger.error('Error: %s' % res.stderr)
                raise CleanVerificationError()
        RemoveVerificationEntry(token = self.token).run()
        logger.info('Verification %s has been cleaned.' % self.token)
Example #10
    def test_ao_malloc_failure(self):
        """
        @product_version gpdb: [4.3.5.1 -]
        """
        PSQL.run_sql_command('DROP table if exists ao_read_malloc')
        PSQL.run_sql_command(
            'create table ao_read_malloc (a int) with (appendonly=true, compresstype=quicklz)'
        )
        PSQL.run_sql_command('insert into ao_read_malloc '
                             'select * from generate_series(1, 1000)')

        gpfaultinjector = Command(
            'fault injector', 'source $GPHOME/greenplum_path.sh; '
            'gpfaultinjector -f malloc_failure '
            '-y error -H ALL -r primary')
        gpfaultinjector.run()

        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        PSQL.run_sql_command(
            sql_cmd='select count(*) from ao_read_malloc', results=res)
        logger.info(res)

        self.assertTrue("ERROR:  fault triggered" in res['stderr'])
        self.assertFalse(
            "ERROR:  could not temporarily connect to one or more segments" in
            res['stderr'])

        logger.info('Pass')
Example #11
    def run_repair_script(self, repair_script_dir, dbname=None, alldb=True, online=False, testname=None, outputFile=None, host=None, port=None):
        '''
        @summary : Run the gpcheckcat repair script generated by gpcheckcat
        '''

        if not os.path.exists(repair_script_dir):
            repair_script_dir = '%s/%s' % (self.tinchome, repair_script_dir)

        tinctest.logger.debug('Using repair script dir ... %s' % repair_script_dir)
        repair_scripts = glob.glob(repair_script_dir + '/*.sh')

        ok = 0
        for repair_script in repair_scripts:
            repair_cmd = "/bin/bash %s" % str(repair_script).strip()
            tinctest.logger.info('Running repair script ... %s' % repair_cmd)
            if host and host not in (socket.gethostname(), 'localhost'):
                cmd = Command(name=' Running Gpcheckcat.. ', cmdStr = repair_cmd, ctxt=REMOTE, remoteHost=host)
            else:
                cmd = Command(name=' Running Gpcheckcat.. ', cmdStr = repair_cmd)
            cmd.run(validateAfter=False)
            result = cmd.get_results()
            # Get Error Code from running repair script
            if result.rc != 0:
                ok = result.rc

        if ok != 0:
            return False

        return True
Example #12
def impl(context):
    cmd = """ps ux | grep "/bin/postgres \-D %s " | grep -v grep""" % (context.remote_mirror_datadir)
    cmd = Command(name='user command', cmdStr=cmd, ctxt=REMOTE, remoteHost=context.remote_mirror_segdbname)
    cmd.run(validateAfter=True)
    res = cmd.get_results()
    if not res.stdout.strip():
        raise Exception('Mirror segment "%s" not active on "%s"' % (context.remote_mirror_datadir, context.remote_mirror_segdbname))
Example #13
    def run_gpfaultinjector(self, fault_type, fault_name):
        cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format(
                        fault_type, fault_name)
        cmd = Command(cmd_str, cmd_str)
        cmd.run()

        return cmd.get_results()
Example #14
 def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
     """
     Check for the gpfdist process
     Wait up to 60s for gpfdist to start, else raise an exception
     """
     if port is None:
         port = self.port
     count = 0
     # handle escape of string's quotation for localhost and remote host
     if self.host in ('127.0.0.1',socket.gethostbyname(socket.gethostname()),socket.gethostname(),'localhost'):
         cmdStr = "%s -ef | grep \'gpfdist -d %s -p %s\' | grep -v grep"%(self.ps_command, self.dir, port)
     else:
         cmdStr = 'gpssh -h %s -e "%s -ef | grep \'gpfdist -d %s -p %s\' |grep -v grep"'%(self.host, self.ps_command, self.dir, port)
     cmd = Command(self.name, cmdStr, self.ctxt, self.host)
      # retry once per second, up to 'wait' attempts
     while count < wait:
         cmd.run()      
         results = cmd.get_results()      
         if results.rc == 0:
             return True                
         count = count + 1
         time.sleep(1)
     if raise_assert:
         raise GPFDISTError("Could not start gpfdist process")
     else :
         return False
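The loop above is the usual poll-until-ready idiom: re-run a check once per second up to a deadline. Reduced to a stdlib sketch:

import subprocess
import time

def wait_for_process(check_cmd, wait=60):
    # `check_cmd` is a shell pipeline that exits 0 once the process is up,
    # e.g. "ps -ef | grep '[g]pfdist -p 8080'" (the [g] trick keeps grep
    # from matching its own entry, like the `grep -v grep` used above).
    for _ in range(wait):
        if subprocess.call(check_cmd, shell=True) == 0:
            return True
        time.sleep(1)
    return False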
Example #15
    def setUpClass(cls):
        # we need an empty db to run the tests
        tinctest.logger.info("recreate database wet using dropdb/createdb")
        cmd = Command('recreatedb', 'dropdb wet; createdb wet')
        cmd.run(validateAfter=False)

        cls.drop_roles()

        super(LegacyWETTestCase, cls).setUpClass()

        source_dir = cls.get_source_dir()
        config = GPDBConfig()
        host, _ = config.get_hostandport_of_segment(0)
        port = mppUtil.getOpenPort(8080)
        tinctest.logger.info("gpfdist host = {0}, port = {1}".format(host, port))

        cls.config = config

        data_dir = os.path.join(source_dir, 'data')
        cls.gpfdist = GPFDIST(port, host, directory=data_dir)
        cls.gpfdist.startGpfdist()

        # WET writes into this directory.
        data_out_dir = os.path.join(cls.gpfdist.getdir(), 'output')
        shutil.rmtree(data_out_dir, ignore_errors=True)
        os.mkdir(data_out_dir)
Example #16
def impl(context):
    host = gethostname()
    psql_cmd = 'psql -U "gpadmin/kerberos-test" -h %s template1 -c """select 1;"""' % host
    cmd = Command(name='psql connection with kerberos user',
           cmdStr=psql_cmd)
    cmd.run(validateAfter=True)
    results = cmd.get_results()
Example #17
 def remove_remote_symlink(self, host, datadir):
     datadir_root = os.path.dirname(datadir)
     segdir = os.path.basename(datadir)
     sysCmd = 'rm -f {datadir}; mv {datadir_root}/link/{segdir} {datadir_root}/{segdir}; rmdir {datadir_root}/link;'\
             .format(datadir_root=datadir_root, datadir=datadir, segdir=segdir)
     cmd = Command('remove symlinks and restore the data directory', cmdStr=sysCmd, ctxt=REMOTE, remoteHost=host)
     cmd.run(validateAfter=True)
Example #18
    def setUp(self):
        # Remove standby if present.
        # Though the initial intention of tests was to verify
        # without depending on management utility scripts,
        # the reality after all is some other tests might have
        # left a standby and there is no good way other than
        # the gp management script to remove it.
        cmd_str = 'gpinitstandby -a -r'
        cmd = Command(name='gpinitstandby -r', cmdStr=cmd_str)
        tinctest.logger.info(cmd_str)
        cmd.run(validateAfter=False)

        # For each test case we create a fresh standby and start it.
        self.db_name = self.__class__.db_name
        self.standby = Standby(self.standby_datadir,
                                        self.standby_port)

        self.standby.stop()
        shutil.rmtree(self.basepath, True)
        try:
            os.makedirs(self.basepath)
        except OSError, e:
            if e.errno != 17:  # EEXIST: basepath already exists
                raise
            pass
Example #19
 def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
     """
     Check for the gpfdist process
      Wait up to 60s for gpfdist to start, else raise an exception
     @var wait: wait at least 60s for gpfdist
     @var port: Port Number
     @var raise_assert: raise gpfdist error by default
     """
     if port is None:
         port = self.port
     process_started = False
     count = 0
     while (not process_started and count<wait):
         cmd_str = "%s -ef | grep \"gpfdist -p %s\" | grep -v grep" % (self.ps_command, port)
         cmd = Command(name='check for gpfdist', cmdStr=cmd_str)
         cmd.run()
         
         content = cmd.get_results().stdout
         if len(content)>0:
             if content.find("gpfdist -p %s" % port)>0:
                 process_started = self.is_gpfdist_connected(port)
                 if process_started:
                     return True
         count = count + 1
         time.sleep(1)
     if raise_assert:
         raise GpfdistError("Could not start gpfdist process")
     else:
         print "Could not start gpfdist process"
Example #20
 def create_remote_symlink(self, host, datadir):
     datadir_root = os.path.dirname(datadir)
     segdir = os.path.basename(datadir)
     sysCmd = 'mkdir -p {datadir_root}/link; mv {datadir} {datadir_root}/link/{segdir}; ln -s {datadir_root}/link/{segdir} {datadir}'\
            .format(datadir_root=datadir_root, datadir=datadir, segdir=segdir)
     cmd = Command('create remote symlinks', cmdStr=sysCmd, ctxt=REMOTE, remoteHost=host)
     cmd.run(validateAfter=True)
Example #21
def get_full_timestamp_for_incremental_with_nbu(netbackup_service_host, netbackup_block_size, incremental_timestamp):
    if dump_prefix:
        get_inc_files_cmd = (
            "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*_increments"
            % (netbackup_service_host, dump_prefix)
        )
    else:
        get_inc_files_cmd = (
            "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*_increments"
            % netbackup_service_host
        )

    cmd = Command("Query NetBackup server to get the list of increments files backed up", get_inc_files_cmd)
    cmd.run(validateAfter=True)
    files_list = cmd.get_results().stdout.split("\n")

    for line in files_list:
        fname = line.strip()
        restore_file_with_nbu(netbackup_service_host, netbackup_block_size, fname)
        contents = get_lines_from_file(fname)
        if incremental_timestamp in contents:
            full_timestamp = get_timestamp_from_increments_filename(fname)
            return full_timestamp

    return None
Example #22
 def test_get_host_for_command_for_local_uses_local_hostname(self):
     cmd = Command('name', 'hostname') 
     cmd.run(validateAfter=True)
     hostname = cmd.get_results().stdout.strip()
     result = get_host_for_command(True, cmd)
     expected_result = hostname 
     self.assertEqual(result, expected_result)
Example #23
def get_info():
    """
    Get the current platform
    @return: platform type of the current system
    @rtype: String
    """
    myos = platform.system()
    if myos == "Darwin":
        return 'OSX'
    elif myos == "Linux":
        if os.path.exists("/etc/SuSE-release"):
            return 'SUSE'
        elif os.path.exists("/etc/redhat-release"):
            cmd_str = "cat /etc/redhat-release"
            cmd = Command("run cat for RHEL version", cmd_str)
            cmd.run()
            result = cmd.get_results()
            msg = result.stdout
            if msg.find("5") != -1:
                return 'RHEL5'
            else:
                return 'RHEL6'
    elif myos == "SunOS":
        return 'SOL'
    return None
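On current Linux distributions the release files probed above are superseded by /etc/os-release; a hedged sketch of the same detection without shelling out to cat:

def get_linux_info_sketch():
    # Parse /etc/os-release key=value pairs instead of cat-ing
    # distro-specific files like /etc/redhat-release.
    info = {}
    try:
        with open('/etc/os-release') as f:
            for line in f:
                if '=' in line:
                    key, _, value = line.partition('=')
                    info[key.strip()] = value.strip().strip('"')
    except IOError:
        return None
    return info.get('ID'), info.get('VERSION_ID')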
Example #24
    def wait_kill_and_verify_dump_agent_on_master(self, datadir, wait_log_msg, verify_log_msg):
        """
        """
        status_file_prefix = self.dump_file_prefix + '_status_*_1_'

        cur_dump_date_folder = self.get_cur_date_as_dump_folder()
        log_dir = os.path.join(datadir, DEFAULT_DUMP_LOC, cur_dump_date_folder)

        last_timestamp = self.get_latest_matching_file(log_dir, status_file_prefix)
        if last_timestamp:
            tinctest.logger.info('The latest timestamp matched for file: %s is %s, wait for new status file' % (status_file_prefix, last_timestamp))
        else:
            tinctest.logger.info('Found no existing file matching %s, wait for new status file' % status_file_prefix)
        dump_agent_pid = self.get_dump_proc_pid(datadir)
        tinctest.logger.info("Obtained segment dump agent process id %s" % dump_agent_pid)

        self.backup_timestamp = self.get_latest_log_timestamp(log_dir, status_file_prefix, last_timestamp)
        log_file_path = self.get_latest_matching_file_path(log_dir, status_file_prefix)

        self.wait_for_log_msg(log_file_path, wait_log_msg)
        tinctest.logger.info("Crash segment dump agent with kill -9 %s" % dump_agent_pid)
        kill_cmd = Command(name = 'kill dump_agent', cmdStr='kill -9 %s' % dump_agent_pid)
        kill_cmd.run(validateAfter = True)

        self.wait_gpcrondump_exit()
        self.verify_dump_crash_detected(datadir, cur_dump_date_folder, verify_log_msg)
Example #25
 def create_standby_master(self, gparr):
     standby_host = self.get_standby_host(gparr)
     if standby_host is None:
         raise Exception('Unable to get standby host')
     cmd = Command('create a standby master', cmdStr='gpinitstandby -s %s -a' % standby_host)
     cmd.run(validateAfter=True)
     return standby_host
Example #26
    def GetSegmentInSync(self, sleepTime=60, repeatCnt=120, greenplum_path=""):
        """
        @param sleepTime: Number of seconds to sleep before retry
        @param repeatCnt: Number of times to repeat retry. Default is 2 hours
        @return: Return True when the number of segment servers that are in resync is 0 rows
        """
        inSync = ""
        for cnt in range(repeatCnt):
            data = ""
            try:
                cmd = "psql gptest -c \"SELECT dbid, content, role, preferred_role, status, mode, address, fselocation, port, replication_port FROM gp_segment_configuration, pg_filespace_entry where dbid = fsedbid and mode = 'r'\""
                if greenplum_path:
                    cmd = "%s %s" % (greenplum_path, cmd)
                # use Command instead of ShellCommand
                #rc, data = self.generalUtil.ShellCommand(cmd)
                generalUtil = Command(name='psql gptest -c',cmdStr=cmd)
                generalUtil.run()
                rc = generalUtil.get_results().rc
                data = generalUtil.get_results().stdout
                if rc == 0:
                    # stdout is a string; look for psql's "(0 rows)" marker
                    if '(0 rows)' in data:
                        return rc, True
                time.sleep(sleepTime)
            except Exception, e:
                traceback.print_exc()
                print "ERRORFOUND GetSegmentInSync %s" % (str(e))
                #PrettyPrint('ERRORFOUND GetSegmentInSync', data) TODO
                print 'ERRORFOUND GetSegmentInSync', data
Example #27
 def __init__(self, query, dbname):
     self.dbname = dbname
     self.query = query
     self.result = 1
     self.completed = False
     self.halt = False
     Command.__init__(self, 'on unlock', 'on unlock', ctxt=None, remoteHost=None)
Example #28
    def check_integrityresults(self):
        """
        Check gpverify results from the last token
        @return: True or False, -1 is still running
        """
        sql = "select vertoken from gp_verification_history order by 1 desc limit 1"
        out = PSQL.run_sql_command(sql, flags='-q -t', dbname='postgres')
        last_token = out.strip()

        if not last_token:
            return 0 # No records of gpverify

        cmd = Command(name='gpverify', cmdStr="gpverify --results --token %s" % (last_token))
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        state = result.stdout[len(result.stdout)-2]
        if state.find(self.GPVERIFY_STATE["success"])>0:
            tinctest.logger.info("gpverify at %s: Successful" % last_token)
            return True
        elif state.find(self.GPVERIFY_STATE["running"])>0:
            tinctest.logger.info("gpverify at %s: Running" % last_token)
            return -1
        elif state.find(self.GPVERIFY_STATE["fail"])>0:
            tinctest.logger.info("gpverify at %s: Failed" % last_token)
            return False
        else:
            tinctest.logger.info("gpverify has not start")
            return 0
Example #29
    def test_autovacuum_signaling(self):
        """
        Raise the nextXid to oldest_frozenxid + autovacuum_freeze_max_age.
        Run a transaction.
        Ensure that no autovacuum daemon is started.
        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising master xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        # Burn a transaction to trigger any undesirable behavior that we're disabling.
        with dbconn.connect(dburl) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ps -ef | grep -v grep | grep postgres | grep autovacuum')
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
Example #30
    def test_autovacuum_signaling_on_segment(self):
        """
        Same as above, but on a segment.
        """
        # connect to the master to build gparray
        primary, _ = self._get_primary_mirror_pair()
        logger.info('Isolated segment %d at %s:%d' % (primary.dbid, primary.hostname, primary.port))
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)

        with dbconn.connect(dburl, utility=True) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising segment xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        with dbconn.connect(dburl, utility=True) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ssh %s ps -ef | grep -v grep | grep postgres | grep autovacuum' % primary.hostname)
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
Example #31
def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
    pool = WorkerPool(numWorkers=min(len(host_list), batch_default))

    for host in host_list:
        cmd = Command(host, cmd_str, ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()
    if check_results:
        pool.check_results()
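A stdlib analogue of this fan-out, using a thread pool to run the same command on every host over ssh (a sketch; requires concurrent.futures, i.e. Python 3 or the futures backport):

import subprocess
from concurrent.futures import ThreadPoolExecutor

def run_pool_sketch(host_list, cmd_str, batch_default=16, check_results=True):
    def run_on(host):
        # One ssh invocation per host; capture only the exit code.
        return host, subprocess.call(['ssh', host, cmd_str])

    with ThreadPoolExecutor(max_workers=min(len(host_list), batch_default)) as pool:
        results = list(pool.map(run_on, host_list))

    failed = [host for host, rc in results if rc != 0]
    if check_results and failed:
        raise Exception('command failed on hosts: %s' % ', '.join(failed))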
Example #32
    def run(self, option=' ', validate=True, results=True):
        '''
        @type option: string
        @param option: gprecoverseg option (-F or -r)
        '''
        if not (set(option.split()) <= set(['-F', '-r', ' '])):
            raise GpRecoversegException('Not a valid option with gprecoverseg')
        rcvr_cmd = 'gprecoverseg -a  %s' % option
        cmd = Command(name='Run gprecoverseg',
                      cmdStr='source %s/greenplum_path.sh;%s' %
                      (self.gphome, rcvr_cmd))
        tinctest.logger.info("Running gprecoverseg : %s" % cmd)
        cmd.run(validateAfter=validate)

        if results:
            result = cmd.get_results()
            self.rc, self.stdout, self.stderr = result.rc, result.stdout, result.stderr
            if result.rc != 0 or result.stderr:
                return False
            return True
Example #33
File: db.py Project: xuegang/gpdb
    def setUp(self):
        # Assume setup is done if db exists
        output = PSQL.run_sql_command(
            "select 'command_found_' || datname from pg_database where datname like '"
            + self.db_name + "'")
        if 'command_found_' + self.db_name in output:
            return
        cmd = Command('dropdb', "dropdb " + self.db_name)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        cmd = Command('createdb', "createdb " + self.db_name)
        cmd.run(validateAfter=True)
        result = cmd.get_results()

        self.perform_transformation(local_path('setup.sql'),
                                    local_path('setup.sql.t'))
        PSQL.run_sql_file(local_path('setup.sql.t'),
                          out_file=local_path('setup.out'),
                          dbname=self.db_name)
        return True
Example #34
 def setUp(self):
     super(XidlimitsTests, self).setUp()
     Command('re-build regress.so', 'make -C %s xidhelper.so' %
             local_path('.')).run(validateAfter=True)
     SQLTemplateTestCase.perform_transformation_on_sqlfile(
         local_path('load_xidhelper.sql'),
         local_path('load_xidhelper.sql.t'),
         {'@source@': local_path('xidhelper.so')})
     PSQL.run_sql_file(sql_file=local_path('load_xidhelper.sql.t'),
                       out_file=local_path('load_xidhelper.out.t'))
     self.gparray = GpArray.initFromCatalog(dbconn.DbURL(), utility=True)
Example #35
 def get_walsender_pid(self):
     ''' get wal sender pid '''
     pid_cmd = "ps -ef|grep \'%s\' |grep -v grep" % ('wal sender')
     cmd = Command('Get the pid of the wal sender process', cmdStr=pid_cmd)
     tinctest.logger.info('%s' % cmd)
     cmd.run(validateAfter=False)
     result = cmd.get_results()
      # busy-wait: keep re-running ps until the wal sender shows up
      while (result.rc != 0):
         cmd.run(validateAfter=False)
         result = cmd.get_results()
     tinctest.logger.info(result)
     pid = result.stdout.splitlines()[0].split()[1].strip()
     return pid
Example #36
def restore_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
    if filetype and path:
        raise Exception("Cannot supply both a file type and a file path to restore_file_with_nbu")
    if filetype is None and path is None:
        raise Exception("Cannot call restore_file_with_nbu with no type or path argument")

    if timestamp is None:
        timestamp = context.timestamp
    if filetype:
        path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
    command_string = "gp_bsa_restore_agent --netbackup-service-host %s" % context.netbackup_service_host
    if context.netbackup_block_size is not None:
        command_string += " --netbackup-block-size %s" % context.netbackup_block_size

    command_string += " --netbackup-filename %s > %s" % (path, path)
    logger.debug("Command string inside restore_file_with_nbu: %s\n", command_string)
    if hostname is None:
        Command("restoring metadata files to master", command_string).run(validateAfter=True)
    else:
        Command("restoring metadata files to segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
Example #37
    def stop(self, wait=60, port=None):
        """
        kill the gpfdist process
        @var wait: wait at least 60s for gpfdist
        @var port: Port Number
        @note: previously call cdbfast.GPDFIST.killGpfdist
        """
        if port is None:
            port = self.port
        cmd_str = '%s -ef | grep "gpfdist -p %s" | grep -v grep | awk \'{print $2}\' | xargs kill 2>&1 > /dev/null' % (
            self.ps_command, port)
        cmd = Command(name='stop gpfdist', cmdStr=cmd_str)
        cmd.run()

        is_released = False
        count = 0
        while (not is_released and count < wait):
            is_released = self.is_port_released()
            count = count + 1
            time.sleep(1)
Example #38
 def tearDown(self):
     # Wait for the process we spawned.
     if self.proc is not None:
         tinctest.logger.info("tearDown: waiting for proc")
         (rc, out, err) = self.proc.communicate2()
         tinctest.logger.info("runNoWait rc: %d, output: %s, err: %s" %
                               (rc, out, err))
     # Restarting the stopped segment(s) should be enough but it
     # will require constructing the right pg_ctl command, which is
     # a hassle.
     if self.skipRestart:
         tinctest.logger.info("tearDown: cleaning up fault")
         cmd = Command("Reset fault if one is injected already",
                       self.faultcmd % "reset")
         cmd.run(validateAfter=True)
         tinctest.logger.info(cmd.get_results().printResult())
     else:
         cmd = Command("restarting GPDB", "gpstop -air")
         cmd.run(validateAfter=True)
         tinctest.logger.debug(cmd.get_results().printResult())
Example #39
    def _create_test_db(self):
        testdb_exists = True
        with dbconn.connect(dbconn.DbURL()) as conn:
            row = dbconn.execSQLForSingletonRow(conn, "select count(*) from pg_database where datname='%s'" % self.TEST_DB)

        if row[0] == 0:
            testdb_exists = False

        if not testdb_exists:
            Command('create a test database', 'createdb %s' % self.TEST_DB).run(validateAfter=True)
Example #40
    def drop_caches(remoteHost='localhost'):
        """
        Refresh the system caches
        rc=0, drop caches successfully with no warning(s) or error(s)
        rc=1, drop caches successfully with warning(s) but no error(s)
        rc=2, drop caches with error(s), raise TINCSystemException

        @type remoteHost: string
        @param remoteHost: Host name of the machine where drop caches should be executed
        """
        cmdStr = "echo 'echo 3  > /proc/sys/vm/drop_caches' |sudo -s"
        cmd = Command('drop caches', cmdStr, ctxt=REMOTE, remoteHost=remoteHost)
        cmd.run()
        result = cmd.get_results()
        if result.rc > 1:
            msg = "drop caches failed with rc=%s and stderr=%s" % \
                    (result.rc, result.stderr)
            tinctest.logger.warning(msg)
            raise TINCSystemException(msg)
        tinctest.logger.info("drop caches success with %s" % result)
Example #41
 def setUp(self):
     """
     The method that sub-classes must implement to setup a particular database.
     
     @rtype: boolean
      @return: True if the db is already present; False if it was not present and a new db was created.
              Raises TINCDatagenException if db creation failed
     """
     # Assume setup is done if db exists
     output = PSQL.run_sql_command(
         "select 'command_found_' || datname from pg_database where datname like '"
         + self.db_name + "'")
     if 'command_found_' + self.db_name in output:
         return True
     cmd = Command('createdb', "createdb " + self.db_name)
     cmd.run(validateAfter=True)
     result = cmd.get_results()
     if result.rc != 0:
         raise TINCDatagenException('createdb failed')
     return False
Example #42
def impl(context, directory):
    names = ["Name", "Data", "Data for Name"]
    types = [
        "TABLE", "TABLE DATA", "EXTERNAL TABLE", "ACL", "CONSTRAINT",
        "COMMENT", "PROCEDURAL LANGUAGE", "SCHEMA", "AOSTORAGEOPTS"
    ]
    master_dump_dir = directory if len(
        directory.strip()) != 0 else master_data_dir
    timestamp = context.backup_timestamp
    metadata_file = '%s/db_dumps/%s/gp_dump_1_1_%s.gz' % (
        master_dump_dir, timestamp[0:8], timestamp)
    tmp_metadata_file = '/tmp/behave_metadata_file'

    cmd = Command(name="Unzip conetnts of metadata dump file to temp file",
                  cmdStr='zcat %s > %s' % (metadata_file, tmp_metadata_file))
    cmd.run(validateAfter=True)

    try:
        with open(tmp_metadata_file, 'r') as fd:
            lines = fd.readlines()
            if len(lines) == 0:
                raise Exception('Metadata file has no data')
            for line in lines:
                if (line[:3] == comment_start_expr):
                    if (line.startswith(comment_expr)
                            or line.startswith(comment_data_expr_a)
                            or line.startswith(comment_data_expr_b)):
                        name_k, type_k, schema_k = get_comment_keys(line)
                        if (name_k not in names and type_k != "Type"
                                and schema_k != "Schema"):
                            raise Exception(
                                "Unknown key in the comment line of the metdata_file '%s'. Please check and confirm if the key is correct"
                                % (metadata_file))
                        name_v, type_v, schema_v = get_comment_values(line)
                        if (type_v not in types):
                            raise Exception(
                                "Value of Type in the comment line '%s' of the metadata_file '%s' does not fall under the expected list %s. Please check if the value is correct"
                                % (type_v, metadata_file, types))
    finally:
        if os.path.exists(tmp_metadata_file):
            os.remove(tmp_metadata_file)
Example #43
    def config_primaries_for_replication(self, gpArray):
        logger.info(
            "Starting to modify pg_hba.conf on primary segments to allow replication connections"
        )
        replicationStr = ". {0}/greenplum_path.sh; echo 'host  replication {1} samenet trust {2}' >> {3}/pg_hba.conf; pg_ctl -D {3} reload"

        try:
            for segmentPair in gpArray.getSegmentList():
                allow_pair_hba_line_entries = []
                if self.__options.hba_hostnames:
                    mirror_hostname, _, _ = socket.gethostbyaddr(
                        segmentPair.mirrorDB.getSegmentHostName())
                    hba_line_entry = "\nhost all {0} {1} trust".format(
                        unix.getUserName(), mirror_hostname)
                    allow_pair_hba_line_entries.append(hba_line_entry)
                else:
                    mirror_ips = unix.InterfaceAddrs.remote(
                        'get mirror ips',
                        segmentPair.mirrorDB.getSegmentHostName())
                    for ip in mirror_ips:
                        cidr_suffix = '/128' if ':' in ip else '/32'
                        cidr = ip + cidr_suffix
                        hba_line_entry = "\nhost all {0} {1} trust".format(
                            unix.getUserName(), cidr)
                        allow_pair_hba_line_entries.append(hba_line_entry)
                cmdStr = replicationStr.format(
                    os.environ["GPHOME"], unix.getUserName(),
                    " ".join(allow_pair_hba_line_entries),
                    segmentPair.primaryDB.datadir)
                logger.debug(cmdStr)
                cmd = Command(name="append to pg_hba.conf",
                              cmdStr=cmdStr,
                              ctxt=base.REMOTE,
                              remoteHost=segmentPair.primaryDB.hostname)
                cmd.run(validateAfter=True)

        except Exception, e:
            logger.error(
                "Failed while modifying pg_hba.conf on primary segments to allow replication connections: %s"
                % str(e))
            raise
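The `'/128' if ':' in ip else '/32'` branch hand-rolls a single-host CIDR. The stdlib ipaddress module (Python 3, or the py2 backport) derives the same suffix from the address family; a sketch:

import ipaddress

def host_cidr(ip):
    # A single-address network: /32 for IPv4, /128 for IPv6.
    addr = ipaddress.ip_address(u'%s' % ip)
    return '%s/%d' % (addr, addr.max_prefixlen)

assert host_cidr('192.0.2.10') == '192.0.2.10/32'
assert host_cidr('2001:db8::1') == '2001:db8::1/128'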
Example #44
 def __init__(self,
              out_file,
              ans_file,
              gp_ignore=True,
              ignore_header=True,
              ignore_plans=False,
              match_sub=[]):
     cmd_str = 'gpdiff.pl -U 10 -w -B -I NOTICE:'
     if ignore_header:
         cmd_str += ' -I GP_IGNORE -gpd_ignore_headers'
     elif gp_ignore:
         cmd_str += ' -I GP_IGNORE'
     cmd_str += ' -gpd_init %s/global_init_file' % (os.path.abspath(
         os.path.dirname(__file__)))
     if ignore_plans:
         cmd_str += ' -gpd_ignore_plans'
     if match_sub:
         cmd_str += ' -gpd_init '
         cmd_str += ' -gpd_init '.join(match_sub)
     cmd_str += ' %s %s' % (ans_file, out_file)
     Command.__init__(self, 'run gpdiff', cmd_str)
Example #45
def run_shell_command(cmdstr,
                      cmdname='shell command',
                      results={
                          'rc': 0,
                          'stdout': '',
                          'stderr': ''
                      }):
    cmd = Command(cmdname, cmdstr)
    tinctest.logger.info('Executing command: %s : %s' % (cmdname, cmdstr))
    cmd.run()
    result = cmd.get_results()
    results['rc'] = result.rc
    results['stdout'] = result.stdout
    results['stderr'] = result.stderr
    tinctest.logger.info('Finished command execution with return code %s ' %
                         (str(result.rc)))
    tinctest.logger.debug('stdout: ' + result.stdout)
    tinctest.logger.debug('stderr: ' + result.stderr)
    if result.rc != 0:
        return False
    return True
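Note that `results={...}` above is a mutable default argument: every call that omits `results` reads and writes the same dict object, so state can leak between calls. A sketch of the conventional fix:

import subprocess

def run_shell_command_safe(cmdstr, cmdname='shell command', results=None):
    # Default to None and allocate a fresh dict per call, so successive
    # calls cannot observe each other's rc/stdout/stderr.
    if results is None:
        results = {}
    proc = subprocess.Popen(cmdstr, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    results.update(rc=proc.returncode, stdout=out, stderr=err)
    return proc.returncode == 0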
Example #46
 def execute(self):
     logger.info("Commencing pg_catalog dump")
     if self.backup_dir is not None:
         global_file = os.path.join(
             self.backup_dir, DUMP_DIR, DUMP_DATE,
             "%s%s" % (GLOBAL_PREFIX, self.timestamp))
     else:
         global_file = os.path.join(
             self.master_datadir, DUMP_DIR, DUMP_DATE,
             "%s%s" % (GLOBAL_PREFIX, self.timestamp))
     Command('Dump global objects', "pg_dumpall -g --gp-syntax > %s" %
             global_file).run(validateAfter=True)
Example #47
    def run_using_workerpool(self, option=''):
        if not (set(option.split()) <= set(['-F', '-r', ' '])):
            raise GpRecoversegException('Not a valid option with gprecoverseg')

        rcvr_cmd = 'gprecoverseg -a  %s' % option
        cmd = Command(name='Run gprecoverseg',
                      cmdStr='source %s/greenplum_path.sh;%s' %
                      (self.gphome, rcvr_cmd))
        tinctest.logger.info("Running gprecoverseg : %s" % cmd)

        pool = WorkerPool(numWorkers=1, daemonize=True)
        pool.addCommand(cmd)
Example #48
def backup_file_with_nbu(context, filetype=None, path=None, dbid=1, hostname=None, timestamp=None):
    if filetype and path:
        raise Exception("Cannot supply both a file type and a file path to backup_file_with_nbu")
    if filetype is None and path is None:
        raise Exception("Cannot call backup_file_with_nbu with no type or path argument")
    if timestamp is None:
        timestamp = context.timestamp
    if filetype:
        path = context.generate_filename(filetype, dbid=dbid, timestamp=timestamp)
    command_string = "cat %s | gp_bsa_dump_agent --netbackup-service-host %s --netbackup-policy %s --netbackup-schedule %s --netbackup-filename %s" % \
                     (path, context.netbackup_service_host, context.netbackup_policy, context.netbackup_schedule, path)
    if context.netbackup_block_size is not None:
        command_string += " --netbackup-block-size %s" % context.netbackup_block_size
    if context.netbackup_keyword is not None:
        command_string += " --netbackup-keyword %s" % context.netbackup_keyword
    logger.debug("Command string inside backup_%s_file_with_nbu: %s\n", filetype, command_string)
    if hostname is None:
        Command("dumping metadata files from master", command_string).run(validateAfter=True)
    else:
        Command("dumping metadata files from segment", command_string, ctxt=REMOTE, remoteHost=hostname).run(validateAfter=True)
    logger.debug("Command ran successfully\n")
Example #49
    def run(self, validate=True):
        tinctest.logger.info("Running gpseginstall: %s" %self)
        with open(self.hostfile, 'w') as f:
            for host in self.hosts[1:]:
                f.write(host)
                f.write('\n')
        
        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command('gpssh-exkeys -f %s' %self.hostfile, 'gpssh-exkeys', res)

        if res['rc'] > 0:
            raise Exception("Failed to do gpssh-exkeys: %s" %res['stderr'])

        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command("gpssh -f %s -e 'mkdir -p %s'" %(self.hostfile, self.gphome), 'gpssh mkdir', res)
        if res['rc'] > 0:
            raise Exception("Failed to create gphome directories on segments: %s" %res['stderr'])
        
        Command.run(self, validateAfter=validate)
        result = self.get_results()
        return result
Example #50
 def get_linenum_pglog(self):
     """
     Get the latest log file, and count the current max line number before starting
     pg_terminate_backend(), record the value by using tmp files. Here not able to
     use global variables, since Tinc generates many new instances according to number
     of test cases, variable value will be cleaned up.
     """
     files = [
         os.path.join(self.PG_LOG_DIR, fname)
         for fname in os.listdir(self.PG_LOG_DIR)
     ]
     logfile = max(files, key=os.path.getmtime)
     log_dir = os.path.join(local_path(''), 'log.txt')
     linenum_dir = os.path.join(local_path(''), 'linenum.txt')
     Command('clean file', 'rm -rf %s' % log_dir).run()
     Command('save value', 'echo \'%s\'>>%s' % (logfile, log_dir)).run()
     with open(logfile) as fin:
         linenum = sum(1 for line in fin)
     Command('clean file', 'rm -rf %s' % linenum_dir).run()
     Command('save line number',
             'echo \'%s\'>>%s' % (linenum, linenum_dir)).run()
Example #51
def run_shell_command(cmdstr,
                      cmdname='shell command',
                      results={
                          'rc': 0,
                          'stdout': '',
                          'stderr': ''
                      },
                      verbose=False):
    cmd = Command(cmdname, cmdstr)
    cmd.run()
    result = cmd.get_results()
    results['rc'] = result.rc
    results['stdout'] = result.stdout
    results['stderr'] = result.stderr

    if verbose:
        print "command output: %s" % results['stdout']
    if results['rc'] != 0:
        if verbose:
            print "command error: %s" % results['stderr']
    return results
Example #52
def search_log(cmd, key, segdir, logname, start=""):
    """
    Search using gplogfilter. Returns True if match has no '0 lines' -
    meaning some match found
    """
    logfilename = local_path(logname)
    GPHOME = os.getenv("GPHOME")
    if len(start) > 0:
        start = "-b %s" % start
    cmd_str = ("gpssh %s -e \"source %s/greenplum_path.sh; "
               "gplogfilter %s -m %s %s\" > %s 2>&1" %
               (cmd, GPHOME, start, key, segdir, logfilename))
    cmd = Command("search_log", cmdStr=cmd_str)
    cmd.run(validateAfter=False)
    with open(logfilename, "r") as f:
        for line in f.readlines():
            line = line.strip()
            if line.find("match:") > 0:
                if line.find("0 lines") < 0:
                    return True
    return False
Example #53
class SimpleCommand(object):
    """A simple wrapper for Command."""
    def __init__(self, cmd, args=None):
        self.cmd = cmd
        # a string default would break [cmd] + args; default to a fresh list
        self.args = args if args is not None else []
        cmd_array = [cmd] + self.args
        self.proc = Command('run ' + cmd, ' '.join(cmd_array))

    def run_or_fail(self):
        """Run this command, and fails if the return code is non-zero.
        Returns stdout and stderr of the command.
        """

        self.proc.run()
        results = self.proc.get_results()
        assert results.rc == 0, (
            "{cmd} failed: args='{args}'\n{stderr}".format(
                cmd=self.cmd, args=' '.join(self.args), stderr=results.stderr))
        self.results = results
        return results.stdout, results.stderr
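Usage might look like this (hypothetical; assumes ls is on PATH):

runner = SimpleCommand('ls', ['-l'])
out, err = runner.run_or_fail()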
Example #54
    def setUp(self):
        # Assume setup is done if db exists
        output = PSQL.run_sql_command(
            "select 'command_found_' || datname from pg_database where datname like '"
            + self.db_name + "'")
        if 'command_found_' + self.db_name in output:
            return
        cmd = Command('dropdb', "dropdb " + self.db_name)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        cmd = Command('createdb', "createdb " + self.db_name)
        cmd.run(validateAfter=True)
        result = cmd.get_results()

        MYD = os.path.abspath(os.path.dirname(__file__))

        # Copy and unzip data files
        if os.path.exists(MYD + '/' + '/compressed_data/' + 'mpp16291.tar.gz'):
            run_shell_command(
                'cp ' + MYD + '/compressed_data/' + 'mpp16291.tar.gz ' + MYD +
                '/' + 'mpp16291.tar.gz ', 'Copy compressed data')
            run_shell_command('gunzip ' + MYD + '/' + 'mpp16291.tar.gz',
                              'Unzip compressed data')
            run_shell_command(
                'tar -xvf ' + MYD + '/' + 'mpp16291.tar -C ' + MYD,
                'Untar archive')

        filelist = [
            'dim_workflows.dat', 'dim_temporarl_expressions.dat',
            'dim_subject_areas.dat', 'dim_dates.dat', 'xref_dim_dates_te.dat',
            'fact_workflow_events.dat', 'fact_task_events.dat', 'dim_tasks.dat'
        ]

        for i in range(len(filelist)):
            runfile = MYD + '/adp/' + filelist[i]
            PSQL.run_sql_file(runfile,
                              out_file=runfile.replace('.dat', '') + '.out',
                              dbname=self.db_name)

        return True
Example #55
 def GetSegmentInSync(self, sleepTime=60, repeatCnt=120, greenplum_path=""):
     """
     @param sleepTime: Number of seconds to sleep before retry
     @param repeatCnt: Number of times to repeat retry. Default is 2 hours
     @return: Return True when the number of segment servers that are in resync is 0 rows
     """
     inSync = ""
     for cnt in range(repeatCnt):
         data = ""
         try:
             cmd = "psql gptest -c \"SELECT dbid, content, role, preferred_role, status, mode, address, fselocation, port, replication_port FROM gp_segment_configuration, pg_filespace_entry where dbid = fsedbid and mode = 'r'\""
             if greenplum_path:
                 cmd = "%s %s" % (greenplum_path, cmd)
             # use Command instead of ShellCommand
             #rc, data = self.generalUtil.ShellCommand(cmd)
             generalUtil = Command(name='psql gptest -c', cmdStr=cmd)
             generalUtil.run()
             rc = generalUtil.get_results().rc
             data = generalUtil.get_results().stdout
              if rc == 0:
                  # stdout is a string; look for psql's "(0 rows)" marker
                  if '(0 rows)' in data:
                      return rc, True
             time.sleep(sleepTime)
         except Exception, e:
             traceback.print_exc()
             print "ERRORFOUND GetSegmentInSync %s" % (str(e))
             #PrettyPrint('ERRORFOUND GetSegmentInSync', data) TODO
             print 'ERRORFOUND GetSegmentInSync', data
Example #56
 def stop_gpsmon_process(self, hostname):
     grep_expr = '[g]psmon -m .*'
     tinctest.logger.info('Grep expr = %s on host %s' % (grep_expr, hostname))
     retries = 60
     for r in range(retries):
         cmd = Command('get the pid for gpsmon', cmdStr="ps -ef | grep '%s' | awk '{print \$2}'" % grep_expr, ctxt=REMOTE, remoteHost=hostname)
         cmd.run(validateAfter=True)
         result = cmd.get_results()
         if not result.stdout.strip() or len(result.stdout.strip().split('\n')) > 1:
             time.sleep(1)
             continue
         pid = ' '.join([p.strip() for p in result.stdout.strip().split('\n')]) 
         if pid:
             cmd = Command('stopping the gpsmon process', cmdStr='kill -SIGSTOP %s' % pid, ctxt=REMOTE, remoteHost=hostname)
             cmd.run(validateAfter=True)
             break
         else:
             raise Exception('Unable to find gpsmon process. Please make sure it is installed')
Example #57
 def kill_zombies(self):
     ''' 
     @summary : There are stray zombie processes running after each test. This method clears them 
     '''
     tinctest.logger.info("[STLRTest] Running kill_zombies")
     cmd_str = "ps -ef | grep \"port\" | awk '{print $3 \"#\" $2}' | grep -w 1"
     cmd = Command("shell_command", cmd_str)
     tinctest.logger.info('Executing command: %s : %s' %("shell command", cmd_str))
     cmd.run()
     result = cmd.get_results()
     out = result.stdout
     lines = out.split('\n')
     for line in lines:
         pids = line.split('#')
         if pids[0] == '1':
            kill_str= "kill -9 %s" %(pids[1])
            cmd2 = Command("kill_command", kill_str)
            cmd2.run()
Example #58
    def test_gpstart_logDir(self):
        tinctest.logger.info("Running test_gpstart_logDir")
        self.logdir = ''.join([self.basedir, '/logs'])
        cmd = Command(name='Remove gpstop<nnnn>.log', cmdStr='rm -f %s/gpstop*' % (self.logdir))
        tinctest.logger.info("Removing gpstop<nnnn>.log : %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0 or result.stderr:
            raise gpstopException("Not able to delete existing gpstop<nnnn>.log")
        lcmd = ' '.join(['ls', self.logdir, '| wc -l'])
        res = False
        if self.is_not_running_gpdb():
            res = self.gp.run_gpstart_cmd(logdir=self.logdir)
        if res is not True:
            raise GPstopError("Error : run_gpstart_cmd(logdir) failed \n")
        cmd = Command(name='count of gpstart<nnnn>.log', cmdStr=' %s ' % (lcmd))
        tinctest.logger.info("Count gpstart<nnnn>.log : %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0 or result.stderr:
            raise gpstopException("Not able to get count of gpstart<nnnn>.log")
        assert int(result.stdout) > 0
Example #59
 def run_test(self, debug_dtm_action_segment, debug_dtm_action_target,
              debug_dtm_action_protocol, debug_dtm_action,
              debug_dtm_action_nestinglevel):
      file_name = 'protocol' + self.get_output_file(
          debug_dtm_action_segment, debug_dtm_action_target,
          debug_dtm_action_protocol, debug_dtm_action,
          debug_dtm_action_nestinglevel)
     test_name = 'udf_exception_handling_' + debug_dtm_action_target + '_seg' + debug_dtm_action_segment
     sql_file = self.get_sql_files(test_name)
     out_file = self.base_dir + "/sql/" + test_name + '.out'
     out_file2 = self.base_dir + "/sql/" + file_name + '.out'
     ans_file = self.base_dir + "/expected/" + file_name + '.ans'
     tinctest.logger.info('sql-file == %s \n' % sql_file)
     tinctest.logger.info('out-file == %s \n' % out_file)
     tinctest.logger.info('ans-file == %s \n' % ans_file)
     self.run_sql(sql_file, out_file=out_file)
     cmd_str = 'cp ' + out_file + ' ' + out_file2
     cmd = Command("bak outfile ", cmd_str)
     cmd.run()
     self.assertTrue(int(cmd.get_results().rc) == 0, cmd_str)
     self.validate_sql(ans_file, out_file2)
Example #60
    def test_autovacuum_signaling_on_segment(self):
        """
        Same as above, but on a segment.
        """
        # connect to the master to build gparray
        primary, _ = self._get_primary_mirror_pair()
        logger.info('Isolated segment %d at %s:%d' %
                    (primary.dbid, primary.hostname, primary.port))
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)

        with dbconn.connect(dburl, utility=True) as conn:
            oldest_xid = int(
                dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(
                dbconn.execSQLForSingleton(conn,
                                           'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid,
                                           autovacuum_freeze_max_age)
            logger.info('Raising segment xid to autovacuum_xid_limit %d' %
                        autovacuum_xid_limit)
            dbconn.execSQLForSingleton(
                conn,
                "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        with dbconn.connect(dburl, utility=True) as conn:
            self.assertEqual(1,
                             int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command(
            'check for autovacuum',
            'ssh %s ps -ef | grep -v grep | grep postgres | grep autovacuum' %
            primary.hostname)
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "",
                         "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')