Example #1
File: fm01.py Project: 50wu/gpdb
    def setUp(self):
        # Assume setup is done if db exists
        output = PSQL.run_sql_command("select 'command_found_' || datname from pg_database where datname like '" + self.db_name + "'")
        if 'command_found_' + self.db_name in output:
            return
        cmd = Command('dropdb', "dropdb " + self.db_name)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        cmd = Command('createdb', "createdb " + self.db_name)
        cmd.run(validateAfter=True)
        result = cmd.get_results()

        MYD = os.path.abspath( os.path.dirname( __file__ ) )

        # Copy and unzip data files
        if os.path.exists( MYD + '/compressed_data/' + 'mpp16291.tar.gz' ):
            run_shell_command( 'cp ' + MYD + '/compressed_data/' + 'mpp16291.tar.gz ' + MYD + '/' + 'mpp16291.tar.gz ', 'Copy compressed data' )
            run_shell_command( 'gunzip ' + MYD + '/' +  'mpp16291.tar.gz', 'Unzip compressed data' )
            run_shell_command( 'tar -xvf ' + MYD + '/' +  'mpp16291.tar -C ' + MYD, 'Untar archive' )

        filelist = [ 'dim_workflows.dat', 'dim_temporarl_expressions.dat', 'dim_subject_areas.dat', 'dim_dates.dat', 'xref_dim_dates_te.dat', 'fact_workflow_events.dat', 'fact_task_events.dat', 'dim_tasks.dat']

        for i in range(len(filelist)):
            runfile = MYD + '/adp/' + filelist[i]
            PSQL.run_sql_file( runfile, out_file = runfile.replace('.dat', '') + '.out', dbname = self.db_name)

        return True
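All of these examples share one pattern: construct a Command with a descriptive name and a shell string, run it, then inspect get_results(). A minimal sketch of that pattern, assuming gppylib is importable (Command and REMOTE live in gppylib.commands.base in the gpdb sources); the host name 'sdw1' is illustrative:

# Minimal sketch of the Command pattern used throughout these examples.
from gppylib.commands.base import Command, REMOTE

cmd = Command(name='list tmp', cmdStr='ls /tmp')
cmd.run(validateAfter=False)          # validateAfter=True raises on a non-zero rc
result = cmd.get_results()
print result.rc, result.stdout, result.stderr

# The same API runs over ssh when ctxt=REMOTE and remoteHost are supplied:
remote = Command(name='remote list tmp', cmdStr='ls /tmp',
                 ctxt=REMOTE, remoteHost='sdw1')
remote.run(validateAfter=True)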
Example #2
    def setUp(self):
        # Assume setup is done if db exists
        output = PSQL.run_sql_command("select 'command_found_' || datname from pg_database where datname like '" + self.db_name + "'")
        if 'command_found_' + self.db_name in output:
            return
        cmd = Command('dropdb', "dropdb " + self.db_name)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        cmd = Command('createdb', "createdb " + self.db_name)
        cmd.run(validateAfter=True)
        result = cmd.get_results()

        MYD = os.path.abspath( os.path.dirname( __file__ ) )

        #First create the schemas, before loading the data
        runfile = MYD + '/' + 'mpp18457_repro_setup.sql'
        PSQL.run_sql_file( runfile, out_file = runfile.replace('.sql', '') + '.out', dbname = self.db_name)

        # Copy and unzip data files
        if os.path.exists( MYD + '/compressed_data/' + 'mpp18457.tar.gz' ):
            run_shell_command( 'cp ' + MYD + '/compressed_data/' + 'mpp18457.tar.gz ' + MYD + '/' + 'mpp18457.tar.gz ', 'Copy compressed data' )
            run_shell_command( 'gunzip ' + MYD + '/' +  'mpp18457.tar.gz', 'Unzip compressed data' )
            run_shell_command( 'tar -xvf ' + MYD + '/' +  'mpp18457.tar -C ' + MYD, 'Untar archive' )

        mypath = MYD + '/mpp18457/'

        filelist = [ f for f in os.listdir(mypath) if os.path.isfile(mypath + f) and f.endswith('dmp') ]

        # Set-up schema, data
        for i in range(len(filelist)):
            runfile = str(MYD) + str('/mpp18457/') + str(filelist[i])
            PSQL.run_sql_file( runfile, out_file = runfile.replace('.dmp', '') + '.out', dbname = self.db_name)

        return True
Example #3
def get_filename_for_content(context,
                             filetype,
                             content,
                             remote_directory=None,
                             host=None):
    filetype_glob = context.generate_filename(filetype,
                                              content=content,
                                              directory=remote_directory)
    if remote_directory:
        if not host:
            raise Exception(
                "Must supply name of remote host to check for %s file" %
                filetype)
        cmd = Command(
            name="Find file of type %s for content %d on host %s" %
            (filetype, content, host),
            cmdStr='python -c "import glob; print glob.glob(\'%s\')[0]"' %
            filetype_glob,
            ctxt=REMOTE,
            remoteHost=host)
        cmd.run()

        if cmd.get_results().rc == 0 and cmd.get_results().stdout:
            return cmd.get_results().stdout
        return None
    else:
        filenames = glob.glob(filetype_glob)
        if filenames and len(filenames) > 0:
            return filenames[0]
        return None
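One caveat: the remote one-liner above indexes glob.glob(...)[0] directly, so it exits non-zero with an IndexError traceback whenever nothing matches. A slightly more defensive variant (a sketch, not from the source) prints the first match or an empty string, keeping the exit status clean:

# Hypothetical variant: exit 0 whether or not the glob matches anything.
one_liner = ('python -c "import glob; '
             'm = glob.glob(\'%s\'); '
             'print m[0] if m else \'\'"' % filetype_glob)
cmd = Command(name='find %s file on %s' % (filetype, host),
              cmdStr=one_liner, ctxt=REMOTE, remoteHost=host)
cmd.run()
match = cmd.get_results().stdout.strip()  # empty string means no match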
Example #5
def impl(context, filetype):
    backup_utils = Context()
    if hasattr(context, 'netbackup_service_host'):
        backup_utils.netbackup_service_host = context.netbackup_service_host
    if hasattr(context, 'backup_timestamp'):
        backup_utils.timestamp = context.backup_timestamp
    if hasattr(context, 'backup_dir'):
        backup_utils.backup_dir = context.backup_dir
    else:
        backup_utils.backup_dir = None

    if filetype not in ['config', 'state']:
        filename = backup_utils.generate_filename(filetype)
        cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (
            backup_utils.netbackup_service_host, filename)
        cmd = Command("Querying NetBackup server for %s file" % filetype,
                      cmd_str)
        cmd.run(validateAfter=True)
        if cmd.get_results().stdout.strip() != filename:
            raise Exception(
                'File %s was not backed up to NetBackup server %s successfully'
                % (filename, backup_utils.netbackup_service_host))

    if filetype == 'config':
        master_port = os.environ.get('PGPORT')
        gparray = GpArray.initFromCatalog(dbconn.DbURL(port=master_port),
                                          utility=True)
        segs = [
            seg for seg in gparray.getDbList()
            if seg.isSegmentPrimary(current_role=True)
        ]
        for seg in segs:
            backup_utils.master_datadir = seg.getSegmentDataDirectory()
            seg_config_filename = backup_utils.generate_filename(
                'segment_config', dbid=seg.getSegmentDbId())
            seg_host = seg.getSegmentHostName()
            cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (
                backup_utils.netbackup_service_host, seg_config_filename)
            cmd = Command("Querying NetBackup server for segment config file",
                          cmd_str,
                          ctxt=REMOTE,
                          remoteHost=seg_host)
            cmd.run(validateAfter=True)
            if cmd.get_results().stdout.strip() != seg_config_filename:
                raise Exception(
                    'Segment config file %s was not backed up to NetBackup server %s successfully'
                    % (seg_config_filename, backup_utils.netbackup_service_host))

    elif filetype == 'state':
        for type in ['ao', 'co', 'last_operation']:
            filename = backup_utils.generate_filename(type)
            cmd_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (
                backup_utils.netbackup_service_host, filename)
            cmd = Command("Querying NetBackup server for %s file" % type,
                          cmd_str)
            cmd.run(validateAfter=True)
            if cmd.get_results().stdout.strip() != filename:
                raise Exception(
                    'The %s file %s was not backed up to NetBackup server %s successfully'
                    % (type, filename, backup_utils.netbackup_service_host))
Example #6
    def GetSegmentInSync(self, sleepTime=60, repeatCnt=120, greenplum_path=""):
        """
        @param sleepTime: Number of seconds to sleep between retries
        @param repeatCnt: Number of times to retry; the default (120 retries, 60s apart) spans 2 hours
        @return: (rc, True) once no segment servers remain in resync mode ('(0 rows)')
        """
        inSync = ""
        for cnt in range(repeatCnt):
            data = ""
            try:
                cmd = "psql gptest -c \"SELECT dbid, content, role, preferred_role, status, mode, address, fselocation, port, replication_port FROM gp_segment_configuration, pg_filespace_entry where dbid = fsedbid and mode = 'r'\""
                if greenplum_path:
                    cmd = "%s %s" % (greenplum_path, cmd)
                # use Command instead of ShellCommand
                #rc, data = self.generalUtil.ShellCommand(cmd)
                generalUtil = Command(name='psql gptest -c',cmdStr=cmd)
                generalUtil.run()
                rc = generalUtil.get_results().rc
                data = generalUtil.get_results().stdout
                if rc == 0:
                    if True in ['(0 rows)' in x for x in data]:
                        return rc, True
                time.sleep(sleepTime)
            except Exception, e:
                traceback.print_exc()
                print "ERRORFOUND GetSegmentInSync %s" % (str(e))
                #PrettyPrint('ERRORFOUND GetSegmentInSync', data) TODO
                print 'ERRORFOUND GetSegmentInSync', data
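The resync check above is one instance of a poll-until-condition loop that recurs throughout these tests. A generic helper in the same style (a sketch; the name poll_command is illustrative, not from the source):

import time
from gppylib.commands.base import Command

def poll_command(cmd_str, expected_substr, attempts=120, sleep_sec=60):
    """Re-run cmd_str until its stdout contains expected_substr.
    Returns True on success, False once attempts are exhausted."""
    for _ in range(attempts):
        cmd = Command(name='poll', cmdStr=cmd_str)
        cmd.run()
        result = cmd.get_results()
        if result.rc == 0 and expected_substr in result.stdout:
            return True
        time.sleep(sleep_sec)
    return False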
Example #7
    def test_reindex_pg_class(self):
        tinctest.logger.info("create checkpoint")
        results = {'rc':0, 'stdout':'', 'stderr':''}
        PSQL.run_sql_command("checkpoint", results=results)
        assert results['rc'] == 0, results['stderr']

        tinctest.logger.info("inject fault to skip checkpoints")
        cmd = Command("skip checkpoint on primaries",
                      "gpfaultinjector -f checkpoint -m async -y skip -o 0"
                      " -H ALL -r primary")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        cmd = Command("skip checkpoint on master",
                      "gpfaultinjector -f checkpoint -m async -y skip -o 0 -s 1")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("reindex pg_class indexes")
        assert PSQL.run_sql_file(local_path('reindex_pg_class.sql'))

        tinctest.logger.info("shutdown immediate")
        cmd = Command("shutdown immediate", "gpstop -ai")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("trigger recovery")
        cmd = Command("restart the cluster", "gpstart -a")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("validate recovery succeeded")
        results = {'rc':0, 'stdout':'', 'stderr':''}
        PSQL.run_sql_command("DROP TABLE reindex_pg_class_test", results=results)
        assert results['rc'] == 0, results['stderr']
Example #8
def impl(context, filetype, dir):
    if dir == 'master_data_directory':
        dir = master_data_dir
    if filetype == 'report':
        filename = '%s/gp_restore_%s.rpt' % (dir, context.backup_timestamp)
        if not os.path.isfile(filename):
            raise Exception('Report file %s is not present in master data directory' % filename)
    elif filetype == 'status':
        gparray = GpArray.initFromCatalog(dbconn.DbURL())
        if dir == 'segment_data_directory':
            primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
            for seg in primary_segs:
                host = seg.getSegmentHostName()
                seg_data_dir = seg.getSegmentDataDirectory()
                cmd = Command('check status file', "ls %s/gp_restore_status_*_%s" % (seg_data_dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)
                cmd.run(validateAfter=True)
                results = cmd.get_results()
                if not results.stdout.strip():
                    raise Exception('Status file ending with timestamp %s is not present in segment %s data directory' % (context.backup_timestamp, host))
        else:
            count = 0
            primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
            for seg in primary_segs:
                host = seg.getSegmentHostName()
                cmd = Command('check status file', "ls %s/gp_restore_status_*_%s" % (dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)
                cmd.run(validateAfter=True)
                results = cmd.get_results()
                if results.stdout.strip():
                    count += 1
                else:
                    raise Exception('Status file not found in segment: %s' % host)
            segs = len(primary_segs)
            if count != segs:
                raise Exception('Expected %d status file but found %d' % (segs, count))
Example #10
 def GetSegmentInSync(self, sleepTime=60, repeatCnt=120, greenplum_path=""):
     """
     @param sleepTime: Number of seconds to sleep between retries
     @param repeatCnt: Number of times to retry; the default (120 retries, 60s apart) spans 2 hours
     @return: (rc, True) once no segment servers remain in resync mode ('(0 rows)')
     """
     inSync = ""
     for cnt in range(repeatCnt):
         data = ""
         try:
             cmd = "psql gptest -c \"SELECT dbid, content, role, preferred_role, status, mode, address, fselocation, port, replication_port FROM gp_segment_configuration, pg_filespace_entry where dbid = fsedbid and mode = 'r'\""
             if greenplum_path:
                 cmd = "%s %s" % (greenplum_path, cmd)
             # use Command instead of ShellCommand
             #rc, data = self.generalUtil.ShellCommand(cmd)
             generalUtil = Command(name='psql gptest -c', cmdStr=cmd)
             generalUtil.run()
             rc = generalUtil.get_results().rc
             data = generalUtil.get_results().stdout
             if rc == 0:
                 if True in ['(0 rows)' in x for x in data]:
                     return rc, True
             time.sleep(sleepTime)
         except Exception, e:
             traceback.print_exc()
             print "ERRORFOUND GetSegmentInSync %s" % (str(e))
             #PrettyPrint('ERRORFOUND GetSegmentInSync', data) TODO
             print 'ERRORFOUND GetSegmentInSync', data
Example #11
    def _run_test(self, tincmm_cmd):
        """
        Executes a tincmm cmd and compares the output with an expected file
        """
        pwd = os.path.dirname(inspect.getfile(self.__class__))
        test_dir = os.path.join(pwd, 'sample')

        cmd = Command('tincmm', tincmm_cmd)
        cmd.run()
        result = cmd.get_results()

        self.assertEquals(result.rc, 0)

        cmd_out_file = os.path.join(self.__class__.out_dir, "%s_cmd.out" %self._testMethodName)
        cmd_expected_out_file = os.path.join(self.__class__.expected_out_dir, "%s_cmd.out" %self._testMethodName)
        with open(cmd_out_file, 'w') as f:
            f.write(result.stdout)

        basename = os.path.basename(cmd_out_file)
        sorted_out_file = '/tmp/%s_output' %basename
        sorted_expected_out_file = '/tmp/%s_expected' %basename
        cmd = Command('sort', "sort %s > %s; sort %s > %s" %(cmd_out_file, sorted_out_file, cmd_expected_out_file, sorted_expected_out_file))
        cmd.run()
        result = cmd.get_results()
        self.assertEqual(result.rc, 0, result.stdout)
        cmd = Command('diff', "diff %s %s" %(sorted_out_file, sorted_expected_out_file))
        cmd.run()
        result = cmd.get_results()
        self.assertEqual(result.rc, 0, result.stdout)
Example #12
 def test_gpstop_logDir(self):
     tinctest.logger.info("Running test_gpstop_logDir")
     self.logdir = ''.join([self.basedir, '/logs'])
     cmd = Command(name='Remove gpstop<nnnn>.log',
                   cmdStr='rm -f %s/gpstop*' % (self.logdir))
     tinctest.logger.info("Removing gpstop<nnnn>.log : %s" % cmd)
     cmd.run(validateAfter=True)
     result = cmd.get_results()
     if result.rc != 0 or result.stderr:
         raise gpstopException(
             "Not able to delete existing gpstop<nnnn>.log")
     lcmd = ' '.join(['ls', self.logdir, '| wc -l'])
     res = False
     if self.is_gpdb_running():
         res = self.gps.run_gpstop_cmd(logdir=self.logdir)
     if res is not True:
         raise GPstopError("Error : gpstop_logDir() failed \n")
     cmd = Command(name='count of  gpstop<nnnn>.log',
                   cmdStr=' %s ' % (lcmd))
     tinctest.logger.info("Count gpstop<nnnn>.log : %s" % cmd)
     cmd.run(validateAfter=True)
     result = cmd.get_results()
     if result.rc != 0 or result.stderr:
         raise gpstopException("Not able to get count of gpstop<nnnn>.log")
     assert int(result.stdout) > 0
Example #13
def impl(context, sql_cmd, dbname):
    psql_cmd = "PGDATABASE=\'%s\' PGOPTIONS=\'-c gp_session_role=utility\' psql -t -h %s -p %s -c \"%s\"; " % (
        dbname, context.remote_pair_primary_host, context.remote_pair_primary_port, sql_cmd)
    cmd = Command(name='Running Remote command: %s' % psql_cmd, cmdStr = psql_cmd)
    cmd.run(validateAfter=True)
    if [cmd.get_results().stdout.strip()] not in context.stored_sql_results:
        raise Exception("cmd results do not match\n expected: '%s'\n received: '%s'" % (
            context.stored_sql_results, cmd.get_results().stdout.strip()))
Example #15
 def kill_master_process(self, ProcName=None):
     cmdString = 'ps -ef|grep postgres| grep %s | grep \'%s\'| awk \'{print $2}\'|xargs kill -9'%(self.pgport,ProcName) 
     cmd = Command('kill process on master', cmdStr = cmdString)
     cmd.run()
     tinctest.logger.info('run command %s'%cmdString)
     rc = cmd.get_results().rc    
     result = cmd.get_results().stdout
     tinctest.logger.info('Command returning, rc: %s, result: %s'%(rc,result))
Example #17
 def kill_first_mirror(self):
     mirror_data_loc = self.get_default_fs_loc(role='m',content=0)
     (host, port) = self.gpconfig.get_hostandport_of_segment(psegmentNumber = 0, pRole = 'm')    
     cmdString = 'ps -ef|grep -v grep|grep \'%s\'|awk \'{print $2}\'|xargs kill -9'%mirror_data_loc
     remote = Command(name ='kill first mirror', cmdStr = cmdString, ctxt=2, remoteHost=host)
     remote.run() 
     tinctest.logger.info('run command %s'%cmdString)
     rc = remote.get_results().rc    
     result = remote.get_results().stdout
     tinctest.logger.info('Command returning, rc: %s, result: %s'%(rc,result))
Example #18
def tuneGPTextRAM (dburl, ram_size):
    logger.info ('Tuning GPText:')
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
    query = """
            SELECT  gpsc.hostname as host,
                    pgfse.fselocation as datadir
                FROM pg_tablespace            as pgts,
                     pg_filespace             as pgfs,
                     pg_filespace_entry       as pgfse,
                     gp_segment_configuration as gpsc
            WHERE    pgts.spcfsoid = pgfse.fsefsoid
                AND pgfse.fsefsoid = pgfs.oid
                AND  pgfse.fsedbid = gpsc.dbid
                AND   pgts.spcname = 'pg_default'
                AND    pgfs.fsname = 'pg_system'
                AND   gpsc.content <> -1
            ORDER BY host, datadir
        """
    hostdirs = execute(dburl, query)
    for hdir in hostdirs:
        host, dir = hdir[0], hdir[1]
        logger.info ('    host: %s  dir: %s' % (host, dir))
        cmd = Command("Backup GPText config",
                      "cp %s/solr/jetty.conf %s/solr/jetty.conf_%s" % (dir, dir, st),
                      ctxt=REMOTE,
                      remoteHost=host)
        cmd.run(validateAfter=True)
        res = cmd.get_results()
        if res.rc != 0:
            logger.error('Failed to execute the copy command on host %s' % host)
            sys.exit(2)
        cmd = Command("Remove old jetty files",
                      "rm -f /tmp/jetty.*")
        cmd.run(validateAfter=True)
        res = cmd.get_results()
        if res.rc != 0:
            logger.error('Failed to remove old jetty files from the /tmp directory. Check /tmp access rights')
            sys.exit(2)
        cmd = Command("Getting Jetty config to local machine",
                      "scp %s:%s/solr/jetty.conf /tmp/jetty.conf" % (host, dir))
        cmd.run(validateAfter=True)
        res = cmd.get_results()
        if res.rc != 0:
            logger.error('Failed to load jetty.conf from %s:%s/solr to local machine. Check /tmp directory access rights' % (host, dir))
            sys.exit(2)
        editGPTextConfig(ram_size)
        cmd = Command("Copying Jetty config to target machine",
                      "scp /tmp/jetty.conf_new %s:%s/solr/jetty.conf" % (host, dir))
        cmd.run(validateAfter=True)
        res = cmd.get_results()
        if res.rc != 0:
            logger.error('Failed to load jetty.conf from local machine to %s:%s/solr. Check the target directory' % (host, dir))
            sys.exit(2)
    return 0
Example #19
 def kill_first_mirror(self):
     mirror_data_loc = self.get_default_fs_loc(role='m',content=0)
     gpconfig = GPDBConfig()
     (host, port) = gpconfig.get_hostandport_of_segment(psegmentNumber = 0, pRole = 'm')    
     cmdString = 'ps -ef|grep -v grep|grep \'%s\'|awk \'{print $2}\'|xargs kill -9'%mirror_data_loc
     remote = Command(name ='kill first mirror', cmdStr = cmdString, ctxt=2, remoteHost=host)
     remote.run() 
     tinctest.logger.info('run command %s'%cmdString)
     rc = remote.get_results().rc    
     result = remote.get_results().stdout
     tinctest.logger.info('Command returning, rc: %s, result: %s'%(rc,result))
Example #20
 def execute(self):
     execname = os.path.split(sys.argv[0])[-1]
     pickled_execname = pickle.dumps(execname) 
     pickled_operation = pickle.dumps(self.operation)
     cmd = Command('pickling an operation', '$GPHOME/sbin/gpoperation.py',
                   ctxt=REMOTE, remoteHost=self.host, stdin = pickled_execname + pickled_operation)
     cmd.run(validateAfter=True)
     logger.debug(cmd.get_results().stdout)
     ret = self.operation.ret = pickle.loads(cmd.get_results().stdout)
     if isinstance(ret, Exception):
         raise ret
     return ret
Example #21
 def setUp(self):
     # Assume setup is done if db exists
     output = PSQL.run_sql_command("select 'command_found_' || datname from pg_database where datname like '" + self.db_name + "'")
     if 'command_found_' + self.db_name in output:
         return
     cmd = Command('dropdb', "dropdb " + self.db_name)
     cmd.run(validateAfter=False)
     result = cmd.get_results()
     cmd = Command('createdb', "createdb " + self.db_name)
     cmd.run(validateAfter=True)
     result = cmd.get_results()
     PSQL.run_sql_file(local_path('setup.sql'), dbname = self.db_name)
Example #23
    def set_guc(self, guc_name, guc_value):

        logger.info('Configuring ' + guc_name + ' ...')
        cmd = Command("gpconfig " + guc_name,
                      "gpconfig -c " + guc_name + " -v " + guc_value)
        cmd.run()
        self.assertEqual(cmd.get_results().rc, 0, str(cmd))

        logger.info('gpstop -u to reload config files...')
        cmd = Command("gpstop -u", "gpstop -u")
        cmd.run()
        self.assertEqual(cmd.get_results().rc, 0, str(cmd))
Example #24
 def set_guc(self, guc_name, guc_value):
     # Set the guc value
     tinctest.logger.info('Configuring ' + guc_name +' ...')
     cmd_str='source ' + self.gphome+ '/greenplum_path.sh;gpconfig -c ' + guc_name + ' -v ' +guc_value +' --skipvalidation'
     cmd=Command("gpconfig " ,cmd_str) 
     cmd.run()
     self.assertTrue(int(cmd.get_results().rc) == 0,cmd_str)
     # Load the new value to the db
     tinctest.logger.info('gpstop -u to reload config files...')
     cmd_str2='source '+ self.gphome+ '/greenplum_path.sh;gpstop -u'
     cmd = Command("gpstop -u", cmd_str2)
     cmd.run()
     self.assertTrue(int(cmd.get_results().rc) == 0,cmd_str2)
Example #25
 def get_walsender_pid(self):
     ''' get wal sender pid '''
     pid_cmd = "ps -ef|grep \'%s\' |grep -v grep" % ('wal sender')
     cmd = Command('Get the pid of the wal sender process', cmdStr=pid_cmd)
     tinctest.logger.info('%s' % cmd)
     cmd.run(validateAfter=False)
     result = cmd.get_results()
     while (result.rc != 0):
         cmd.run(validateAfter=False)
         result = cmd.get_results()
     tinctest.logger.info(result)
     pid = result.stdout.splitlines()[0].split()[1].strip()
     return pid
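Note that the retry loop above spins with no delay and no upper bound, so it hangs forever if no wal sender process ever appears. A bounded variant (a sketch under that assumption; Command and tinctest come from the surrounding module):

import time

def get_walsender_pid_bounded(self, attempts=60, sleep_sec=1):
    ''' Like get_walsender_pid, but gives up after a fixed number of tries '''
    pid_cmd = "ps -ef|grep 'wal sender'|grep -v grep"
    for _ in range(attempts):
        cmd = Command('Get the pid of the wal sender process', cmdStr=pid_cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        if result.rc == 0 and result.stdout.strip():
            return result.stdout.splitlines()[0].split()[1].strip()
        time.sleep(sleep_sec)
    return None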
Example #26
    def set_guc(self, guc_name, guc_value):

        logger.info('Configuring ' + guc_name +' ...')
        cmd = Command("gpconfig " + guc_name,
                          "gpconfig -c " + guc_name + " -v " + guc_value)
        cmd.run()
        self.assertEqual(cmd.get_results().rc, 0, str(cmd))

        logger.info('gpstop -u to reload config files...')
        cmd = Command("gpstop -u",
                          "gpstop -u")
        cmd.run()
        self.assertEqual(cmd.get_results().rc, 0, str(cmd))
Example #27
 def set_guc(self, guc_name, guc_value):
     # Set the guc value
     tinctest.logger.info('Configuring ' + guc_name + ' ...')
     cmd_str = 'source ' + self.gphome + '/greenplum_path.sh;gpconfig -c ' + guc_name + ' -v ' + guc_value + ' --skipvalidation'
     cmd = Command("gpconfig ", cmd_str)
     cmd.run()
     self.assertTrue(int(cmd.get_results().rc) == 0, cmd_str)
     # Load the new value to the db
     tinctest.logger.info('gpstop -u to reload config files...')
     cmd_str2 = 'source ' + self.gphome + '/greenplum_path.sh;gpstop -u'
     cmd = Command("gpstop -u", cmd_str2)
     cmd.run()
     self.assertTrue(int(cmd.get_results().rc) == 0, cmd_str2)
Example #28
 def get_walsender_pid(self):
     ''' get wal sender pid '''
     pid_cmd = "ps -ef|grep \'%s\' |grep -v grep"  %  ('wal sender')
     cmd = Command('Get the pid of the wal sender process', cmdStr = pid_cmd)
     tinctest.logger.info ('%s' % cmd)
     cmd.run(validateAfter=False)
     result = cmd.get_results()
     while(result.rc !=0):
         cmd.run(validateAfter=False)
         result = cmd.get_results()
     tinctest.logger.info(result)
     pid = result.stdout.splitlines()[0].split()[1].strip()
     return pid
Example #29
def impl(context, use_old_format=False):
    if hasattr(context, 'backup_timestamp'):
        ts = context.backup_timestamp
    if hasattr(context, 'netbackup_service_host'):
        netbackup_service_host = context.netbackup_service_host
    if not hasattr(context, "dump_prefix"):
        context.dump_prefix = ''

    master_config_filename = os.path.join(
        master_data_dir, 'db_dumps', context.backup_timestamp[0:8],
        '%sgp_master_config_files_%s.tar' % (context.dump_prefix, ts))

    command_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (
        netbackup_service_host, master_config_filename)
    cmd = Command('Validate master config file', command_str)
    cmd.run(validateAfter=True)
    results = cmd.get_results().stdout.strip()
    if results != master_config_filename:
        raise Exception(
            'Expected Master Config file: %s and found: %s. Master Config file was not backed up to NetBackup server'
            % (master_config_filename, results))

    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    primary_segs = [
        seg for seg in gparray.getDbList()
        if seg.isSegmentPrimary(current_role=True)
    ]

    for seg in primary_segs:
        first_digit = 0 if use_old_format else seg.getSegmentContentId()
        segment_config_filename = os.path.join(
            seg.getSegmentDataDirectory(), 'db_dumps',
            context.backup_timestamp[0:8],
            '%sgp_segment_config_files_%d_%s_%s.tar' %
            (context.dump_prefix, first_digit, seg.getSegmentDbId(),
             context.backup_timestamp))
        command_str = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (
            netbackup_service_host, segment_config_filename)
        cmd = Command('Validate segment config file',
                      command_str,
                      ctxt=REMOTE,
                      remoteHost=seg.getSegmentHostName())
        cmd.run(validateAfter=True)
        results = cmd.get_results().stdout.strip()
        if results != segment_config_filename:
            raise Exception(
                'Expected Segment Config file: %s and found: %s. Segment Config file was not backed up to NetBackup server'
                % (segment_config_filename, results))
Example #30
    def setUp(self):
        # Assume setup is done if db exists
        output = PSQL.run_sql_command(
            "select 'command_found_' || datname from pg_database where datname like '"
            + self.db_name + "'")
        if 'command_found_' + self.db_name in output:
            return
        cmd = Command('dropdb', "dropdb " + self.db_name)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        cmd = Command('createdb', "createdb " + self.db_name)
        cmd.run(validateAfter=True)
        result = cmd.get_results()

        MYD = os.path.abspath(os.path.dirname(__file__))

        #First create the schemas, before loading the data
        runfile = MYD + '/' + 'mpp18457_repro_setup.sql'
        PSQL.run_sql_file(runfile,
                          out_file=runfile.replace('.sql', '') + '.out',
                          dbname=self.db_name)

        # Copy and unzip data files
        if os.path.exists(MYD + '/compressed_data/' + 'mpp18457.tar.gz'):
            run_shell_command(
                'cp ' + MYD + '/compressed_data/' + 'mpp18457.tar.gz ' + MYD +
                '/' + 'mpp18457.tar.gz ', 'Copy compressed data')
            run_shell_command('gunzip ' + MYD + '/' + 'mpp18457.tar.gz',
                              'Unzip compressed data')
            run_shell_command(
                'tar -xvf ' + MYD + '/' + 'mpp18457.tar -C ' + MYD,
                'Untar archive')

        mypath = MYD + '/mpp18457/'

        filelist = [
            f for f in os.listdir(mypath)
            if os.path.isfile(mypath + f) and f.endswith('dmp')
        ]

        # Set-up schema, data
        for i in range(len(filelist)):
            runfile = str(MYD) + str('/mpp18457/') + str(filelist[i])
            PSQL.run_sql_file(runfile,
                              out_file=runfile.replace('.dmp', '') + '.out',
                              dbname=self.db_name)

        return True
Example #31
def verify_timestamps_on_master_with_nbu(timestamp, dump_type):
    list_cmd = 'ls -l %s/db_dumps/%s/*%s* | wc -l' % (master_data_dir,
                                                      timestamp[:8], timestamp)
    cmd = Command('verify timestamps on master', list_cmd)
    cmd.run(validateAfter=True)
    expected_num_files = '8' if dump_type == 'incremental' else '6'
    verify_num_files_with_nbu(cmd.get_results(), expected_num_files, timestamp)
Example #32
 def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
     """
     Check for the gpfdist process
      Wait up to 60s for gpfdist to start, else raise an exception
     """
     if port is None:
         port = self.port
     count = 0
     # handle escape of string's quotation for localhost and remote host
     if self.host in ('127.0.0.1',socket.gethostbyname(socket.gethostname()),socket.gethostname(),'localhost'):
         cmdStr = "%s -ef | grep \'gpfdist -d %s -p %s\' | grep -v grep"%(self.ps_command, self.dir, port)
     else:
         cmdStr = 'gpssh -h %s -e "%s -ef | grep \'gpfdist -d %s -p %s\' |grep -v grep"'%(self.host, self.ps_command, self.dir, port)
     cmd = Command(self.name, cmdStr, self.ctxt, self.host)
      # retry the command once per second, up to 'wait' times
     while count < wait:
         cmd.run()      
         results = cmd.get_results()      
         if results.rc == 0:
             return True                
         count = count + 1
         time.sleep(1)
     if raise_assert:
         raise GPFDISTError("Could not start gpfdist process")
     else :
         return False
Example #33
def get_full_timestamp_for_incremental_with_nbu(netbackup_service_host, netbackup_block_size, incremental_timestamp):
    if dump_prefix:
        get_inc_files_cmd = (
            "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*_increments"
            % (netbackup_service_host, dump_prefix)
        )
    else:
        get_inc_files_cmd = (
            "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*_increments"
            % netbackup_service_host
        )

    cmd = Command("Query NetBackup server to get the list of increments files backed up", get_inc_files_cmd)
    cmd.run(validateAfter=True)
    files_list = cmd.get_results().stdout.split("\n")

    for line in files_list:
        fname = line.strip()
        restore_file_with_nbu(netbackup_service_host, netbackup_block_size, fname)
        contents = get_lines_from_file(fname)
        if incremental_timestamp in contents:
            full_timestamp = get_timestamp_from_increments_filename(fname)
            return full_timestamp

    return None
Example #34
    def drop_database(dbname, retries=5, sleep_interval=5):
        """
        Execute dropdb against the given database.

        @type dbname: string
        @param dbname: Name of the database to be deleted

        @type retires: integer
        @param retries: Number of attempts to drop the database.

        @type sleep_interval: integer
        @param sleep_interval: Time in seconds between retry attempts

        @rtype: boolean
        @return: True if successful, False otherwise

        @raise PSQLException: When the database does not exist
        """
        # TBD: Use shell when available
        if not PSQL.database_exists(dbname):
            tinctest.logger.error("Database %s does not exist." % dbname)
            raise PSQLException("Database %s does not exist" % dbname)
        cmd = Command(name="drop database", cmdStr="dropdb %s" % (dbname))
        tinctest.logger.debug("Dropping database: %s" % cmd)
        count = 0
        while count < retries:
            cmd.run(validateAfter=False)
            result = cmd.get_results()
            tinctest.logger.debug("Output - %s" % result)
            if result.rc == 0 and not result.stderr:
                return True
            time.sleep(sleep_interval)
            count += 1
        return False
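A usage sketch for the helper above, assuming it is exposed as a PSQL static method (as the missing self suggests); the database name and retry values are illustrative:

# Hypothetical call site: retry dropping a test database before re-creating it.
if not PSQL.drop_database('testdb', retries=3, sleep_interval=2):
    tinctest.logger.warning('testdb still present after 3 dropdb attempts')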
Example #35
File: gpfdist.py Project: 50wu/gpdb
 def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
     """
     Check for the gpfdist process
      Wait up to 60s for gpfdist to start, else raise an exception
     @var wait: wait at least 60s for gpfdist
     @var port: Port Number
     @var raise_assert: raise gpfdist error by default
     """
     if port is None:
         port = self.port
     process_started = False
     count = 0
     while (not process_started and count<wait):
         cmd_str = "%s -ef | grep \"gpfdist -p %s\" | grep -v grep" % (self.ps_command, port)
         cmd = Command(name='check for gpfdist', cmdStr=cmd_str)
         cmd.run()
         
         content = cmd.get_results().stdout
         if len(content)>0:
             if content.find("gpfdist -p %s" % port)>0:
                 process_started = self.is_gpfdist_connected(port)
                 if process_started:
                     return True
         count = count + 1
         time.sleep(1)
     if raise_assert:
         raise GpfdistError("Could not start gpfdist process")
     else:
         print "Could not start gpfdist process"
Example #36
    def test00_pg_hba_conf_file(self):
        os.environ[self.GP_COMMAND_FAULT_POINT] = 'gpexpand tar segment template'

        cmd = Command(name='run gpexpand', cmdStr='gpexpand -i %s' % (self.EXPANSION_INPUT_FILE))
        with self.assertRaisesRegexp(ExecutionError, 'Fault Injection'):
            cmd.run(validateAfter=True)

        #Read from the pg_hba.conf file and ensure that
        #The address of the new hosts is present.
        cmd = Command(name='get the temp pg_hba.conf file',
                      cmdStr="ls %s" % os.path.join(os.path.dirname(self.MASTER_DATA_DIRECTORY),
                                                    'gpexpand*',
                                                    'pg_hba.conf'))
        cmd.run(validateAfter=True)
        results = cmd.get_results()
        temp_pg_hba_conf = results.stdout.strip()

        actual_values = set()
        expected_values = set([self.primary_host_address, self.mirror_host_address])
        with open(temp_pg_hba_conf) as f:
            for line in f:
                if line.strip() == '# %s' % self.primary_host_name or\
                   line.strip() == '# %s' % self.mirror_host_name:
                    address = f.next().strip().split()[3]
                    address = address[:address.rfind('/')]
                    actual_values.add(address)

        self.assertEqual(actual_values, expected_values)

        GpStart(name='start the database in master only mode', masterOnly=True).run(validateAfter=True)
        Command(name='rollback the expansion', cmdStr='gpexpand -r').run(validateAfter=True)
        GpStart(name='start the database').run(validateAfter=True)
Example #37
def impl(context):
    cmd = """ps ux | grep "/bin/postgres \-D %s " | grep -v grep""" % (context.remote_mirror_datadir)
    cmd=Command(name='user command', cmdStr=cmd, ctxt=REMOTE, remoteHost=context.remote_mirror_segdbname)
    cmd.run(validateAfter=True)
    res = cmd.get_results()
    if not res.stdout.strip():
        raise Exception('Mirror segment "%s" not active on "%s"' % (context.remote_mirror_datadir, context.remote_mirror_segdbname))
Example #38
def get_latest_full_ts_with_nbu(dbname, backup_dir, dump_prefix, netbackup_service_host, netbackup_block_size):
    if dump_prefix:
        get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*.rpt" % (netbackup_service_host, dump_prefix)
    else:
        get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*.rpt" % netbackup_service_host

    cmd = Command("Query NetBackup server to get the list of report files backed up", get_rpt_files_cmd)
    cmd.run(validateAfter=True)
    files_list = cmd.get_results().stdout.split('\n')

    for line in files_list:
        fname = line.strip()
        if fname == '':
            continue
        if backup_dir not in fname:
            continue
        if ("No object matched the specified predicate" in fname) or ("No objects of the format" in fname):
            return None
        restore_file_with_nbu(netbackup_service_host, netbackup_block_size, fname)
        timestamp = get_full_ts_from_report_file(dbname, fname, dump_prefix, netbackup_service_host=netbackup_service_host, netbackup_block_size=netbackup_block_size)
        logger.debug('Timestamp = %s' % timestamp)
        if timestamp is not None:
            return timestamp

    raise Exception('No full backup found for given incremental on the specified NetBackup server')
Example #39
 def invoke_sigterm_and_verify(self):
     ''' Invoke sigterm on wal receiver and verify that a new process is spawned after '''
     gpact_stdby = GpactivateStandby()
     standby_host = gpact_stdby.get_current_standby()
     standby_port = gpact_stdby.get_standby_port()
     wal_rec_pid_1 = self.pgutil.get_pid_by_keyword(
         host=standby_host,
         pgport=standby_port,
         keyword='wal receiver process',
         option='')
     sig_cmd = "gpssh -h %s -e 'kill -15 %s'" % (standby_host,
                                                 wal_rec_pid_1)
      cmd = Command('Issue SIGTERM to wal receiver process', cmdStr=sig_cmd)
     tinctest.logger.info('%s' % cmd)
     cmd.run(validateAfter=True)
     result = cmd.get_results()
     if result.rc != 0:
         return False
     wal_rec_pid_2 = self.pgutil.get_pid_by_keyword(
         host=standby_host,
         pgport=standby_port,
         keyword='wal receiver process',
         option='')
     if wal_rec_pid_1 == wal_rec_pid_2:
         return False
     return True
Example #40
def get_info():
    """
    Get the current platform
    @return: type platform of the current system
    @rtype : String
    """
    myos = platform.system()
    if myos == "Darwin":
        return 'OSX'
    elif myos == "Linux":
        if os.path.exists("/etc/SuSE-release"):
            return 'SUSE'
        elif os.path.exists("/etc/redhat-release"):
            cmd_str = "cat /etc/redhat-release"
            cmd = Command("run cat for RHEL version", cmd_str)
            cmd.run()
            result = cmd.get_results()
            msg = result.stdout
            if msg.find("5") != -1:
                return 'RHEL5'
            else:
                return 'RHEL6'
    elif myos == "SunOS":
        return 'SOL'
    return None
Example #41
    def run_gpfaultinjector(self, fault_type, fault_name):
        cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format(
            fault_type, fault_name)
        cmd = Command(cmd_str, cmd_str)
        cmd.run()

        return cmd.get_results()
Example #42
    def run_gpfaultinjector(self, fault_type, fault_name):
        cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format(
                        fault_type, fault_name)
        cmd = Command(cmd_str, cmd_str)
        cmd.run()

        return cmd.get_results()
Example #43
    def execute(self):
        entry = ValidateVerificationEntry(token = self.token).run()
        if not entry['verdone']:   
            raise WrongStateError("Only finished verification tasks may be cleaned up.")

        path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
        Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
        #RemoveTree(path).run()

        to_clean = ValidateVerification(content = entry['vercontent'],
                                        primaries_only = False).run()
        pool = WorkerPool(min(len(to_clean), self.batch_default))
        for seg in to_clean:
            host = seg.getSegmentHostName()
            path = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify', "*%s*" % self.token)
            cmd = Command('cleanup', 'rm -f %s' % path, ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)

        logger.info('Waiting for clean commands to complete...')
        pool.wait_and_printdots(len(to_clean))

        for cmd in pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send cleanup on %s' % cmd.host)
                logger.error('Error: %s' % res.stderr)
                raise CleanVerificationError()
        RemoveVerificationEntry(token = self.token).run()
        logger.info('Verification %s has been cleaned.' % self.token)
Example #44
    def test_autovacuum_signaling(self):
        """
        Raise the nextXid to oldest_frozenxid + autovacuum_freeze_max_age.
        Run a transaction.
        Ensure that no autovacuum daemon is started.
        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising master xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        # Burn a transaction to trigger any undesirable behavior that we're disabling.
        with dbconn.connect(dburl) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ps -ef | grep -v grep | grep postgres | grep autovacuum')
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
Example #45
    def run_repair_script(self, repair_script_dir, dbname=None, alldb=True, online=False, testname=None, outputFile=None, host=None, port=None):
        '''
        @summary : Run the repair script generated by gpcheckcat
        '''

        if not os.path.exists(repair_script_dir):
            repair_script_dir = '%s/%s' % (self.tinchome, repair_script_dir)

        tinctest.logger.debug('Using repair script dir ... %s' % repair_script_dir)
        repair_scripts = glob.glob(repair_script_dir + '/*.sh')

        ok = 0
        for repair_script in repair_scripts:
            repair_cmd = "/bin/bash %s" % str(repair_script).strip()
            tinctest.logger.info('Running repair script ... %s' % repair_cmd)
            if host and host not in (socket.gethostname(), 'localhost'):
                cmd = Command(name=' Running Gpcheckcat.. ', cmdStr = repair_cmd, ctxt=REMOTE, remoteHost=host)
            else:
                cmd = Command(name=' Running Gpcheckcat.. ', cmdStr = repair_cmd)
            cmd.run(validateAfter=False)
            result = cmd.get_results()
            # Get Error Code from running repair script
            if result.rc != 0:
                ok = result.rc

        if ok != 0:
            return False

        return True
Example #46
    def test_autovacuum_signaling_on_segment(self):
        """
        Same as above, but on a segment.
        """
        # connect to the master to build gparray
        primary, _ = self._get_primary_mirror_pair()
        logger.info('Isolated segment %d at %s:%d' % (primary.dbid, primary.hostname, primary.port))
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)

        with dbconn.connect(dburl, utility=True) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising segment xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        with dbconn.connect(dburl, utility=True) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ssh %s ps -ef | grep -v grep | grep postgres | grep autovacuum' % primary.hostname)
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
Example #47
    def check_integrityresults(self):
        """
        Check gpverify results from the last token
        @return: True or False, -1 is still running
        """
        sql = "select vertoken from gp_verification_history order by 1 desc limit 1"
        out= PSQL.run_sql_command(sql, flags='-q -t', dbname='postgres')
        last_token = out.strip()

        if not last_token:
            return 0 # No records of gpverify

        cmd = Command(name='gpverify', cmdStr="gpverify --results --token %s" % (last_token))
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        state = result.stdout[len(result.stdout)-2]
        if state.find(self.GPVERIFY_STATE["success"])>0:
            tinctest.logger.info("gpverify at %s: Successful" % last_token)
            return True
        elif state.find(self.GPVERIFY_STATE["running"])>0:
            tinctest.logger.info("gpverify at %s: Running" % last_token)
            return -1
        elif state.find(self.GPVERIFY_STATE["fail"])>0:
            tinctest.logger.info("gpverify at %s: Failed" % last_token)
            return False
        else:
            tinctest.logger.info("gpverify has not start")
            return 0
Example #48
def impl(context):
    host = gethostname()
    psql_cmd = 'psql -U "gpadmin/kerberos-test" -h %s template1 -c """select 1;"""' % host
    cmd = Command(name='psql connection with kerberos user',
           cmdStr=psql_cmd)
    cmd.run(validateAfter=True)
    results = cmd.get_results()
Example #49
    def check_integrityresults(self):
        """
        Check gpverify results from the last token
        @return: True or False, -1 is still running
        """
        sql = "select vertoken from gp_verification_history order by 1 desc limit 1"
        out = PSQL.run_sql_command(sql, flags='-q -t', dbname='postgres')
        last_token = out.strip()

        if not last_token:
            return 0  # No records of gpverify

        cmd = Command(name='gpverify',
                      cmdStr="gpverify --results --token %s" % (last_token))
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        state = result.stdout[len(result.stdout) - 2]
        if state.find(self.GPVERIFY_STATE["success"]) > 0:
            tinctest.logger.info("gpverify at %s: Successful" % last_token)
            return True
        elif state.find(self.GPVERIFY_STATE["running"]) > 0:
            tinctest.logger.info("gpverify at %s: Running" % last_token)
            return -1
        elif state.find(self.GPVERIFY_STATE["fail"]) > 0:
            tinctest.logger.info("gpverify at %s: Failed" % last_token)
            return False
        else:
            tinctest.logger.info("gpverify has not start")
            return 0
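One caveat on the integrity-check variants above: result.stdout[len(result.stdout) - 2] selects the second-to-last character of the output, not the second-to-last line, so the find() tests can only ever match single-character state strings. If the intent is to read the final status line (an assumption on our part), a sketch:

# Assumes the gpverify state appears on the last non-empty line of
# "gpverify --results" output.
lines = [l for l in result.stdout.splitlines() if l.strip()]
state = lines[-1] if lines else ''
if self.GPVERIFY_STATE["success"] in state:
    tinctest.logger.info("gpverify at %s: Successful" % last_token)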
Example #50
 def _run_remote_command(self, host, command):
     rmt_cmd = "gpssh -h %s -e '%s' " % (host, command)
     cmd = Command(name='Running a remote command', cmdStr=rmt_cmd)
     cmd.run(validateAfter=False)
     result = cmd.get_results()
     tinctest.logger.info('%s\n%s' % (rmt_cmd, result.stdout))
     return result.stdout
Example #51
def get_full_timestamp_for_incremental_with_nbu(netbackup_service_host,
                                                netbackup_block_size,
                                                incremental_timestamp):
    if dump_prefix:
        get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*_increments" % (
            netbackup_service_host, dump_prefix)
    else:
        get_inc_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*_increments" % netbackup_service_host

    cmd = Command(
        "Query NetBackup server to get the list of increments files backed up",
        get_inc_files_cmd)
    cmd.run(validateAfter=True)
    files_list = cmd.get_results().stdout.split('\n')

    for line in files_list:
        fname = line.strip()
        restore_file_with_nbu(netbackup_service_host, netbackup_block_size,
                              fname)
        contents = get_lines_from_file(fname)
        if incremental_timestamp in contents:
            full_timestamp = get_timestamp_from_increments_filename(fname)
            return full_timestamp

    return None
Example #52
File: verify.py Project: 50wu/gpdb
    def _run_remote_command(self, host, command):
        rmt_cmd = "gpssh -h %s -e '%s' " % (host, command)
        cmd = Command(name='Running a remote command', cmdStr=rmt_cmd)
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        tinctest.logger.info('%s\n%s' % (rmt_cmd, result.stdout))
        return result.stdout
Exemplo n.º 53
0
def check_file_dumped_with_nbu(context,
                               filetype=None,
                               path=None,
                               dbid=1,
                               hostname=None):
    if filetype and path:
        raise Exception(
            "Cannot supply both a file type and a file path to check_file_dumped_with_nbu"
        )
    if filetype is None and path is None:
        raise Exception(
            "Cannot call check_file_dumped_with_nbu with no type or path argument"
        )
    if filetype:
        path = context.generate_filename(filetype, dbid=dbid)
    command_string = "gp_bsa_query_agent --netbackup-service-host %s --netbackup-filename %s" % (
        context.netbackup_service_host, path)
    logger.debug("Command string inside 'check_file_dumped_with_nbu': %s\n",
                 command_string)
    if hostname is None:
        cmd = Command("Querying NetBackup server to check for dumped file",
                      command_string)
    else:
        cmd = Command("Querying NetBackup server to check for dumped file",
                      command_string,
                      ctxt=REMOTE,
                      remoteHost=hostname)

    cmd.run(validateAfter=True)
    return cmd.get_results().stdout.strip() == path
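Typical call sites, one per calling mode; the filetype name, path, and hostname below are assumptions for illustration:

# By file type: resolve the master (dbid=1) file name via the context.
if not check_file_dumped_with_nbu(context, filetype='report', dbid=1):
    raise Exception('report file was not dumped to NetBackup')

# By explicit path, checked from a remote segment host.
found = check_file_dumped_with_nbu(context,
                                   path='/backups/gp_dump_20160101010101.rpt',
                                   hostname='sdw1')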
Exemplo n.º 54
0
    def drop_database(dbname, retries=5, sleep_interval=5):
        """
        Execute dropdb against the given database.

        @type dbname: string
        @param dbname: Name of the database to be deleted

        @type retries: integer
        @param retries: Number of attempts to drop the database.

        @type sleep_interval: integer
        @param sleep_interval: Time in seconds between retry attempts

        @rtype: boolean
        @return: True if successful, False otherwise

        @raise PSQLException: When the database does not exist
        """
        # TBD: Use shell when available
        if not PSQL.database_exists(dbname):
            tinctest.logger.error("Database %s does not exist." % dbname)
            raise PSQLException('Database %s does not exist' % dbname)
        cmd = Command(name='drop database', cmdStr='dropdb %s' % (dbname))
        tinctest.logger.debug("Dropping database: %s" % cmd)
        count = 0
        while count < retries:
            cmd.run(validateAfter=False)
            result = cmd.get_results()
            tinctest.logger.debug("Output - %s" % result)
            if result.rc == 0 and not result.stderr:
                return True
            time.sleep(sleep_interval)
            count += 1
        return False
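Hypothetical usage (the database name and retry settings are chosen purely for illustration); note that a missing database raises PSQLException rather than returning False:

try:
    if not drop_database('testdb_scratch', retries=10, sleep_interval=3):
        tinctest.logger.error('dropdb kept failing; manual cleanup needed')
except PSQLException:
    tinctest.logger.info('database was never created, nothing to drop')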
Exemplo n.º 55
0
def get_ddboost_backup_directory():
    """
    The gpddboost --show-config command gives us all the ddboost
    configuration details. The line starting with
    'Default Backup Directory:' gives us the backup directory
    configured with ddboost.
    """
    cmd_str = 'gpddboost --show-config'
    cmd = Command('Get the ddboost backup directory', cmd_str)
    cmd.run(validateAfter=True)

    config = cmd.get_results().stdout.splitlines()
    for line in config:
        if line.strip().startswith("Default Backup Directory:"):
            ddboost_dir = line.split(':')[-1].strip()
            if not ddboost_dir:
                logger.error(
                    "Expecting format: Default Backup Directory:<dir>")
                raise Exception(
                    "DDBOOST default backup directory is not configured, or the format of the line has changed"
                )
            return ddboost_dir

    logger.error("Could not find Default Backup Directory:<dir> in stdout")
    raise Exception("Output: %s from command %s not in expected format." %
                    (config, cmd_str))
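The scan itself is easy to exercise without a DD Boost appliance by extracting it over a string; the sample config text below is fabricated for illustration and is not captured gpddboost output:

def parse_backup_directory(config_text):
    # Same scan as get_ddboost_backup_directory(), over an in-memory string.
    for line in config_text.splitlines():
        if line.strip().startswith("Default Backup Directory:"):
            return line.split(':')[-1].strip()
    return None

sample = "Hostname:ddboost01\nDefault Backup Directory:/backup/DCA"
assert parse_backup_directory(sample) == '/backup/DCA'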
Exemplo n.º 56
0
    def test00_pg_hba_conf_file(self):
        os.environ[self.GP_COMMAND_FAULT_POINT] = 'gpexpand tar segment template'

        cmd = Command(name='run gpexpand', cmdStr='gpexpand -D %s -i %s' % (self.TEST_DB, self.EXPANSION_INPUT_FILE))
        with self.assertRaisesRegexp(ExecutionError, 'Fault Injection'):
            cmd.run(validateAfter=True)
        
        # Read from the pg_hba.conf file and ensure that
        # the addresses of the new hosts are present.
        cmd = Command(name='get the temp pg_hba.conf file', 
                      cmdStr="ls %s" % os.path.join(os.path.dirname(self.MASTER_DATA_DIRECTORY),
                                                    'gpexpand*',
                                                    'pg_hba.conf'))
        cmd.run(validateAfter=True)
        results = cmd.get_results()
        temp_pg_hba_conf = results.stdout.strip() 

        actual_values = set()
        expected_values = set([self.primary_host_address, self.mirror_host_address])
        with open(temp_pg_hba_conf) as f:
            for line in f:
                if line.strip() == '# %s' % self.primary_host_name or \
                   line.strip() == '# %s' % self.mirror_host_name:
                    address = f.next().strip().split()[3]
                    address = address[:address.rfind('/')]
                    actual_values.add(address)

        self.assertEqual(actual_values, expected_values)

        GpStart(name='start the database in master only mode', masterOnly=True).run(validateAfter=True)
        Command(name='rollback the expansion', cmdStr='gpexpand -r -D %s' % self.TEST_DB).run(validateAfter=True)
        GpStart(name='start the database').run(validateAfter=True)
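The parsing in the with-block assumes gpexpand writes each new entry as a '# <hostname>' comment followed by the pg_hba.conf line itself. Extracted as a standalone helper under that same layout assumption:

def addresses_for_hosts(pg_hba_path, hostnames):
    # Collect the CIDR-stripped addresses that follow '# <hostname>'
    # comment lines (pg_hba entry: type, database, user, address, method).
    markers = ['# %s' % h for h in hostnames]
    found = set()
    with open(pg_hba_path) as f:
        for line in f:
            if line.strip() in markers:
                address = f.next().strip().split()[3]  # Python 2 file iterator
                found.add(address[:address.rfind('/')])
    return found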
Exemplo n.º 57
0
def get_latest_full_ts_with_nbu(context):
    if context.dump_prefix:
        get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=%sgp_dump_*.rpt" % \
                            (context.netbackup_service_host, context.dump_prefix)
    else:
        get_rpt_files_cmd = "gp_bsa_query_agent --netbackup-service-host=%s --netbackup-list-dumped-objects=gp_dump_*.rpt" % context.netbackup_service_host

    cmd = Command("Query NetBackup server to get the list of report files backed up", get_rpt_files_cmd)
    cmd.run(validateAfter=True)
    files_list = cmd.get_results().stdout.strip().split('\n')

    for line in files_list:
        fname = line.strip()
        if fname == '':
            continue
        if context.backup_dir is not None and context.backup_dir not in fname:
            continue
        if ("No object matched the specified predicate" in fname) or ("No objects of the format" in fname):
            return None
        restore_file_with_nbu(context, path=fname)
        timestamp = get_full_ts_from_report_file(context, report_file=fname)
        logger.debug('Timestamp = %s' % timestamp)
        if timestamp is not None:
            return timestamp

    raise Exception('No full backup found for given incremental on the specified NetBackup server')
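get_full_ts_from_report_file is referenced above but not shown. A minimal sketch, assuming the report file carries a 'Timestamp Key: <14 digits>' line; that format, and ignoring the context argument here, are assumptions for illustration:

import re

def get_full_ts_from_report_file(context, report_file):
    # Return the 14-digit timestamp recorded in a gp_dump report file,
    # or None if no 'Timestamp Key:' line is found (format assumed above).
    with open(report_file) as f:
        for line in f:
            match = re.match(r'\s*Timestamp Key:\s*(\d{14})\s*$', line)
            if match:
                return match.group(1)
    return None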