Example #1
    def create_filespace(self, filespace):
        '''
        @param filespace: Filespace Name
        '''
        if self.exists(filespace) is True:
            tinctest.logger.info('Filespace %s exists' % filespace)
            return

        file1 = local_path(filespace)
        f1 = open(file1+".fs.config","w")
        f1.write('filespace:%s\n' % filespace)
       
        for record in self.config.record:
            if record.role:
                fileloc = '%s/%s/primary' % (os.path.split(record.datadir)[0], filespace)
            else:
                fileloc = '%s/%s/mirror' % (os.path.split(record.datadir)[0], filespace)
            # @todo: use a common utility to create/delete remotely
            cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'"  % (record.hostname, fileloc, fileloc)
            run_shell_command(cmd)
            f1.write("%s:%s:%s/%s\n" % (record.hostname, record.dbid, fileloc, os.path.split(record.datadir)[1])) 
        f1.close()
        result = self.run(config=f1.name)
        if result.rc != 0:
            raise GPfilespaceException('"gpfilespace creation filespace FAILED".  Output = %s ' % result.stdout)
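All of the snippets on this page call the same run_shell_command helper, but none of them defines it. Judging from the call sites — some pass only a command string, some add a short description for logging, and some pass a results dict that is read back afterwards — the helper roughly behaves like the following minimal sketch. This is an assumption reconstructed from usage, not the library's actual implementation:

    import subprocess

    def run_shell_command(cmd_str, cmdname=None, results=None):
        # Run the command through a shell, capturing both output streams.
        proc = subprocess.Popen(cmd_str, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        stdout, stderr = proc.communicate()
        # Call sites that pass a results dict read back 'rc', 'stdout' and 'stderr'.
        if results is not None:
            results['rc'] = proc.returncode
            results['stdout'] = stdout
            results['stderr'] = stderr
        # Call sites like "ok = run_shell_command(cmd)" rely on a truthy
        # return value meaning the command exited successfully.
        return proc.returncode == 0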
Example #2
    def _do_gpinitstandby(self):
        """
        Initializes a standby host in the second host in the list self.hosts.
        setUpModule would have taken care of installing the binaries on all the hosts.
        Hence all we have to do here is a gpinitstandby
        """
        standby_host = self.hosts[1]
        tinctest.logger.info("Initializing standby master on host: %s" %
                             standby_host)

        # Create master directory on the standby host
        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(
            "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'" %
            (standby_host, self.testcase_master_dir, self.testcase_master_dir),
            'create master dir on standby host', res)
        if res['rc'] > 0:
            raise GPExpandTestCaseException(
                "Failed to create the master directory on the standby host")

        # Do gpinitstandby
        cmd = GpInitStandby(standby_host,
                            mdd=os.path.join(self.testcase_master_dir,
                                             'gpseg-1'))
        result = cmd.run(validate=False)
        if result.rc > 0:
            tinctest.logger.error(
                "gpinitstandby failed with an error code: %s" % result)
            raise GPExpandTestCaseException(
                "gpinitstandby failed with an error code. Failing the test module"
            )
        gpsc = PSQL.run_sql_command("SELECT * FROM gp_segment_configuration")
        tinctest.logger.info("Segment configuration: %s" % gpsc)
Example #3
    def run(self, validate=True):
        tinctest.logger.info("Running gpseginstall: %s" % self)
        with open(self.hostfile, 'w') as f:
            for host in self.hosts[1:]:
                f.write(host)
                f.write('\n')

        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command('gpssh-exkeys -f %s' % self.hostfile, 'gpssh-exkeys',
                          res)

        if res['rc'] > 0:
            raise Exception("Failed to do gpssh-exkeys: %s" % res[stderr])

        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(
            "gpssh -f %s -e 'mkdir -p %s'" % (self.hostfile, self.gphome),
            'create gphome directories', res)
        if res['rc'] > 0:
            raise Exception(
                "Failed to create gphome directories on segments: %s" %
                res['stderr'])

        Command.run(self, validateAfter=validate)
        result = self.get_results()
        return result
Example #4
    def setUpClass(self):
        super(MapreduceMPPTestCase, self).setUpClass()
        gppkg = Gppkg()
        gppkg.gppkg_install(product_version, 'plperl')
        setup_command = "create language plperl;"
        PSQL.run_sql_command(setup_command,
                             dbname=os.environ.get('PGDATABASE'))

        "compile functions.c and build functions.so"
        makeLog = local_path('testBuildSOLog.out')
        cmdMake = 'cd ' + local_path('c_functions') + ' && make clean && make'
        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(cmdMake, 'compile functions.c', res)
        with open(makeLog, 'w') as make_log:
            make_log.write(res['stdout'])
        if res['rc']:
            raise Exception('failed to compile functions.c into functions.so')
        so_dir = local_path('c_functions')
        sharedObj = local_path('c_functions/functions.so')
        # if not os.path.isfile(sharedObj):
        #     raise gptest.GPTestError('so file does not exist')

        # For multinode cluster, need to copy shared object tabfunc_gppc_demo.so to all primary segments
        if gpdbconfig.is_multinode():
            res = {'rc': 0, 'stderr': '', 'stdout': ''}
            hosts = gpdbconfig.get_hosts(segments=True)
            scp_cmd = 'gpscp  -h ' + ' -h '.join(map(
                str, hosts)) + ' ' + sharedObj + ' =:%s' % so_dir
            run_shell_command(scp_cmd, 'copy functions.so to segments', res)
            if res['rc']:
                raise Exception(
                    'Could not copy shared object to primary segment')
Example #5
 def put_file_in_hdfs(self, input_path, hdfs_path):
     if hdfs_path.rfind('/') > 0:
         hdfs_dir = hdfs_path[:hdfs_path.rfind('/')]
         cmd_str = "%s/bin/hdfs dfs -mkdir -p %s" %(self.HADOOP_ENVS['HADOOP_HOME'], hdfs_dir)
         run_shell_command(cmd_str, "Creating parent HDFS dir for path %s" %input_path)
     cmd_str = "%s/bin/hdfs dfs -put %s %s" %(self.HADOOP_ENVS['HADOOP_HOME'], input_path, hdfs_path)
     run_shell_command(cmd_str, "Copy to HDFS : file %s" %input_path)
Example #6
    def test_option_port_offset(self):
        """
        primary port + offset = mirror database port
        primary port + (2 * offset) = mirror replication port
        primary port + (3 * offset) = primary replication port
        """
        gprecover = GpRecover()
        port_offset = 500
        self._setup_gpaddmirrors(port_offset = port_offset)
        self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with non default port_offset', res)
        self.assertEqual(0, res['rc'])
        query_ports = 'SELECT port, replication_port FROM gp_segment_configuration WHERE content = 0 ORDER BY preferred_role DESC;'
        result = PSQL.run_sql_command(query_ports, flags='-q -t', dbname='template1')
        ports = result.strip().split('\n')
        primary_ports = ports[0]
        mirror_ports = ports[1]
        primary_ports = primary_ports.split('|')
        primary_ports = [port.strip() for port in primary_ports]
        primary_db_port = int(primary_ports[0])
        primary_replic_port = int(primary_ports[1])
        mirror_ports = mirror_ports.split('|')
        mirror_ports = [port.strip() for port in mirror_ports]
        mirror_db_port = int(mirror_ports[0])
        mirror_replic_port = int(mirror_ports[1])  
        self.assertEqual(primary_db_port + port_offset, mirror_db_port)
        self.assertEqual(primary_db_port + 2*port_offset, mirror_replic_port)
        self.assertEqual(primary_db_port + 3*port_offset, primary_replic_port)
        gprecover.wait_till_insync_transition()
        self.verify_config_file_with_gp_config()
        self.check_mirror_seg()
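As a worked example of the offset arithmetic asserted above, take the test's port_offset of 500 and a hypothetical primary database port of 40000 (the base port is not fixed by the test):

    primary_db_port = 40000  # hypothetical base port, not from the test
    port_offset = 500
    assert primary_db_port + port_offset == 40500      # mirror database port
    assert primary_db_port + 2 * port_offset == 41000  # mirror replication port
    assert primary_db_port + 3 * port_offset == 41500  # primary replication port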
Example #7
    def create_filespace(self, filespace):
        '''
        @param filespace: Filespace Name
        '''
        if self.exists(filespace) is True:
            tinctest.logger.info('Filespace %s exists' % filespace)
            return

        file1 = local_path(filespace)
        f1 = open(file1+".fs.config","w")
        f1.write('filespace:%s\n' % filespace)
        f1.write('fsysname:hdfs\n')
        fsrep = PSQL.run_sql_command("select fsrep from pg_filespace where fsname='dfs_system';", flags = '-t -q', dbname='postgres')
        f1.write('fsreplica:%s\n' % fsrep.strip())

        dfs_loc_cmd = "SELECT substring(fselocation from length('hdfs:// ') for (position('/' in substring(fselocation from length('hdfs:// ')))-1)::int) FROM pg_filespace pgfs, pg_filespace_entry pgfse  WHERE pgfs.fsname = 'dfs_system' AND fsedbid = 2 AND pgfse.fsefsoid=pgfs.oid ;"
        dfs_loc = PSQL.run_sql_command(dfs_loc_cmd,flags = '-t -q', dbname='postgres')
        for record in self.config.record:
            if record.content == -1:
                fileloc = '%s/hdfs_%s' % (os.path.split(record.datadir)[0], filespace)
                f1.write("%s:%s:%s/%s\n" % (record.hostname, record.dbid, fileloc, os.path.split(record.datadir)[1]))
                cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'"  % (record.hostname, fileloc, fileloc)
                run_shell_command(cmd)
            else:
                f1.write("%s:%s:[%s/%s/%s]\n" % (record.hostname, record.dbid, dfs_loc.strip(), filespace, os.path.split(record.datadir)[1]))
        f1.close()
        filespace_cmd = '%s/bin/gpfilespace -c %s' % (self.gphome, f1.name)
        cmd = Command(name='Gpfilespace command', cmdStr="%s" % (filespace_cmd))
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0:
            raise GPfilespaceException('"gpfilespace creation filespace FAILED".  Output = %s ' % result.stdout)
Example #8
 def gpconfig_alter(self, type, bool):
     ''' Alter postgres configuration '''
     if bool == 'true':
         fault_string = "filerep_inject_listener_fault=true"
     elif bool == 'false':
         fault_string = "filerep_inject_listener_fault=false"
     for record in self.gpconfig.record:
         if type == 'primary':
             if record.role and record.content != -1:
                 fse_location = record.datadir
             else:
                 continue
         if type == 'mirror':
             if (not record.role) and record.content != -1:
                 fse_location = record.datadir
             else:
                 continue
         run_shell_command('ssh ' + record.hostname + ' \'echo ' +
                           fault_string + ' >> ' + fse_location +
                           '/postgresql.conf\'')
         tinctest.logger.info(
             "\n ssh   %s   'echo %s  >>   %s/postgresql.conf'" %
             (record.hostname, fault_string, fse_location))
     tinctest.logger.info(
         "\n  Done set %s in postgresql.conf on all %s segments" %
         (fault_string, type))
Example #9
    def test_no_corruption(self):
        """
        Test that gpcheckcat does not report any errors and it does
        not generate the verify file if the gpcheckcat test succeeds.
        We choose missing_extraneous test for this purpose.

        """
        dbname = 'test_no_corruption'
        PSQL.run_sql_command('DROP DATABASE IF EXISTS %s' % dbname)
        stdout = PSQL.run_sql_command('CREATE DATABASE %s' % dbname)
        if not stdout.endswith('CREATE DATABASE\n'):
            self.fail('failed to create database: %s' % stdout)

        sql_file = local_path('sql/create_tables.sql')
        if not PSQL.run_sql_file(sql_file, dbname=dbname,
                                 output_to_file=False):
            self.fail('failed to create tables')

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(
            "cd %s && $GPHOME/bin/lib/gpcheckcat -p %s -R missing_extraneous %s" %
            (self.gpcheckcat_test_dir, self.master_port, dbname),
            results=res)
        self.assertEqual(0, res['rc'])
        for f in os.listdir(self.gpcheckcat_test_dir):
            if fnmatch.fnmatch(f, 'gpcheckcat.verify.%s.*' % dbname):
                self.fail('found verify file when not expecting it')
Example #10
    def test_with_fault_injection(self):
        """
        add new mirrors, run a workload to verify the cluster is functioning correctly, and
        inject a fault on the mirrors to bring the cluster into change tracking, then run recoverseg
        """
        filerepUtil = Filerepe2e_Util()
        gprecover = GpRecover()
        self._setup_gpaddmirrors()
        self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with fault injection', res)
        gprecover.wait_till_insync_transition()
        self.assertEqual(0, res['rc'])
        self.run_simple_ddl_dml()

        # after adding new mirrors, check the integrity between primary and mirror
        self.check_mirror_seg()
        out_file = local_path('inject_fault_into_ct')
        filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
        # trigger the transition to change tracking
        PSQL.run_sql_command('drop table if exists foo;', dbname = 'template1')
        filerepUtil.wait_till_change_tracking_transition()
        gprecover.incremental()
        gprecover.wait_till_insync_transition()
        out_file=local_path('reset_fault')
        filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='mirror', H='ALL', outfile=out_file)
Example #11
    def test_option_port_offset(self):
        """
        primary port + offset = mirror database port
        primary port + (2 * offset) = mirror replication port
        primary port + (3 * offset) = primary replication port
        """
        gprecover = GpRecover()
        port_offset = 500
        self._setup_gpaddmirrors(port_offset = port_offset)
        self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with non default port_offset', res)
        self.assertEqual(0, res['rc'])
        query_ports = 'SELECT port, replication_port FROM gp_segment_configuration WHERE content = 0 ORDER BY preferred_role DESC;'
        result = PSQL.run_sql_command(query_ports, flags='-q -t', dbname='template1')
        ports = result.strip().split('\n')
        primary_ports = ports[0]
        mirror_ports = ports[1]
        primary_ports = primary_ports.split('|')
        primary_ports = [port.strip() for port in primary_ports]
        primary_db_port = int(primary_ports[0])
        primary_replic_port = int(primary_ports[1])
        mirror_ports = mirror_ports.split('|')
        mirror_ports = [port.strip() for port in mirror_ports]
        mirror_db_port = int(mirror_ports[0])
        mirror_replic_port = int(mirror_ports[1])  
        self.assertEqual(primary_db_port + port_offset, mirror_db_port)
        self.assertEqual(primary_db_port + 2*port_offset, mirror_replic_port)
        self.assertEqual(primary_db_port + 3*port_offset, primary_replic_port)
        gprecover.wait_till_insync_transition()
        self.verify_config_file_with_gp_config()
        self.check_mirror_seg()
Example #12
    def test_MPP24237(self):

        cmd_cleanup = "psql -Atc \"select datname from pg_database where datname != \'template0\'\" | while read a; do echo \"check for ${a}\";psql -Atc \"select \'drop schema if exists \' || nspname || \' cascade;\' from (select nspname from pg_namespace where nspname like \'pg_temp%\' union select nspname from gp_dist_random(\'pg_namespace\') where nspname like \'pg_temp%\' except select \'pg_temp_\' || sess_id::varchar from pg_stat_activity) as foo\" ${a}; done"

        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command(cmd_cleanup, 'do_clean', res)

        if res['rc'] > 0:
            raise Exception("Failed to do cleanup %s" %res[stderr])

        PSQL.run_sql_file(local_path('pre_script.sql'), out_file=local_path('pre_script.out'))
        self.assertTrue(Gpdiff.are_files_equal(local_path('pre_script.out'), local_path('pre_script.ans')))

        cmd = "select count(*) from pg_tables where schemaname like 'pg_temp%';"
        out = PSQL.run_sql_command(cmd, flags ='-q -t')

        if int(out) != 0:
            tinctest.logger.info("temp tables found")
            tinctest.logger.info(PSQL.run_sql_command("select * from pg_tables where schemaname like 'pg_temp%';"))
            PSQL.run_sql_file(local_path('clean_script.sql'))
            self.fail("temp tables were found")

        PSQL.run_sql_file(local_path('clean_script.sql'))

        run_shell_command(cmd_cleanup, 'do_clean', res)
        if res['rc'] > 0:
            raise Exception("Failed to do cleanup %s" %res[stderr])
Example #13
    def create_filespace(self, filespace):
        '''
        @param filespace: Filespace Name
        '''
        if self.exists(filespace) is True:
            tinctest.logger.info('Filespace %s exists' % filespace)
            return

        file1 = local_path(filespace)
        f1 = open(file1+".fs.config","w")
        f1.write('filespace:%s\n' % filespace)
        f1.write('fsysname:hdfs\n')
        fsrep = PSQL.run_sql_command("select fsrep from pg_filespace where fsname='dfs_system';", flags = '-t -q', dbname='postgres')
        f1.write('fsreplica:%s\n' % fsrep.strip())

        dfs_loc_cmd = "SELECT substring(fselocation from length('hdfs:// ') for (position('/' in substring(fselocation from length('hdfs:// ')))-1)::int) FROM pg_filespace pgfs, pg_filespace_entry pgfse  WHERE pgfs.fsname = 'dfs_system' AND fsedbid = 2 AND pgfse.fsefsoid=pgfs.oid ;"
        dfs_loc = PSQL.run_sql_command(dfs_loc_cmd,flags = '-t -q', dbname='postgres')
        for record in self.config.record:
            if record.content == -1:
                fileloc = '%s/hdfs_%s' % (os.path.split(record.datadir)[0], filespace)
                f1.write("%s:%s:%s/%s\n" % (record.hostname, record.dbid, fileloc, os.path.split(record.datadir)[1]))
                cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'"  % (record.hostname, fileloc, fileloc)
                run_shell_command(cmd)
            else:
                f1.write("%s:%s:[%s/%s/%s]\n" % (record.hostname, record.dbid, dfs_loc.strip(), filespace, os.path.split(record.datadir)[1]))
        f1.close()
        filespace_cmd = '%s/bin/gpfilespace -c %s' % (self.gphome, f1.name)
        cmd = Command(name='Gpfilespace command', cmdStr="%s" % (filespace_cmd))
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0:
            raise GPfilespaceException('"gpfilespace creation filespace FAILED".  Output = %s ' % result.stdout)
Example #14
    def test_PLPERL91010_super_untrust(self):
        """ Language PL/PERL upgrade to 9.1:File system operations are allowed for untrusted PL/PERL """
        if self.checkAPPHOMEandLIB("plperl"):
            print 'installation'
            gp_procedural_languages().installPL('plperlu')
        tmpfilename = local_path('plperl91/plsuperuser.tmp')
        tmpfile = ''
        for i in tmpfilename:
            if i == '/':
                tmpfile = tmpfile + '\/'
            else:
                tmpfile = tmpfile + i
        tmpfilename = tmpfile
        localpath = local_path('')

        if sys.platform == 'sunos5':
            cmd = 'sed \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql > %s/plperl91/test010_super_untrust.sql.tmp && mv %s/plperl91/test010_super_untrust.sql.tmp %s/plperl91/test010_super_untrust.sql' % ( tmpfilename, localpath, localpath, localpath, localpath)
        elif sys.platform == 'darwin':
            cmd = 'sed -i \'\' \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql' % ( tmpfilename, localpath )
        else:
            cmd = 'sed -i \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql' % ( tmpfilename, localpath )
        os.system( cmd )
        self.doPLPERLUbyuser("plperl91/test010_super_untrust", 'plsuperuser')
        checkcmd = 'cat ' + tmpfilename
        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(checkcmd, 'run command %s'%checkcmd, res)
        if res['rc']:
            raise Exception("Unable to open created file")
Example #15
 def install_kerberos_conf(self):
     """
     Update the kerberos configuration files according to the env
     and copy them to the appropriate locations
     """
     transforms = {
                     "%DOMAIN%" : self.kdc_domain,
                     "%HOSTNAME%" : self.kdc_host
                 }
     input_file_path = local_path(self.krb_template_conf + "/" + self.KRB_CONF_TEMPLATE)
     output_file_path = local_path(self.krb_template_conf + "/" + self.KRB_CONF_TEMPLATE[:-2])
     with open(input_file_path, 'r') as input:
         with open(output_file_path, 'w') as output:
             for line in input.readlines():
                 for key,value in transforms.iteritems():
                     line = re.sub(key,value,line)
                 output.write(line)
     cmd_str = "sudo cp %s %s" %(output_file_path, self.KRB_CONF)
     if not run_shell_command(cmd_str,"Copying krb5.conf"):
         raise KerberosUtilException("Couldn't copy krb5.conf")
     cmd_str = "sudo cp %s %s" %(local_path(self.krb_template_conf + "/" + self.KDC_CONF_TEMPLATE), self.KDC_CONF)
     if not run_shell_command(cmd_str,"Copying kdc.conf"):
         raise KerberosUtilException("Couldn't copy kdc.conf")
     cmd_str = "sudo cp %s %s" %(local_path(self.krb_template_conf + "/" + self.KADMIN_ACL_CONF_TEMPLATE), self.KADMIN_ACL_CONF)
     if not run_shell_command(cmd_str,"Copying kadm5.acl"):
         raise KerberosUtilException("Couldn't copy kadm5.acl")
Example #16
 def changetracking(self, type = 'mirror'):
     ''' Routine to inject fault that places system in change tracking'''
     tinctest.logger.info("Put system in changetracking ")
     cmd_str = 'gpfaultinjector -f filerep_consumer -m async -y fault -r %s -H ALL' %type
     results={'rc':0, 'stdout':'', 'stderr':''}
     run_shell_command(cmd_str, results=results)
     return results['stdout']
Example #17
 def doPLJAVA(self, num, filename, default="-a"):
     """ run PL/JAVA test case """
     # If JAVA_HOME is set, then run PL/Java
     # Also check whether pljava.so is install in $GPHOME/lib/postgresql
     init_file_list = []
     init_file = local_path('pljava/init_file')
     init_file_list.append(init_file)
     if self.checkAPPHOMEandLIB("pljava", "JAVA_HOME"):
         # If JDK is not 1.6 and up, then raise error
         res = {'rc': 0, 'stdout': '', 'stderr': ''}
         run_shell_command("java -version 2>&1", 'check java version', res)
         out = res['stdout'].split('\n')
         if out[0].find("1.6.") > 0:
             gp_procedural_languages().installPL('pljava')
             self.doTest(num,
                         filename,
                         default=default,
                         match_sub=init_file_list)
         else:
             raise Exception(
                 "Requires JDK 1.6 and up, your current version is :%s" %
                 (out[0]))
     else:
         # If JAVA_HOME is not set, then raise error
         if not os.environ.get("JAVA_HOME"):
             raise Exception("JAVA_HOME is not set")
Example #18
    def copyJARFILE(self, srcjarfile):
        """ copy jar file to $GPHOME/lib/postgresql/java on master and all segments """
 
        if not os.path.isfile(srcjarfile):
            raise Exception("Can not find jar file %s" % (srcjarfile))

        hosts = config.get_hosts()
        hoststr = ""
        for host in hosts:
            hoststr += " -h %s" % (host)

        # set access permissions to existing jar file so that gpscp can overwrite it with current one
        jarfilename = os.path.basename(srcjarfile)
        cmd = "gpssh%s -e 'chmod -Rf 755 %s/java/%s'" % (hoststr, LIBDIR, jarfilename)
        Command(name = 'set access permissions to existing jar', cmdStr = cmd).run(validateAfter=True)

        # copy current jar file to all hosts using gpscp
        cmd = "gpscp%s %s =:%s/java" % (hoststr, srcjarfile, LIBDIR)
        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(cmd, 'copy current jar file to all hosts', res)

        if res['rc']:
            raise Exception("Can not copy jar file %s to hosts" % (srcjarfile))

        # set access permissions to current jar file so that it can be accessed by applications
        cmd = "gpssh%s -e 'chmod -Rf 755 %s/java/%s'" % (hoststr, LIBDIR, jarfilename)
        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(cmd, 'set access permissions to current jar file', res)

        if res['rc']:
            raise Exception("Can not set access permissions of jar file %s to 755" % (jarfilename))
Example #19
 def runCmd(self, command):
     '''
     run shell command, redirecting standard error message to standard output
     '''
     res = {'rc': 0, 'stdout' : '', 'stderr': ''}
     run_shell_command(command + " 2>&1", 'set access permissions to current jar file', res)
     return (not res['rc'], res['stdout'])
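Because the command is suffixed with "2>&1", stderr is folded into the captured stdout, and the method returns a (success, output) pair. A hypothetical call site:

    # Invented example: the first element is True only when the command
    # exited with rc == 0; error text shows up in the second element.
    ok, output = self.runCmd('ls /tmp')
    if not ok:
        tinctest.logger.info('command failed: %s' % output)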
Example #20
 def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
     """
     Check for the gpfdist process
     Wait up to 60s for gpfdist to start, else raise an exception
     """
     if port is None:
         port = self.port
     process_started = False
     count = 0
     while (not process_started and count < wait):
         cmd_str = " | ".join([
             self.ps_command + ' -ef',
             'grep \"[g]pfdist -p %s\"' % (port)
         ])
         cmd = "gpssh -h %s '%s'" % (self.hostname, cmd_str)
         res = {'rc': 0, 'stderr': '', 'stdout': ''}
         run_shell_command(cmd, 'gpfdist process check', res)
         content = res['stdout']
         if len(content) > 0:
             if content.find("gpfdist -p %s" % port) > 0:
                 process_started = self.is_gpfdist_connected(port)
                 if process_started:
                     return True
         count = count + 1
         time.sleep(1)
     if raise_assert:
         raise GPFDISTError("Could not start gpfdist process")
     else:
         tinctest.logger.warning("Could not start gpfdist process")
         return False
Example #21
    def setUp(self):
        # compile tablefunc_demo.c and install the tablefunc_demo.so
        cmdMakeInstall = 'cd ' + MYD + '/%s/data && make && make install' % self.sql_dir
        ok = run_shell_command(cmdMakeInstall)
        # Current make file works for linux, but not for Solaris or OSX.
        # If compilation fails or installation fails, force system quit: os._exit(1)
        if not ok:
            tinctest.logger.error(
                "***** make command failed!! Executed Command : %s" %
                cmdMakeInstall)
            self.fail("ERROR: make command failed!!")
        sharedObj = GPHOME + '/lib/postgresql/tabfunc_demo.so'
        if not os.path.isfile(sharedObj):
            tinctest.logger.error("***** Shared object '%s' does not exist!!" %
                                  sharedObj)
            self.fail("ERROR: Shared object '%s' does not exist!!" % sharedObj)

        # For multinode cluster, need to copy shared object tabfunc_demo.so to all primary segments
        hosts = self.getMultinodeHosts()
        if hosts is not None:
            for host in hosts:
                cmd_str = "scp " + GPHOME + "/lib/postgresql/tabfunc_demo.so " + host.strip(
                ) + ":" + GPHOME + "/lib/postgresql"
                ok = run_shell_command(cmd_str)
                if not ok:
                    tinctest.logger.error(
                        '***** Could not copy shared object to primary segment: '
                        + cmd_str)
                    self.fail(
                        'Could not copy shared object to primary segment: ' +
                        cmd_str)
Example #22
    def killGpfdist(self, wait=60, port=None):
        """
        kill the gpfdist process
        @change: Johnny Soedomo, check from netstat whether the system has released the process rather than waiting a flat 10s
        @todo: Support for stopping gpfdist process on remote host
        """
        if port is None:
            port = self.port

        cmd_str = ' | '.join([
            self.ps_command + " -ef",
            "grep \"[g]pfdist -p %s\"" % (port),
            "awk '\"'\"'{print $2}'\"'\"'", "xargs kill"
        ])
        cmd = "gpssh -h %s '%s'" % (self.hostname, cmd_str)

        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(cmd, 'kill gpfdist', res)

        if not self.is_gpfdist_killed():
            raise GPFDISTError("Could not kill gpfdist process on %s:%s" %
                               (self.hostname, self.port))
        # Make sure the port is released
        is_released = False
        count = 0
        while (not is_released and count < wait):
            is_released = self.is_port_released()
            count = count + 1
            time.sleep(1)
Example #23
    def startGpfdist(self, options="", port=None, raise_assert=True, ssl=None):
        """
        start hosting the data
        @comment: Why do we need to ssh to a host that is localhost
                  killGpfdist does not support kill process on other host
        @note: If we are to use ssh subprocess, we will go to the home folder,
            let's revisit this with remote command so that it works for starting 
            gpfdist on remote host
        """
        if port is None:
            port = self.port
        else:
            port = str(port)
        if ssl is None:
            ssl = ""
        else:
            self.secure = True
            self.ssl_cert = ssl
            ssl = "--ssl %s" % self.ssl_cert

        directory = self.directory

        gpfdist_cmd = "gpfdist -p %s -d %s %s %s" % (port, directory, options,
                                                     ssl)
        cmd = "gpssh -h %s 'source %s/greenplum_path.sh; %s > /dev/null &'" % (
            self.hostname, self.gphome, gpfdist_cmd)

        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(cmd, 'gpfdist', res)
        if res['rc'] > 0:
            raise Exception(
                "Failed to start gpfdist on host %s and port %s with non-zero rc"
                % (self.hostname, port))
        return self.check_gpfdist_process(port=port, raise_assert=raise_assert)
Example #24
    def start_yarn(self):
        # start yarn daemons
        # start resource manager
        self.set_hdfs_permissions()
        cmd_str = "sudo %s/sbin/yarn-daemon.sh --config %s start resourcemanager" % (
            self.HADOOP_ENVS['HADOOP_HOME'],
            self.HADOOP_ENVS['HADOOP_CONF_DIR'])
        rm_started = run_shell_command(cmd_str)
        if not rm_started:
            raise Exception("Resource manager not started")

        # start node manager
        cmd_str = "sudo %s/sbin/yarn-daemon.sh --config %s start nodemanager" % (
            self.HADOOP_ENVS['HADOOP_HOME'],
            self.HADOOP_ENVS['HADOOP_CONF_DIR'])
        nm_started = run_shell_command(cmd_str)
        if not nm_started:
            raise Exception("Node manager not started")

        # start history server
        cmd_str = "sudo %s/sbin/mr-jobhistory-daemon.sh --config %s start historyserver" % (
            self.HADOOP_ENVS['HADOOP_HOME'],
            self.HADOOP_ENVS['HADOOP_CONF_DIR'])
        hs_started = run_shell_command(cmd_str)
        if not hs_started:
            raise Exception("History server not started")
Example #25
 def is_gpfdist_killed(self, port=None, wait=1):
     """
     Check whether the gpfdist process is killed
     """
     if port is None:
         port = self.port
     process_killed = False
     count = 0
     while (not process_killed and count < wait):
         cmd_str = " | ".join([
             self.ps_command + ' -ef',
             'grep \"[g]pfdist -p %s\"' % (port)
         ])
         cmd = "gpssh -h %s '%s'" % (self.hostname, cmd_str)
         res = {'rc': 0, 'stderr': '', 'stdout': ''}
         run_shell_command(cmd, 'gpfdist process check', res)
         content = res['stdout']
         # strip hostname prefix from gpssh output
         content = content.replace(self.hostname, '').strip('[]').strip()
         if len(content) > 0 or content.find("gpfdist -p %s" % port) > 0:
             tinctest.logger.warning(
                 "gpfdist process still exists on %s:%s" %
                 (self.hostname, self.port))
         else:
             return True
         count = count + 1
         time.sleep(1)
     tinctest.logger.warning("gpfdist process not killed on %s:%s" %
                             (self.hostname, self.port))
     return False
Example #26
    def start_hdfs(self):
        # format namenode
        cmd_str = "sudo -u hdfs %s/bin/hdfs --config %s namenode -format" % (
            self.HADOOP_ENVS['HADOOP_HOME'],
            self.HADOOP_ENVS['HADOOP_CONF_DIR'])
        namenode_formatted = run_shell_command(cmd_str)
        if not namenode_formatted:
            raise Exception("Exception in namenode formatting")

        # start namenode
        cmd_str = "sudo -u hdfs %s/sbin/hadoop-daemon.sh --config %s start namenode" % (
            self.HADOOP_ENVS['HADOOP_HOME'],
            self.HADOOP_ENVS['HADOOP_CONF_DIR'])
        namenode_started = run_shell_command(cmd_str)
        if not namenode_started:
            raise Exception("Namenode not started")

        cmd_str = "sudo %s/sbin/hadoop-daemon.sh --config %s start datanode" % (
            self.HADOOP_ENVS['HADOOP_HOME'],
            self.HADOOP_ENVS['HADOOP_CONF_DIR'])
        namenode_started = run_shell_command(cmd_str)
        if not namenode_started:
            raise Exception("Namenode not started")

        cmd_str = "sudo -u hdfs %s/sbin/hadoop-daemon.sh --config %s start secondarynamenode" % (
            self.HADOOP_ENVS['HADOOP_HOME'],
            self.HADOOP_ENVS['HADOOP_CONF_DIR'])
        namenode_started = run_shell_command(cmd_str)
        if not namenode_started:
            raise Exception("Secondary namenode not started")
Example #27
 def get_disk_usage(self, hostname, partition='/data'):
     '''Returns the disk usage of individual hosts'''
     cmd_str = "ssh %s df %s | grep -v Filesystem |awk \'{print $4}\'" % (
         hostname, partition)
     results = {'rc': 0, 'stdout': '', 'stderr': ''}
     run_shell_command(cmd_str, results=results)
     return results['rc'], results['stdout']
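Note that awk's $4 picks the fourth column of plain df output, which on Linux is the available space in 1K blocks. A hypothetical call site, assuming a segment host named sdw1:

    rc, free_kb = self.get_disk_usage('sdw1', '/data')
    tinctest.logger.info('free space on sdw1:/data = %s KB' % free_kb.strip())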
Example #28
 def _create_test_jars(self, export_env, java_classpath):
     cmd_str = "%s cd %s; javac -cp %s javaclasses/*.java" %(export_env, self.cur_dir, java_classpath)
     if not run_shell_command(cmd_str, "Compiling java classes"):
         raise HadoopIntegrationException("Error while compiling java classes!")
     cmd_str = "cd %s; jar cf maptest.jar javaclasses/*.class" %self.cur_dir
     if not run_shell_command(cmd_str, "Creating jar file"):
         raise HadoopIntegrationException("Error while creating the jar!")
Example #29
    def test_PLPERL91010_super_untrust(self):
        """ Language PL/PERL upgrade to 9.1:File system operations are allowed for untrusted PL/PERL """
        if self.checkAPPHOMEandLIB("plperl"):
            print 'installation'
            gp_procedural_languages().installPL('plperlu')
        tmpfilename = local_path('plperl91/plsuperuser.tmp')
        tmpfile = ''
        for i in tmpfilename:
            if i == '/':
                tmpfile = tmpfile + '\/'
            else:
                tmpfile = tmpfile + i
        tmpfilename = tmpfile
        localpath = local_path('')

        if sys.platform == 'sunos5':
            cmd = 'sed \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql > %s/plperl91/test010_super_untrust.sql.tmp && mv %s/plperl91/test010_super_untrust.sql.tmp %s/plperl91/test010_super_untrust.sql' % (
                tmpfilename, localpath, localpath, localpath, localpath)
        elif sys.platform == 'darwin':
            cmd = 'sed -i \'\' \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql' % (
                tmpfilename, localpath)
        else:
            cmd = 'sed -i \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql' % (
                tmpfilename, localpath)
        os.system(cmd)
        self.doPLPERLUbyuser("plperl91/test010_super_untrust", 'plsuperuser')
        checkcmd = 'cat ' + tmpfilename
        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(checkcmd, 'run command %s' % checkcmd, res)
        if res['rc']:
            raise Exception("Unable to open created file")
Example #30
 def install_kerberos_conf(self):
     """
     Update the kerberos configuration files according to the env
     and copy them to the appropriate locations
     """
     transforms = {"%DOMAIN%": self.kdc_domain, "%HOSTNAME%": self.kdc_host}
     input_file_path = local_path(self.krb_template_conf + "/" +
                                  self.KRB_CONF_TEMPLATE)
     output_file_path = local_path(self.krb_template_conf + "/" +
                                   self.KRB_CONF_TEMPLATE[:-2])
     with open(input_file_path, 'r') as input:
         with open(output_file_path, 'w') as output:
             for line in input.readlines():
                 for key, value in transforms.iteritems():
                     line = re.sub(key, value, line)
                 output.write(line)
     cmd_str = "sudo cp %s %s" % (output_file_path, self.KRB_CONF)
     if not run_shell_command(cmd_str, "Copying krb5.conf"):
         raise KerberosUtilException("Couldn't copy krb5.conf")
     cmd_str = "sudo cp %s %s" % (local_path(self.krb_template_conf + "/" +
                                             self.KDC_CONF_TEMPLATE),
                                  self.KDC_CONF)
     if not run_shell_command(cmd_str, "Copying kdc.conf"):
         raise KerberosUtilException("Couldn't copy kdc.conf")
     cmd_str = "sudo cp %s %s" % (local_path(self.krb_template_conf + "/" +
                                             self.KADMIN_ACL_CONF_TEMPLATE),
                                  self.KADMIN_ACL_CONF)
     if not run_shell_command(cmd_str, "Copying kadm5.acl"):
         raise KerberosUtilException("Couldn't copy kadm5.acl")
Example #31
    def killGpfdist(self, wait=60, port=None):
        """
        kill the gpfdist process
        @change: Johnny Soedomo, check from netstat whether the system has released the process rather than waiting a flat 10s
        @todo: Support for stopping gpfdist process on remote host
        """
        if port is None:
            port = self.port

        cmd_str = ' | '.join([self.ps_command + " -ef",
                              "grep \"[g]pfdist -p %s\"" % (port),
                              "awk '\"'\"'{print $2}'\"'\"'",
                              "xargs kill"])
        cmd = "gpssh -h %s '%s'" %(self.hostname, cmd_str)

        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command(cmd, 'kill gpfdist', res)

        if not self.is_gpfdist_killed():
            raise GPFDISTError("Could not kill gpfdist process on %s:%s" %(self.hostname, self.port))
        # Make sure the port is released
        is_released = False
        count = 0
        while (not is_released and count < wait):
            is_released = self.is_port_released()
            count = count + 1
            time.sleep(1)
Example #32
    def startGpfdist(self, options="", port=None, raise_assert=True, ssl=None):
        """
        start hosting the data
        @comment: Why do we need to ssh to a host that is localhost
                  killGpfdist does not support kill process on other host
        @note: If we are to use ssh subprocess, we will go to the home folder,
            let's revisit this with remote command so that it works for starting 
            gpfdist on remote host
        """
        if port is None:
            port = self.port
        else:
            port = str(port)
        if ssl is None:
            ssl = ""
        else:
            self.secure = True
            self.ssl_cert = ssl
            ssl = "--ssl %s" % self.ssl_cert

        directory = self.directory

        gpfdist_cmd = "gpfdist -p %s -d %s %s %s" % (port, directory, options, ssl)
        cmd = "gpssh -h %s 'source %s/greenplum_path.sh; %s > /dev/null &'" % (self.hostname, self.gphome, gpfdist_cmd)
        
        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command(cmd, 'gpfdist', res)
        if res['rc'] > 0:
            raise Exception("Failed to start gpfdist on host %s and port %s with non-zero rc" %(self.hostname, port))
        return self.check_gpfdist_process(port=port, raise_assert=raise_assert)
Example #33
    def test_batch_size_4(self):
        """
        check the batch size option -B of gpaddmirrors, which controls how many worker processes run in parallel (by default up to 10)
        """
        gprecover = GpRecover()
        self._setup_gpaddmirrors()
        self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

        workers = Set()
        batch_size = 4
        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(
            "gpaddmirrors -a -i %s -B %s -d %s --verbose" %
            (self.mirror_config_file, batch_size, self.mdd),
            'run gpaddmirrors batch size %s' % batch_size, res)
        self.assertEqual(0, res['rc'])
        lines = res['stdout'].split('\n')
        for line in lines:
            if 'worker' in line and 'haltWork' in line:
                elems = line.split(' ')[1]
                worker = elems.split('-')[-1]
                workers.add(worker)
        self.assertEquals(len(workers), batch_size)
        gprecover.wait_till_insync_transition()
        self.verify_config_file_with_gp_config()
        self.check_mirror_seg()
Example #34
 def test_mirror_spread(self):
     """
     Mirror spreading places each mirror on a different host within the Greenplum Database array
     """
     gprecover = GpRecover()
     if self.number_of_segments_per_host > len(self.hosts):
         self.skipTest(
             'skipping test since the number of hosts is less than the number of segments per host'
         )
     self._setup_gpaddmirrors()
     self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
     res = {'rc': 0, 'stdout': '', 'stderr': ''}
     run_shell_command(
         "gpaddmirrors -a -i %s -s -d %s --verbose" %
         (self.mirror_config_file, self.mdd),
         'run gpaddmirrors with mirror spreading', res)
     self.assertEqual(0, res['rc'])
     check_mirror_spreading = '''SELECT A.hostname, B.hostname 
                               FROM gp_segment_configuration A, gp_segment_configuration B 
                               WHERE A.preferred_role = \'p\' AND B.preferred_role = \'m\' AND A.content = B.content AND A.hostname <> B.hostname;'''
     result = PSQL.run_sql_command(check_mirror_spreading,
                                   flags='-q -t',
                                   dbname='template1')
     result = result.strip()
     self.assertNotEqual(0, len(result))
     rows = result.split('\n')
     self.assertEqual(self.number_of_segments, len(rows))
     gprecover.wait_till_insync_transition()
     self.verify_config_file_with_gp_config()
     self.check_mirror_seg()
Example #35
 def runCmd(self, command):
     '''
     run shell command, redirecting standard error message to standard output
     '''
     res = {'rc': 0, 'stdout' : '', 'stderr': ''}
     run_shell_command(command + " 2>&1", 'set access permissions to current jar file', res)
     return (not res['rc'], res['stdout'])
Example #36
    def _do_gpinitstandby(self):
        """
        Initializes a standby host on a host which is different from master host.
        """
        for host in self.hosts:
            if host != self.master_host:
                standby_host = host
                break
        tinctest.logger.info("Initializing standby master on host: %s" %
                             standby_host)

        # Create master directory on the standby host
        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(
            "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'" %
            (standby_host, os.path.dirname(self.mdd), os.path.dirname(
                self.mdd)), 'create master dir on standby host', res)
        if res['rc'] > 0:
            raise GPAddmirrorsTestCaseException(
                "Failed to create segment directories")

        # Do gpinitstandby
        cmd = GpInitStandby(standby_host, mdd=self.mdd)
        result = cmd.run(validate=False)
        if result.rc > 0:
            tinctest.logger.error(
                "gpinitstandby failed with an error code: %s" % result)
            raise GPAddmirrorsTestCaseException(
                "gpinitstandby failed with an error code. Failing the test module"
            )
Example #37
    def setUpClass(cls):
        """
        Checking if plperl package installed, otherwise install the package
        """
        super(MDTSQLTestCase, cls).setUpClass()
        mdt.pre_process_sql()
        mdt.pre_process_ans()
        mdt.setup_gpfdist()

        cmd = 'gpssh --version'
        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(cmd, 'check product version', res)
        gppkg = Gppkg()
        product_version = res['stdout']
        gppkg.gppkg_install(product_version, 'plperl')

        setup_user = '******'
        setup_db = 'create database mdt_db;'
        setup_sql = local_path('sql/setup/setup.sql')
        setup_output = local_path('output/setup/setup.out')
        PSQL.run_sql_command(sql_cmd=setup_user,
                             dbname=os.environ.get('PGDATABASE'))
        PSQL.run_sql_command(sql_cmd=setup_db,
                             dbname=os.environ.get('PGDATABASE'),
                             username='******')
        PSQL.run_sql_file(sql_file=setup_sql,
                          out_file=setup_output,
                          dbname='mdt_db',
                          username='******')
Example #38
 def put_file_in_hdfs(self, input_path, hdfs_path):
     if hdfs_path.rfind('/') > 0:
         hdfs_dir = hdfs_path[:hdfs_path.rfind('/')]
         cmd_str = "hdfs dfs -mkdir -p %s" %hdfs_dir
         run_shell_command(cmd_str, "Creating parent HDFS dir for path %s" %input_path)
     cmd_str = "hdfs dfs -put %s %s" %(input_path, hdfs_path)
     run_shell_command(cmd_str, "Copy to HDFS : file %s" %input_path)
Example #39
 def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
     """
     Check for the gpfdist process
     Wait up to 60s for gpfdist to start, else raise an exception
     """
     if port is None:
         port = self.port
     process_started = False
     count = 0
     while (not process_started and count<wait):
         cmd_str = " | ".join([
                    self.ps_command + ' -ef',
                    'grep \"[g]pfdist -p %s\"' % (port)])
         cmd = "gpssh -h %s '%s'" %(self.hostname, cmd_str)
         res = {'rc':0, 'stderr':'', 'stdout':''}
         run_shell_command(cmd, 'gpfdist process check', res)
         content = res['stdout']
         if len(content)>0:
             if content.find("gpfdist -p %s" % port)>0:
                 process_started = self.is_gpfdist_connected(port)
                 if process_started:
                     return True
         count = count + 1
         time.sleep(1)
     if raise_assert:
         raise GPFDISTError("Could not start gpfdist process")
     else:
         tinctest.logger.warning("Could not start gpfdist process")
         return False
Example #40
    def setUpClass(self):
        super(MapreduceMPPTestCase, self).setUpClass()
        gppkg = Gppkg()
        gppkg.gppkg_install(product_version, 'plperl')
        setup_command = "create language plperl;"
        PSQL.run_sql_command(setup_command, dbname = os.environ.get('PGDATABASE'))

        "compile functions.c and build functions.so"
        makeLog = local_path('testBuildSOLog.out')
        cmdMake = 'cd '+local_path('c_functions') + ' && make clean && make'
        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(cmdMake, 'compile functions.c', res)
        with open(makeLog, 'w') as make_log:
            make_log.write(res['stdout'])
        if res['rc']:
            raise Exception('failed to compile functions.c into functions.so')
        so_dir = local_path('c_functions')
        sharedObj = local_path('c_functions/functions.so')
        # if not os.path.isfile(sharedObj):
        #     raise gptest.GPTestError('so file does not exist')

        # For multinode cluster, need to copy shared object tabfunc_gppc_demo.so to all primary segments
        if gpdbconfig.is_multinode():
            res = {'rc':0, 'stderr':'', 'stdout':''}
            hosts = gpdbconfig.get_hosts(segments=True)
            scp_cmd = 'gpscp  -h ' +' -h '.join(map(str,hosts)) +' '+ sharedObj + ' =:%s' % so_dir
            run_shell_command(scp_cmd, 'copy functions.so to segments', res)
            if res['rc']:
                raise Exception('Could not copy shared object to primary segment')
Example #41
    def setUpClass(cls):
        super(EtablefuncGppcTestCase, cls).setUpClass()
        """
        compile tablefunc_gppc_demo.c and install the tablefunc_gppc_demo.so
        """
        gppkg = Gppkg()
        gpccutil.pre_process(product_version)
        result = gppkg.gppkg_install(product_version, "libgppc")
        # makeLog = loal_path('test00MakeLog.out')
        if result:
            cmdMakeInstall = (
                "cd " + local_path("data") + " && make clean && make CPPFLAGS=-D_GNU_SOURCE && make install"
            )
            res = {"rc": 0, "stderr": "", "stdout": ""}
            run_shell_command(cmdMakeInstall, "compile tablefunc_gppc_demo.c", res)

            # Current make file works for linux, but not for Solaris or OSX.
            # If compilation fails or installation fails, force system quit: os._exit(1)
            if res["rc"]:
                os._exit(1)  # This will exit the test including the next test suites
            sharedObj = "%s/tabfunc_gppc_demo.so" % (LIBDIR)
            if not os.path.isfile(sharedObj):
                os._exit(1)

            # For multinode cluster, need to copy shared object tabfunc_gppc_demo.so to all primary segments
            if gpdbconfig.is_multinode():
                res = {"rc": 0, "stderr": "", "stdout": ""}
                hosts = gpdbconfig.get_hosts(segments=True)
                scp_cmd = "gpscp  -h " + " -h ".join(map(str, hosts)) + " " + sharedObj + " =:%s" % LIBDIR
                run_shell_command(scp_cmd, "copy shared object to segments", res)
                if res["rc"]:
                    raise Exception("Could not copy shared object to primary segment")
Example #42
    def _do_gpinitsystem(self):
        # Check the config files to initialize the cluster
        self.assertTrue(os.path.exists(self.gpinitconfig_file))
        self.assertTrue(os.path.exists(self.host_file))

        # cleanup data directories before running gpinitsystem
        self._cleanup_segment_data_dir(self.host_file,
                                       os.path.dirname(self.primary_data_dir))
        self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

        res = {'rc': 0, 'stdout': '', 'stderr': ''}
        run_shell_command(
            "rm -rf %s; mkdir -p %s" %
            (os.path.dirname(self.mdd), os.path.dirname(self.mdd)),
            'create master dir', res)
        if res['rc'] > 0:
            raise GPAddmirrorsTestCaseException(
                "Failed to create master directories")

        result = GpInitSystem(self.gpinitconfig_file).run(validate=False)

        # initsystem returns 1 for warnings and 2 for errors
        if result.rc > 1:
            tinctest.logger.error("Failed initializing the cluster: %s" %
                                  result)
            raise GPAddmirrorsTestCaseException(
                "Failed initializing the cluster. Look into gpAdminLogs for more information"
            )
Example #43
 def is_gpfdist_killed(self, port=None, wait=1):
     """
     Check whether the gpfdist process is killed
     """
     if port is None:
         port = self.port
     process_killed = False
     count = 0
     while (not process_killed and count < wait):
         cmd_str = " | ".join([
                    self.ps_command + ' -ef',
                    'grep \"[g]pfdist -p %s\"' % (port)])
         cmd = "gpssh -h %s '%s'" %(self.hostname, cmd_str)
         res = {'rc':0, 'stderr':'', 'stdout':''}
         run_shell_command(cmd, 'gpfdist process check', res)
         content = res['stdout']
         # strip hostname prefix from gpssh output
         content = content.replace(self.hostname, '').strip('[]').strip()
         if len(content)>0 or content.find("gpfdist -p %s" %port) > 0:
             tinctest.logger.warning("gpfdist process still exists on %s:%s" %(self.hostname, self.port))
         else:
             return True
         count = count + 1
         time.sleep(1)
     tinctest.logger.warning("gpfdist process not killed on %s:%s" %(self.hostname, self.port))
     return False
Example #44
    def create_filespace(self, filespace):
        '''
        @param filespace: Filespace Name
        '''
        if self.exists(filespace) is True:
            tinctest.logger.info('Filespace %s exists' % filespace)
            return

        file1 = local_path(filespace)
        f1 = open(file1 + ".fs.config", "w")
        f1.write('filespace:%s\n' % filespace)

        for record in self.config.record:
            if record.role:
                fileloc = '%s/%s/primary' % (os.path.split(
                    record.datadir)[0], filespace)
            else:
                fileloc = '%s/%s/mirror' % (os.path.split(
                    record.datadir)[0], filespace)
            # @todo: use a common utility to create/delete remotely
            cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'" % (
                record.hostname, fileloc, fileloc)
            run_shell_command(cmd)
            f1.write("%s:%s:%s/%s\n" % (record.hostname, record.dbid, fileloc,
                                        os.path.split(record.datadir)[1]))
        f1.close()
        result = self.run(config=f1.name)
        if result.rc != 0:
            raise GPfilespaceException(
                '"gpfilespace creation filespace FAILED".  Output = %s ' % out)
Example #45
 def changetracking(self, type = 'mirror'):
     ''' Routine to inject fault that places system in change tracking'''
     tinctest.logger.info("Put system in changetracking ")
     cmd_str = 'gpfaultinjector -f filerep_consumer -m async -y fault -r %s -H ALL' %type
     results={'rc':0, 'stdout':'', 'stderr':''}
     run_shell_command(cmd_str, results=results)
     return results['stdout']
Example #46
    def test_with_fault_injection(self):
        """
        add new mirrors, run a workload to verify the cluster is functioning correctly, and
        inject a fault on the mirrors to bring the cluster into change tracking, then run recoverseg
        """
        filerepUtil = Filerepe2e_Util()
        gprecover = GpRecover()
        self._setup_gpaddmirrors()
        self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with fault injection', res)
        gprecover.wait_till_insync_transition()
        self.assertEqual(0, res['rc'])
        self.run_simple_ddl_dml()

        # after adding new mirrors, check the integrity between primary and mirror
        self.check_mirror_seg()
        out_file = local_path('inject_fault_into_ct')
        filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
        # trigger the transition to change tracking
        PSQL.run_sql_command('drop table if exists foo;', dbname = 'template1')
        filerepUtil.wait_till_change_tracking_transition()
        gprecover.incremental()
        gprecover.wait_till_insync_transition()
        out_file=local_path('reset_fault')
        filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='mirror', H='ALL', outfile=out_file)
Example #47
 def check_orca_status(self):
     res = {"rc": 0, "stdout": "", "stderr": ""}
     run_shell_command("gpconfig -s optimizer", "check if orca on or off", res)
     lines = res["stdout"].strip().split("\n")
     if "Master  value: on" in lines and "Segment value: on" in lines:
         return True
     else:
         return False
Example #48
 def test_gprestart(self):
     tinctest.logger.info('Restart database after immediate shutdown')
     sleep(20)
     cmd_str = 'source %s/greenplum_path.sh;%s/bin/gpstop -air'% (os.environ['GPHOME'], os.environ['GPHOME'])
     results={'rc':0, 'stdout':'', 'stderr':''}
     run_shell_command(cmd_str, results=results)
     if int(results['rc']) !=0:
         raise Exception('Gp-Restart failed')
Example #49
 def check_orca(self):
     cmd = 'gpconfig -s optimizer'
     res = {'rc': 0, 'stdout' : '', 'stderr': ''}
     run_shell_command(cmd, 'check if orca enabled', res)
     # gpconfig output must be split into lines; iterating the raw string
     # would yield single characters and never match
     for line in res['stdout'].splitlines():
         if 'Master  value: off' in line or 'Segment value: off' in line:
             return False
     return True
Example #50
    def setUpClass(self):
        super(PgcryptoScenarioTestCase, self).setUpClass()
        gppkg = Gppkg()
        gppkg.gppkg_install(product_version, 'pgcrypto')

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(". $GPHOME/greenplum_path.sh; psql -d %s -f $GPHOME/share/postgresql/contrib/pgcrypto.sql" % os.environ.get('PGDATABASE'), 'pgcrypto: setup', res)
        tinctest.logger.info('result from installing pgcrypto %s' % res['stdout'])
Example #51
 def start_server(self):
     """
     Starts Kerberos server
     """
     if not run_shell_command("%s krb5kdc restart" %self.service_cmd):
         raise KerberosUtilException("Couln't start kerberos service : krb5kdc\nCheck out the logs in /var/log/krb5kdc.log")
     if not run_shell_command("%s kadmin restart" %self.service_cmd):
         raise KerberosUtilException("Couln't start kerberos service : kadmin")
Example #52
 def check_and_install_sql(self, output = None):
     lines = output.strip().split('\n')
     res = {'rc':0, 'stderr':'', 'stdout':''}
     for line in lines:
         if 'Please run psql -d mydatabase -f $GPHOME' in line:
             sql_path = os.environ.get('GPHOME') + line.split('Please run psql -d mydatabase -f $GPHOME')[1].split(' ')[0]
             run_shell_command('psql -d %s -f %s' % (os.environ.get('PGDATABASE', 'gptest'), sql_path), 'run sql to build functions for the package', res)
             tinctest.logger.info('running sql file %s, result is %s' % (sql_path, res['stdout']))
             break
Example #53
 def add_krb_principals(self, hosts_list):
     """
     Add principal to kerberos server
     """
     for host in hosts_list:
         for principal in self.PRINCIPALS:
             run_shell_command(self.kadmin_cmd + "\"addprinc -randkey %s/%s@%s\"" %(principal, host, self.REALM))
     # creating principal for log-in user for KDC host only
     run_shell_command(self.kadmin_cmd + "\"addprinc -randkey %s/%s@%s\"" %(self.login_user, self.kdc_host, self.REALM))
Example #54
 def set_fips_mode(self, fips_mode = False):
     res = {'rc':0, 'stderr':'', 'stdout':''}
     if fips_mode:
         cmd = 'gpconfig -c custom_variable_classes -v pgcrypto --skipvalidation && gpconfig -c pgcrypto.fips -v on --skipvalidation'
     else:
         cmd = 'gpconfig -r custom_variable_classes && gpconfig -r pgcrypto.fips --skipvalidation'
     run_shell_command(cmd, 'run command %s' % cmd, res)
     tinctest.logger.info('Configured fips_mode, stdout: %s \n error: %s' % (res['stdout'], res['stderr']))
     self.gpstop.run_gpstop_cmd(restart='ar')
Example #55
    def remove_fillfiles(self, filename, host):
        location = os.getcwd()
        cmd_str = "ssh %s rm %s/diskfill/%s*" % (host,location, filename)

        results={'rc':0, 'stdout':'', 'stderr':''}
        run_shell_command(cmd_str, results=results)
        if int(results['rc']) !=0:
            raise Exception('Unable to delete the fill files')
        return
Example #56
 def getGPDBVersion(self):
     """
     Returns the version of gpdb
     """
     cmd = 'gpssh --version'
     res = {'rc':0, 'stderr':'', 'stdout':''}
     run_shell_command (cmd, 'check product version', res)
     product_version = res['stdout'].split('gpssh version ')[1].split(' build ')[0]
     return product_version
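The version is extracted as the text between 'gpssh version ' and ' build '. For example, assuming gpssh printed a hypothetical banner:

    stdout = 'gpssh version 4.3.8.0 build 1'  # invented sample output
    version = stdout.split('gpssh version ')[1].split(' build ')[0]
    assert version == '4.3.8.0'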
Example #57
 def replaceTemplate(self, file, path):
     # Uses Jeff/Caleb fixup.pl to replace abs_srcdir and hostname
     fixup_file = os.path.join(path, MAPREDUCE.fixup)
     if not os.path.exists(fixup_file):
         return
     else:
         res = {'rc':0, 'stderr':'', 'stdout':''}
         run_shell_command(fixup_file+" "+file, 'run fixup command', res)
         return res