Example #1
 def doTest(self, sql_filename):
     '''Run the file, compare oids in out file '''
     sql_file = local_path(sql_filename)
     out_file = local_path(sql_filename.split('.sql')[0] + '.out')
     PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
     isOk = self.compare_oids(out_file)
     self.assertTrue(isOk)
Example #2
    def setUpClass(cls):
        super(GppcTestCase, cls).setUpClass()
        """
        compile gppc_test.c and install the gppc_test.so
        """
        gpccutil.pre_process()
        cmd = 'gpssh --version'
        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command (cmd, 'check product version', res)
        gppkg = Gppkg()
        product_version = res['stdout']
        result = gppkg.gppkg_install(product_version, 'libgppc')
        #makeLog = local_path('test00MakeLog.out')
        if result:
            cmdMakeInstall = 'cd '+local_path('data')+' && make clean && make CPPFLAGS=-D_GNU_SOURCE && make install'
            res = {'rc':0, 'stderr':'', 'stdout':''}
            run_shell_command (cmdMakeInstall, 'compile gppc_test.c', res)

            # Current make file works for linux, but not for Solaris or OSX.
            # If compilation fails or installation fails, force system quit: os._exit(1)
            if res['rc']:
                os._exit(1) # This will exit the test including the next test suites
            sharedObj = local_path('data/gppc_test.so')
            if not os.path.isfile(sharedObj):
                os._exit(1)

            # For multinode cluster, need to copy shared object tabfunc_gppc_demo.so to all primary segments
            res = {'rc':0, 'stderr':'', 'stdout':''}
            hosts = gpdbconfig.get_hosts(segments=True)
            scp_cmd = 'gpscp  -h ' +' -h '.join(map(str,hosts)) +' '+ sharedObj + ' =:%s' % LIBDIR
            run_shell_command(scp_cmd, 'scp share object to all segment', res)
            tinctest.logger.info('scp shared object result %s' % res['stdout'])
            if res['rc']:
                raise Exception('Could not copy shared object to primary segment')
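
Note: the fixtures above rely on a run_shell_command(cmd, description, results) helper that fills a dict with 'rc', 'stdout' and 'stderr' and returns a truthy value on success. A minimal stdlib sketch of such a helper is shown below, assuming that calling convention; the real TINC utility may behave differently.

import subprocess

def run_shell_command(cmd, description='', results=None):
    """Run cmd through a shell, populate results with rc/stdout/stderr,
    and return True when the command exits with rc == 0."""
    if results is None:
        results = {}
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    results['rc'] = proc.returncode
    results['stdout'] = stdout
    results['stderr'] = stderr
    return proc.returncode == 0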
Example #3
    def test_with_fault_injection(self):
        """
        add new mirrors, run a workload to verify the cluster is functioning correctly,
        then inject a fault on the mirrors to bring the cluster into change tracking, and run gprecoverseg
        """
        filerepUtil = Filerepe2e_Util()
        gprecover = GpRecover()
        self._setup_gpaddmirrors()
        self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)

        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with fault injection', res)
        gprecover.wait_till_insync_transition()
        self.assertEqual(0, res['rc'])
        self.run_simple_ddl_dml()

        # after adding new mirrors, check the integrity between primary and mirror
        self.check_mirror_seg()
        out_file = local_path('inject_fault_into_ct')
        filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
        # trigger the transition to change tracking
        PSQL.run_sql_command('drop table if exists foo;', dbname = 'template1')
        filerepUtil.wait_till_change_tracking_transition()
        gprecover.incremental()
        gprecover.wait_till_insync_transition()
        out_file=local_path('reset_fault')
        filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='mirror', H='ALL', outfile=out_file)
Example #4
    def setUpClass(cls):
        super(EnhancedTableFunctionTest, cls).setUpClass()

        tinctest.logger.info("*** Running the pre-requisite sql files drop.sql and setup.sql")
        PSQL.run_sql_file(local_path('sqls/setup/drop.sql'))
        PSQL.run_sql_file(local_path('sqls/setup/create.sql'))
        tinctest.logger.info("*** Starting the Enhaced table test")
Example #5
 def install_kerberos_conf(self):
     """
     Update the kerberos configuration files according to the environment
     and copy them to the appropriate locations
     """
     transforms = {
                     "%DOMAIN%" : self.kdc_domain,
                     "%HOSTNAME%" : self.kdc_host
                 }
     input_file_path = local_path(self.krb_template_conf + "/" + self.KRB_CONF_TEMPLATE)
     output_file_path = local_path(self.krb_template_conf + "/" + self.KRB_CONF_TEMPLATE[:-2])
     with open(input_file_path, 'r') as input:
         with open(output_file_path, 'w') as output:
             for line in input.readlines():
                 for key,value in transforms.iteritems():
                     line = re.sub(key,value,line)
                 output.write(line)
     cmd_str = "sudo cp %s %s" %(output_file_path, self.KRB_CONF)
     if not run_shell_command(cmd_str,"Copying krb5.conf"):
         raise KerberosUtilException("Couldn't copy krb5.conf")
     cmd_str = "sudo cp %s %s" %(local_path(self.krb_template_conf + "/" + self.KDC_CONF_TEMPLATE), self.KDC_CONF)
     if not run_shell_command(cmd_str,"Copying kdc.conf"):
         raise KerberosUtilException("Couldn't copy kdc.conf")
     cmd_str = "sudo cp %s %s" %(local_path(self.krb_template_conf + "/" + self.KADMIN_ACL_CONF_TEMPLATE), self.KADMIN_ACL_CONF)
     if not run_shell_command(cmd_str,"Copying kadm5.acl"):
         raise KerberosUtilException("Couldn't copy kadm5.acl")
Example #6
 def test_01(self):
     "SPI: plpgsql"
     sql_file = local_path("query01.sql")
     out_file = local_path("query01.out")
     ans_file = local_path("query01.ans")
     PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
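
Many of these tests follow the same pattern: run a .sql file into a .out file, then compare it against a checked-in .ans answer file with Gpdiff.are_files_equal. A rough stdlib approximation of that check, assuming a plain line-by-line comparison (the real Gpdiff also supports match_sub substitution files, as seen in later examples):

import difflib

def are_files_equal(out_file, ans_file):
    """Return True when the output and answer files have no differing lines
    (trailing whitespace ignored)."""
    with open(out_file) as f1, open(ans_file) as f2:
        out_lines = [line.rstrip() for line in f1]
        ans_lines = [line.rstrip() for line in f2]
    return not list(difflib.unified_diff(out_lines, ans_lines))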
Example #7
    def test_PLPERL91010_super_untrust(self):
        """ Language PL/PERL upgrade to 9.1:File system operations are allowed for untrusted PL/PERL """
        if self.checkAPPHOMEandLIB("plperl"):
            print 'installation'
            gp_procedural_languages().installPL('plperlu')
        tmpfilename = local_path('plperl91/plsuperuser.tmp')
        tmpfile = ''
        for i in tmpfilename:
           if i == '/':
               tmpfile = tmpfile + '\/'
           else:
               tmpfile = tmpfile + i
        tmpfilename = tmpfile
        localpath = local_path('')

        if sys.platform == 'sunos5':
            cmd = 'sed \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql > %s/plperl91/test010_super_untrust.sql.tmp && mv %s/plperl91/test010_super_untrust.sql.tmp %s/plperl91/test010_super_untrust.sql' % ( tmpfilename, localpath, localpath, localpath, localpath)
        elif sys.platform == 'darwin':
            cmd = 'sed -i \'\' \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql' % ( tmpfilename, localpath )
        else:
            cmd = 'sed -i \'s/TMPFILENAME/%s/g\' %s/plperl91/test010_super_untrust.sql' % ( tmpfilename, localpath )
        os.system( cmd )
        self.doPLPERLUbyuser("plperl91/test010_super_untrust", 'plsuperuser')
        checkcmd = 'cat ' + tmpfilename
        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(checkcmd, 'run command %s'%checkcmd, res)
        if res['rc']:
            raise Exception("Unable to open created file")
Example #8
	def test_outof_shmm_exit_slots(self):
		"""
		The issue of MPP-19973 is that a shmem exit callback to reset 
		a temporary namespace is not removed when the temporary namespace is
		reset.

		In situations where a temporary namespace is reset multiple times
		because of an exception in a subtransaction, the callbacks
		use up all shmem_exit slots.
		"""

		sql_setup_file = local_path('mpp19973_setup.sql')
		PSQL.run_sql_file(sql_file=sql_setup_file)

		# Test case setup verification
		self.assertTrue(table_exists("foo"))
		self.assertTrue(function_exists("testfn"))
	
		sql_file = local_path('mpp19973.sql')
		out_file = local_path('mpp19973.out')
		PSQL.run_sql_file(sql_file=sql_file,
			out_file=out_file, output_to_file=True)

		# There will be different error messages in the output, but
		# we should not run out of shmem exit slots.
		self.assertNotRegexpMatches(open(out_file).read(), 
			"out of on_shmem_exit slots",
			"Database should not run out of shmem_exit slots")
Example #9
 def test_43_alter_table_with_oid(self):
     '''MPP-13870: Alter table Set Without Oids fails in case of inheritance'''
     sql_file = local_path('alter_table_with_oid.sql')
     out_file = local_path('alter_table_with_oid.out')
     ans_file = local_path('alter_table_with_oid.ans')
     PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #10
 def setUpClass(cls):
     super(uao_visimap, cls).setUpClass()
     sqlpath = local_path("sql")
     outpath = local_path("output")
     anspath = local_path("expected")
     if not os.path.exists(outpath):
         os.mkdir(outpath)
Example #11
 def doQuery(self, sqlfile, default=''):
     sql_file = local_path(sqlfile)
     filename_prefix = sqlfile.split('.sql')[0]
     out_file = local_path(filename_prefix + '.out')
     ans_file = local_path(filename_prefix + '.ans')
     PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #12
    def setUpClass(cls):
        super(uao_visimap, cls).setUpClass()
        sqlpath = local_path('sql')
        outpath = local_path('output')
        anspath = local_path('expected')
        if not os.path.exists(outpath):
            os.mkdir(outpath)
Example #13
    def test_gp_interconnect_fc_ard_142(self):
        if (self.cluster_platform.lower().find('red hat enterprise linux server') < 0):
            self.skipTest('Test only applies to RHEL platform.')
        try:
            out = self.checkGUC(self.gp_interconnect_min_retries_before_timeout)
            self.assertTrue(len(out) > 4)
            out = self.checkGUC(self.gp_interconnect_transmit_timeout)
            self.assertTrue(len(out) > 4)
            out = self.checkGUC(self.gp_interconnect_fc_method)
            self.assertTrue(len(out) > 4)
        except:
            self.skipTest("GUC " + self.gp_interconnect_min_retries_before_timeout + " or " + self.gp_interconnect_transmit_timeout + " or " +  self.gp_interconnect_fc_method + " not defined")


        result = runShellCommand('gpssh ' + self.hoststr +  ' \"export PATH=$PATH:/sbin; \
                                              sudo insmod ickm.ko ict_type=0x101 seq_array=2 drop_times=80\"')
        self.assertTrue(result)
        
        sql_file = local_path(self.common_sql + str(self._testMethodName) + '.sql');
        self.assertTrue(PSQL.run_sql_file(local_path(sql_file)))        
        out_file = sql_file.replace(".sql",".out")
        test_ret = "Failed to send packet (seq 2) to" in open(out_file).read() and "retries in 40 seconds" in open(out_file).read()
        ret_log = runShellCommand(self.log_str + self._testMethodName + '.log' )
        result = runShellCommand('gpssh ' + self.hoststr +  ' \"export PATH=$PATH:/sbin;sudo rmmod ickm.ko \"')

        self.assertTrue(result)  
        self.assertTrue(ret_log)
        self.assertTrue(test_ret)
Example #14
    def DONTtestMapReduceError007(self):
        "MapReduce: MPP-11061: mapreduce crash c_working.yaml"
        os.system("cd %s/mpp11061; make" % (local_path('')))
        mapr.replaceTemplate(local_path("mpp11061/c_working.yaml.in"), local_path(''))
        p = subprocess.Popen(["gpfdist", "-d", local_path(''), "-p", "8090"])
        self.doTest("mpp11061/c_working.yaml")
        os.kill(p.pid, signal.SIGKILL)
Example #15
    def replace_new_dirPath(self, orig_filename='recovery.conf', new_filename='recovery_new.conf'):
        """
        @summary: Modifies the template config file with new filespace location
        
        @param orig_filename: name of the template config file
        @param new_filename: name of the new config file
        @return: None
        """

        new_file = open(local_path(new_filename),'w')
        old_file = open(local_path(orig_filename))
        # Finds the gp prefix string from the host entry of the segments
        (host, fileLoc) = self.get_segment_host_fileLoc()
        slashIndex = fileLoc.rfind('/')
        # Extract the gp prefix
        gp_prefix = fileLoc[(slashIndex+1):]
        lineNo = 0
        for line in old_file:
            if lineNo == 1 :
                # If you check out the template file generated by gprecoverseg -o, you'll see that
                # it has failed_host:port:data_dir <SPACE> recovery_host:port:data_dir.
                # We intend to change only the data_dir of the recovery_host and not that of the failed host,
                # so we first split the line, modify only the second part,
                # and then concatenate them again.
                prefix_index = line.find(gp_prefix)
                first_part_of_line = line[:(prefix_index + len(gp_prefix))]
                remaining_part_of_line = line[(prefix_index + len(gp_prefix)):]
                new_gp_prefix = gp_prefix+'new'
                remaining_part_of_line = re.sub(gp_prefix,new_gp_prefix,remaining_part_of_line)
                line = first_part_of_line + remaining_part_of_line
            lineNo = lineNo + 1
            new_file.write(line)
        new_file.close()
        old_file.close()
Example #16
    def __init__(self, methodName):
        self.config = GPDBConfig()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.seg_prefix = os.path.basename(self.mdd).split('-')[0]
        self.master_host = self.config.get_masterhost()
        self.gpinitconfig_template = local_path('configs/gpinitconfig_template')
        self.datadir_config_file = local_path('configs/datadir_config_file') 
        self.mirror_config_file = local_path('configs/mirror_config_file')
        self.gpinitconfig_file = local_path('configs/gpinitconfig')
        self.host_file = local_path('configs/hosts')
        self.hosts = self.config.get_hosts(segments = True)

        self.port_base = '40000'
        self.master_port = os.environ.get('PGPORT', '5432')
        self.primary_data_dir = self.config.get_host_and_datadir_of_segment(dbid = 2)[1]
        # initially set the mirror data dir same to primary's
        self.mirror_data_dir = os.path.join(os.path.dirname(os.path.dirname(self.primary_data_dir)), 'mirror')
        self.gpinitsystem = True
        self.number_of_segments = self.config.get_countprimarysegments()
        self.number_of_segments_per_host = self.number_of_segments / len(self.hosts)
        self.standby_enabled = False
        self.number_of_parallelism = 4
        self.fs_location = []

        super(GPAddmirrorsTestCase, self).__init__(methodName)
Example #17
 def verify_mpp21545(self):
     """
     After pg_terminate_backend(), check whether a new log file has been generated; if so,
     collect all of its content into a string, plus any new content from the previous log
     file (in case the log messages span two logfiles).
     Then verify the new content does not contain 'PANIC', 'Unexpected internal error', or 'Stack trace'.
     files = [os.path.join(self.PG_LOG_DIR, fname) for fname in os.listdir(self.PG_LOG_DIR)]
     latest_logfile = max(files, key=os.path.getmtime)
     with open (local_path('log.txt')) as fin:
         logfile = fin.read().replace('\n', '')
     with open (local_path('linenum.txt')) as fin:
         linenum = int(fin.read().replace('\n', ''))
     new_logfile_content = ''
     previous_logfile_new_content = ''
     if latest_logfile != logfile:
         with open(latest_logfile) as fin:
             new_logfile_content = fin.read().replace('\n', '')
     with open(logfile) as fin:
         for i, line in enumerate(fin):
             if i>=linenum:
                 previous_logfile_new_content += line
             else:
                 continue
     new_log_content = previous_logfile_new_content+' '+new_logfile_content
     self.assertNotRegexpMatches(new_log_content,
         "PANIC",
         "pg_terminate_backend() should not cause PANIC !")
     self.assertNotRegexpMatches(new_log_content,
         "Unexpected internal error",
         "pg_terminate_backend() should not cause Unexpected error !")
     self.assertNotRegexpMatches(new_log_content,
         "Stack trace",
         "pg_terminate_backend() should not cause Stack trace !")
Example #18
    def do_test(self, timeout=0, sqlfile=None, host=None, port=None, username=None, password=None, flags='-a', usetemplate=False):
        """
        @summary: Run a PostGIS test case
        
        @param timeout: Number of seconds to run sql file before timing out
        @param sqlfile: The path to sql file (relative to TEST.py directory)
        @param host: The GPDB master host name to use to connect to database
        @param port: The GPDB port used to make connections to the database
        @param username: The database username to use to connect to the database
        @param password: The password for the database user used to connect to database
        """
        if sqlfile is None:
            testcase = inspect.stack()[1][3].split('test_')[1]
            
            #file = mkpath(testcase +".sql")
            file = local_path(testcase +".sql")
        else:
            #file = mkpath(sqlfile)
            file = local_path(sqlfile)
            # derive the base name so the .out/.ans paths below resolve when an explicit sqlfile is passed
            testcase = os.path.basename(sqlfile).split('.sql')[0]
        # run psql on file, and check result
        #psql.runfile(file,timeout=timeout,host=host,port=port,username=username,password=password,flag=flags)
        #self.checkResult(ifile=file, optionalFlags=" -B")

        out_file = local_path(testcase + ".out")
        ans_file = local_path(testcase +".ans")
        PSQL.run_sql_file(sql_file = file, out_file = out_file)
        self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #19
    def test_run_sql_file(self):
        test_case = MockSQLTestCase('test_query03')
        if os.path.exists(test_case.get_out_dir()):
            shutil.rmtree(test_case.get_out_dir())
        # Default mode
        test_case.run_sql_file(local_path('query03.sql'))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03.sql')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03.out')))
        self.assertFalse(self._check_str_in_file('SET optimizer',
                                                os.path.join(test_case.get_out_dir(), 'query03.sql')))

        # Optimizer on mode
        test_case.run_sql_file(local_path('query03.sql'), optimizer=True)
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_orca.out')))
        self.assertTrue(self._check_str_in_file('SET optimizer=on;',
                                                os.path.join(test_case.get_out_dir(), 'query03_orca.sql')))
                        

        # Optimizer off mode
        test_case.run_sql_file(local_path('query03.sql'), optimizer=False)
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_planner.out')))
        self.assertTrue(self._check_str_in_file('SET optimizer=off;',
                                                os.path.join(test_case.get_out_dir(), 'query03_planner.sql')))
Example #20
    def test_verify_setup_teardown(self):
        test_loader = tinctest.TINCTestLoader()
        test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)

        # As explained above, we want MockSQLTestCase to run if and only if
        # it's being invoked by our unit tests. So, it's skipped if discovered
        # directly by unit2. Here, bearing in mind that SQLTestCaseTests is itself
        # triggered by unit2, we override MockSQLTestCase's skip decorator to allow
        # this explicit construction of MockSQLTestCase to proceed.

        for test_case in test_suite._tests:
            test_case.__class__.__unittest_skip__ = False

        if os.path.exists(local_path("output/")):
            shutil.rmtree(local_path("output/"))
        test_result = unittest.TestResult()
        test_suite.run(test_result)

        self.assertEqual(test_result.testsRun, 4)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)

        # Verify if setup and teardown sqls were executed
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'setup.out')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'setup', 'setup1.out')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'teardown.out')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'teardown', 'teardown1.out')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_setup.out')))
        self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query03_teardown.out')))
Example #21
 def test_with_concurrent_workload(self):
     """
     add new mirrors while a concurrent workload is in progress, check that the mirrors are added
     and the running workload is not affected, and finally run checkmirrorseg.
     Note: adding mirrors while running a workload has a checkmirrorseg issue (MPP-24311)
     """
     gprecover = GpRecover()
     self._setup_gpaddmirrors()
     self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
     sql_setup_file = local_path('sql/ao_heap_table_setup.sql') 
     sql_file = local_path('sql/ao_heap_table.sql')
     pg_stat_activity = 'SELECT * FROM pg_stat_activity;'
     PSQL.run_sql_file(sql_setup_file)
     subprocess.Popen(["psql", "-f", sql_file])
     time.sleep(15)
     subprocess.Popen(["gpaddmirrors", "-ai", self.mirror_config_file, "-d", self.mdd])
     time.sleep(15)
     result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
     result = result.strip()
     rows = result.split('\n')
     self.assertTrue(len(rows) > 1)
     while len(rows) > 1:
         result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
         result = result.strip()
         rows = result.split('\n')
         time.sleep(3)
     gprecover.wait_till_insync_transition()
     self.verify_config_file_with_gp_config()
Example #22
    def pre_process(self, product_version):
        """
        Just process all the sql and ans files to replace the path with new environment
        """
        for sql_file in os.listdir(local_path('sql')):
            if sql_file.endswith('.t'):
                f_input = open(sql_file, 'r')
                f_output = open(sql_file.split('.t')[0], 'w')
                for line in f_input:
                    if line.find('%MYD%') >= 0:
                        f_output.write(line.replace('%MYD%', local_path('')))
                f_input.close()
                f_output.close()

        for ans_file in os.listdir(local_path('expected')):
            if ans_file.endswith('.t'):
                f_input = open(ans_file, 'r')
                f_output = open(ans_file.split('.t')[0], 'w')
                for line in f_input:
                    if line.find('%MYD%') >= 0:
                        f_output.write(line.replace('%MYD%', local_path('')))
                f_input.close()
                f_output.close()
            if ans_file.endswith('4.2') and product_version.startswith('4.2'):
                shutil.move(os.path.join(local_path('expected'), ans_file), os.path.join(local_path('expected'), ans_file.split('.4.2')[0]))
Example #23
    def setUpClass(self):
        super(MapreduceMPPTestCase, self).setUpClass()
        gppkg = Gppkg()
        gppkg.gppkg_install(product_version, 'plperl')
        setup_command = "create language plperl;"
        PSQL.run_sql_command(setup_command, dbname = os.environ.get('PGDATABASE'))

        "compile functions.c and build functions.so"
        makeLog = local_path('testBuildSOLog.out')
        cmdMake = 'cd '+local_path('c_functions') + ' && make clean && make'
        res = {'rc': 0, 'stdout' : '', 'stderr': ''}
        run_shell_command(cmdMake, 'compile functions.c', res)
        file = open(makeLog, 'w')
        file.write(res['stdout'])
        file.close()
        if res['rc']:
            raise Exception('a problem occurred while creating the so files ')
        so_dir = local_path('c_functions')
        sharedObj = local_path('c_functions/functions.so')
        # if not os.path.isfile(sharedObj):
            #raise gptest.GPTestError('so files does not exist')

        # For multinode cluster, need to copy shared object tabfunc_gppc_demo.so to all primary segments
        if gpdbconfig.is_multinode():
            res = {'rc':0, 'stderr':'', 'stdout':''}
            hosts = gpdbconfig.get_hosts(segments=True)
            scp_cmd = 'gpscp  -h ' +' -h '.join(map(str,hosts)) +' '+ sharedObj + ' =:%s' % so_dir
            run_shell_command(scp_cmd, 'scp shared object to all segments', res)
            if res['rc']:
                raise Exception('Could not copy shared object to primary segment')
Example #24
 def check_duplicate_entry(self):     
     ans_file = local_path('check_duplicate_entry.ans')
     out_file=local_path('check_duplicate_entry.out')
     sql_file=local_path('check_duplicate_entry.sql')
     PSQL.run_sql_file(sql_file=sql_file,
                       out_file=out_file)        
     assert Gpdiff.are_files_equal(out_file,
                                   ans_file)
Example #25
 def test_question_mark_help_option(self):
     """
     check the question mark option -? of gpaddmirrors
     """
     help_doc = local_path('data/help_doc')
     help_output = local_path('data/help_output')
     Command('output the help information', 'gpaddmirrors -? > %s' % help_output).run(validateAfter=True)
     self.assertTrue(Gpdiff.are_files_equal(help_output, help_doc))
Example #26
 def setUp(self):
     seg_dir = os.getcwd()
     test_config = local_path("test_gpinit_config_nomirror.template")
     hostfile = local_path("test_hostfile")
     gpdb_dir = os.getenv("GPHOME", "/usr/local/greenplum-db")
     self.mdd = seg_dir + "/master/gpseg-1"
     self.create_template(seg_dir, test_config, hostfile)
     self.gpdbinit = gpinitsystem(gpdb_dir, test_config.strip(".template"), seg_dir, False)
Example #27
    def setUpClass(cls):
        #Create tables that will be used for tests below
        super(MVCC_UAOCS_TestCase, cls).setUpClass()
        sql_file = local_path('setup/create_tab_foruaocsmvcc.sql')
        out_file = local_path('setup/create_tab_foruaocsmvcc.out')

        print "sql file for creating tables : " + sql_file
        print "output file for sqls         : " + out_file
        assert PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, flags='-q')
Example #28
 def pre_process(self, sql_files=[], ans_files=[]):
     for sql_file in sql_files:
         file = local_path(sql_file)
         if os.path.isfile(file):
             self.replace(file, ' modifies sql data', '')
     for ans_file in ans_files:
         file = local_path(ans_file)
         if os.path.isfile(file):
             self.replace(file, ' modifies sql data', '')
Example #29
 def test_ao_toast_empty(self):
     PSQL.run_sql_file(local_path("workload_ao_toast_empty.sql"), out_file=local_path("workload_ao_toast_empty.out"))    
     self.run_validation()
     PSQL.run_sql_command("INSERT INTO test_table select i, i*2, i*5, CURRENT_TIMESTAMP  from generate_series(1, 20)i;"
                          "select count(*) from test_table;", flags ='-q -t', out_file=local_path("update_ao_toast_empty.out"))
     PSQL.run_sql_command("delete from test_table;"
                          "select count(*) from test_table;", flags ='-q -t', out_file=local_path("delete_ao_toast_empty.out"))
     PSQL.run_sql_command("VACUUM;", flags ='-q -t', out_file=local_path("vac_ao_toast_empty.out"))
     self.run_validation()
Example #30
 def test_MapReduceError004(self):
     "MapReduce: Test Error Handling 4: MPP-5550"
     f1 = open(local_path('mpp5550.yml.in'), 'r')
     f2 = open(local_path('mpp5550.yml'), 'w')
     for line in f1:
         if '@db_user@' in line:
             line = line.replace('@db_user@', os.environ.get('PGUSER', getpass.getuser()))
         f2.write(line)
     f1.close()
     f2.close()
     self.doTest("mpp5550.yml")
Example #31
 def test_gpactivatestandby_new_host_with_filespace(self):
     #from mpp.lib.gpfilespace import Gpfilespace
     #gpfile = Gpfilespace()
     #gpfile.create_filespace('fs_walrepl_a')
     #PSQL.run_sql_file(local_path('filespace.sql'), dbname= self.db_name)
     gputil.install_standby()
     initstdby = GpinitStandby()
     gpact_stdby = GpactivateStandby()
     self.mdd = gpact_stdby.get_standby_dd()
     self.host = initstdby.get_standbyhost()
     self.port = gpact_stdby.get_standby_port()
     self.standby_pid = gpact_stdby.get_standby_pid(self.host, self.port, self.mdd)
     PSQL.run_sql_file(local_path('create_tables.sql'), dbname = self.db_name)
     self.assertTrue(gpact_stdby.activate())
     self.assertTrue(gpact_stdby.verify_gpactivatestandby(self.standby_pid, self.host, self.port, self.mdd)) 
     gputil.failback_to_original_master(self.origin_mdd,self.host,self.mdd,self.port)
Example #32
    def test_05_incremental_restore_select_tablefile_with_option_s(self):
        tinctest.logger.info("Test25: Test to restore with selected tables in a file  with option s , drop and recreate db manualy before restore")

        self.run_workload("backup_dir", 'bkdb9')
        self.run_full_backup(dbname = 'bkdb9', option = '--ddboost', location=self.BACKUPDIR)

        self.run_workload("dirty_dir_1", 'bkdb9')
        self.get_data_to_file('bkdb9', 'backup1') #Create a copy of all the tables in database before the last backup
        self.run_incr_backup("dirty_dir_1", dbname = 'bkdb9', option = '--ddboost', location=self.BACKUPDIR)

        self.drop_create_database('bkdb9')

        table_file = local_path('%s/table_file1' % ('restore_incr_T_with_e'))
        self.cleanup()
        self.run_restore('bkdb9', option = '--table-file=%s --ddboost -e -s bkdb9' % table_file, location=self.BACKUPDIR)
        self.validate_restore("restore_incr_T_with_e", 'bkdb9')
Example #33
    def create_filespace(self, filespace):
        '''
        @param filespace: Filespace Name
        '''
        if self.exists(filespace) is True:
            tinctest.logger.info('Filespace %s exists' % filespace)
            return

        file1 = local_path(filespace)
        f1 = open(file1 + ".fs.config", "w")
        f1.write('filespace:%s\n' % filespace)
        f1.write('fsysname:hdfs\n')
        fsrep = PSQL.run_sql_command(
            "select fsrep from pg_filespace where fsname='dfs_system';",
            flags='-t -q',
            dbname='postgres')
        f1.write('fsreplica:%s\n' % fsrep.strip())

        dfs_loc_cmd = "SELECT substring(fselocation from length('hdfs:// ') for (position('/' in substring(fselocation from length('hdfs:// ')))-1)::int) FROM pg_filespace pgfs, pg_filespace_entry pgfse  WHERE pgfs.fsname = 'dfs_system' AND fsedbid = 2 AND pgfse.fsefsoid=pgfs.oid ;"
        dfs_loc = PSQL.run_sql_command(dfs_loc_cmd,
                                       flags='-t -q',
                                       dbname='postgres')
        for record in self.config.record:
            if record.content == -1:
                fileloc = '%s/hdfs_%s' % (os.path.split(
                    record.datadir)[0], filespace)
                f1.write("%s:%s:%s/%s\n" %
                         (record.hostname, record.dbid, fileloc,
                          os.path.split(record.datadir)[1]))
                cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'" % (
                    record.hostname, fileloc, fileloc)
                run_shell_command(cmd)
            else:
                f1.write("%s:%s:[%s/%s/%s]\n" %
                         (record.hostname, record.dbid, dfs_loc.strip(),
                          filespace, os.path.split(record.datadir)[1]))
        f1.close()
        filespace_cmd = '%s/bin/gpfilespace -c %s' % (self.gphome, f1.name)
        cmd = Command(name='Gpfilespace command',
                      cmdStr="%s" % (filespace_cmd))
        tinctest.logger.info(" %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0:
            raise GPfilespaceException(
                '"gpfilespace creation filespace FAILED".  Output = %s ' %
                result.stdout)
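
Based purely on the write calls above, the config file handed to gpfilespace -c would look roughly like the sketch below; the filespace name, hosts, dbids, replica count and paths are hypothetical placeholders, with the bracketed form used for segment entries and the plain form for the master (content -1) entry:

filespace:my_hdfs_fs
fsysname:hdfs
fsreplica:3
mdw:1:/data/master/hdfs_my_hdfs_fs/gpseg-1
sdw1:2:[namenode.example.com:8020/my_hdfs_fs/gpseg0]
sdw2:3:[namenode.example.com:8020/my_hdfs_fs/gpseg1]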
Example #34
 def setUp(self):
     super(XidlimitsTests, self).setUp()
     Command('re-build xidhelper.so', 'make -C %s xidhelper.so' %
             local_path('.')).run(validateAfter=True)
     SQLTemplateTestCase.perform_transformation_on_sqlfile(
         local_path('load_xidhelper.sql'),
         local_path('load_xidhelper.sql.t'),
         {'@source@': local_path('xidhelper.so')})
     PSQL.run_sql_file(sql_file=local_path('load_xidhelper.sql.t'),
                       out_file=local_path('load_xidhelper.out.t'))
     self.gparray = GpArray.initFromCatalog(dbconn.DbURL(), utility=True)
Example #35
 def test_gpactivatestandby_on_same_host(self):
     ''' Doesn't work due to STO-374'''
     gputil.install_standby(new_stdby_host='localhost')
     initstdby = GpinitStandby()
     gpact_stdby = GpactivateStandby()
     self.mdd = gpact_stdby.get_standby_dd()
     self.port = gpact_stdby.get_standby_port()
     self.standby_pid = gpact_stdby.get_standby_pid('localhost', self.port,
                                                    self.mdd)
     PSQL.run_sql_file(local_path('create_tables.sql'), dbname=self.db_name)
     self.assertTrue(gpact_stdby.activate())
     self.assertTrue(
         gpact_stdby.verify_gpactivatestandby(self.standby_pid, 'localhost',
                                              self.port, self.mdd))
     gputil.failback_to_original_master(self.origin_mdd,
                                        socket.gethostname(), self.mdd,
                                        self.port)
Example #36
    def create_new_loc_config(self,
                              hosts,
                              orig_filename='recovery.conf',
                              new_filename='recovery_new.conf'):
        """
        @summary: Runs recovery with -o option and creates a new config out of template
                    having new filespace location
        
        @param orig_filename: name of the template config file
        @param new_filename: name of the new config file
        @return: None
        """

        gpcmd = 'source %s/greenplum_path.sh; gprecoverseg -p %s -o %s' % (
            GPHOME, hosts, local_path(orig_filename))
        self._run_sys_cmd(gpcmd)
        self.replace_new_dirPath(orig_filename, new_filename)
Example #37
 def __init__(self,
              hadoop_artifact_url,
              hadoop_install_dir,
              hadoop_data_dir,
              template_conf_dir,
              hostname='localhost',
              secure_hadoop=False):
     HadoopUtil.__init__(self, hadoop_artifact_url, hadoop_install_dir,
                         hadoop_data_dir, hostname)
     self.rpmutil = RPMUtil()
     self.hostname = hostname
     self.hadoop_artifact_url = hadoop_artifact_url
     self.hadoop_install_dir = hadoop_install_dir
     self.hadoop_binary_loc = ''
     self.hadoop_data_dir = hadoop_data_dir
     self.template_conf_dir = local_path(template_conf_dir)
     self.secure_hadoop = secure_hadoop
     # Constants
     # under the hadoop template configuration directory
     # both the below directories should be present
     self.SECURE_DIR_NAME = "conf.secure"  # secure configuration files location
     self.NON_SECURE_DIR_NAME = "conf.pseudo"  # non-secure configuration files location
     self.DEPENDENCY_PKGS = [
         "fuse-",  # eg. fuse-2.8.3-4.el6.x86_64 
         "fuse-libs",  # eg. fuse-libs-2.8.3-4.el6.x86_6
         "nc-"  # eg. 1.84-22.el6.x86_64"
     ]
     self.PKGS_TO_REMOVE = "^hadoop-*|^bigtop-*|^zookeeper-*|^parquet-*"
     self.HADOOP_ENVS = {
         "HADOOP_HOME": "/usr/lib/hadoop/",
         "HADOOP_COMMON_HOME": "/usr/lib/hadoop/",
         "HADOOP_HDFS_HOME": "/usr/lib/hadoop-hdfs/",
         "HADOOP_MAPRED_HOME": "/usr/lib/hadoop-mapreduce/",
         "YARN_HOME": "/usr/lib/hadoop-yarn/",
         "HADOOP_TMP_DIR": "%s/hadoop-hdfs/cache/" % self.hadoop_data_dir,
         "MAPRED_TMP_DIR":
         "%s/hadoop-mapreduce/cache/" % self.hadoop_data_dir,
         "YARN_TMP_DIR": "%s/hadoop-yarn/cache/" % self.hadoop_data_dir,
         "HADOOP_CONF_DIR": "/etc/hadoop/conf",
         "HADOOP_LOG_DIR":
         "%s/hadoop-logs/hadoop-hdfs" % self.hadoop_data_dir,
         "MAPRED_LOG_DIR":
         "%s/hadoop-logs/hadoop-mapreduce" % self.hadoop_data_dir,
         "YARN_LOG_DIR": "%s/hadoop-logs/hadoop-yarn" % self.hadoop_data_dir
     }
Example #38
    def download_pkg(self, product_version, gppkg):
        """
        Download gppkg from artifacts server.
        """
        target_dir = local_path('download/')
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        (rc, download_link, package_name) = self.get_download_url_from_build_prod(product_version, gppkg)
        if rc != 0:
            return (-1, None, None)
        wget_cmd = 'wget --html-extension %s -O %s`basename %s`' % (download_link, target_dir, download_link)

        logger.debug('Download link: %s' % wget_cmd)
        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command (wget_cmd, 'run wget', res)
        if res['rc'] > 0:
            raise Exception("Gppkg download failed")
        return (0, target_dir, package_name)
Example #39
    def run_recovery_with_config(self, filename='recovery_new.conf'):
        """
        @summary: Runs incremental recovery using config file.
        
        @param filename: name of the modified recoverseg config file
        @return: Boolean value representing the status of recovery process
        """

        rcvr_cmd = 'gprecoverseg -a  -i %s' % local_path(filename)
        cmd = Command(name='Run gprecoverseg',
                      cmdStr='source %s/greenplum_path.sh;%s' %
                      (GPHOME, rcvr_cmd))
        tinctest.logger.info("Running gprecoverseg : %s" % cmd)
        cmd.run(validateAfter=True)
        result = cmd.get_results()
        if result.rc != 0 or result.stderr:
            return False
        return True
Example #40
 def test_pg_aocsseg_corruption(self):
     self.create_appendonly_tables(row=False)
     config = GPDBConfig()
     host, port = config.get_hostandport_of_segment()
     self.transform_sql_file(
         os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql.t'), 'co1')
     out_file = os.path.join(self.output_dir, 'corrupt_pg_aocsseg.out')
     ans_file = os.path.join(self.ans_dir, 'corrupt_pg_aocsseg.ans')
     sql_file = os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql')
     PSQL.run_sql_file_utility_mode(sql_file,
                                    out_file=out_file,
                                    host=host,
                                    port=port,
                                    dbname=os.environ['PGDATABASE'])
     if not Gpdiff.are_files_equal(
             out_file, ans_file, match_sub=[local_path('sql/init_file')]):
         raise Exception(
             'Corruption test of pg_aocsseg failed for appendonly tables !')
Example #41
    def create_filespace(self, fsname = 'filespace_a'):

        if self.filespace_exists(fsname) is False:
            config = self.get_config_with_fs()
            file_config = local_path('%s_config' % fsname)
            f1 = open(file_config , "w")
            f1.write('filespace:%s\n' % fsname)
            for record in config:
                if record[1] == 'p':
                    fileloc = '%s/%s/primary' % (os.path.split(record[3])[0], fsname)
                else:
                    fileloc = '%s/%s/mirror' % (os.path.split(record[3])[0], fsname)
                cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'"  % (record[2], fileloc, fileloc)
                self.run_command(cmd)
                f1.write("%s:%s:%s/%s\n" % (record[2], record[0], fileloc, os.path.split(record[3])[1]))
            f1.close()
            fs_cmd = '%s/bin/gpfilespace -c %s' % (self.gphome, file_config)
            self.run_gpcommand(fs_cmd)
Example #42
    def infer_metadata(self):
        intended_docstring = ""
        sql_file = local_path(self.gucCheck_sql + str(self._testMethodName) + '_1.sql')
        with open(sql_file, 'r') as f:
            for line in f:
                line = line.strip()
                if line.find('--') != 0:
                    break
                intended_docstring += line[2:].strip()
                intended_docstring += "\n"
                line = line[2:].strip()
                if line.find('@') != 0:
                    continue
                line = line[1:]
                (key, value) = line.split(' ', 1)
                self._metadata[key] = value

        self.gpdb_version = self._metadata.get('gpdb_version', None)
Example #43
 def do_PLJAVA_setup(self):
     """Language: PL/Java Setup"""
     if self.checkAPPHOMEandLIB("pljava", "JAVA_HOME"):
         sql_file = local_path("pljava/setup.sql")
         PSQL.run_sql_file(sql_file=sql_file)
         javahome = os.environ.get("JAVA_HOME")
         ldpath = "LD_LIBRARY_PATH=%s/jre/lib/amd64/server:$LD_LIBRARY_PATH\nexport LD_LIBRARY_PATH" % javahome
         if platform.machine() == "i686" or platform.machine() == "i386":
             ldpath = "LD_LIBRARY_PATH=%s/i386/server/libjvm.so:$LD_LIBRARY_PATH\nexport LD_LIBRARY_PATH" % javahome
         Command(name='add ldpath into greenplum_path.sh',
                 cmdStr="echo '%s' >> $GPHOME/greenplum_path.sh" %
                 ldpath).run()
         self.gpstop.run_gpstop_cmd(restart=True)
         pljava_install = os.path.join(
             GPHOME, "share/postgresql/pljava/install.sql")
         PSQL.run_sql_file(sql_file=pljava_install, dbname='gptest')
     else:
         self.skipTest('skipped')
Example #44
    def run_sql_under_KM_dropMore(self,suffix):
        
        result = runShellCommand('gpssh ' + self.hoststr +  ' \"sudo dmesg -c\"')
        self.assertTrue(result)
        runShellCommand('gpssh ' + self.hoststr +  ' \"export PATH=$PATH:/sbin; \
                                            sudo rmmod ickm.ko\"')
        result = runShellCommand('gpssh ' + self.hoststr +  ' \"export PATH=$PATH:/sbin;sudo insmod ickm.ko ict_type=0x101 seq_array=2 drop_times=80\"')
        self.assertTrue(result)
        
        sql_file = local_path(self.gucCheck_sql + str(self._testMethodName) + suffix + '.sql')
        self.assertTrue(PSQL.run_sql_file(sql_file))        

        result = runShellCommand('gpssh ' + self.hoststr +  ' \"export PATH=$PATH:/sbin;sudo rmmod ickm.ko \"')
        self.assertTrue(result)        

        out_file = sql_file.replace(".sql",".out")

        return out_file
Example #45
    def test_start_gpdb_with_high_transaction_id(self):
        """
        
        @description GPDB hang after high transaction id
        @created 2013-04-18 00:00:00
        @modified 2013-04-18 00:00:00
        @tags transaction MPP-17302 MPP-17323 MPP-17325 MPP-18462 MPP-18463 schedule_transaction 
        
        @note This requires that both primary and mirror to reset xlog.

        Repro step from Hitoshi:
        gpstop -a
        pg_resetxlog -x 100000000 /data/haradh1/gpdata/d/gpseg0
        dd if=/dev/zero of=/data/haradh1/gpdata/d/gpseg0/pg_clog/0017 oflag=append conv=notrunc bs=1048576 count=1
        cp /data/haradh1/gpdata/d/gpseg0/pg_clog/0017 /data/haradh1/gpdata/d/gpseg0/pg_distributed/02FA
        gpstart -a
        """

        # @note: need a class to get GPDB configuration, need to get primary/mirror segment location
        sqlcmd = "select fselocation from gp_segment_configuration, pg_filespace_entry where dbid=fsedbid and content=0"
        with dbconn.connect(dbconn.DbURL()) as conn:
            segments = dbconn.execSQL(conn, sqlcmd)

        # @note: Issue with self.run_gpstop, hard-coded remoteHost to mdw
        # @note: upgrade model uses a series of gpstop and gpstart command, need helper classes
        cmd = GpStop("gpstop")
        cmd.run(validateAfter=True)

        for segment in segments:
            cmd = Command(name="reset xlog",
                          cmdStr="pg_resetxlog -x 100000000 %s" % segment[0])
            cmd.run()

            xlogfile = local_path('xlog_file')
            # @todo: able to copy the xlogfile remotely
            shutil.copyfile(xlogfile, "%s/pg_clog/0017" % segment[0])
            shutil.copyfile(xlogfile, "%s/pg_distributedlog/02FA" % segment[0])

        # @note: workaround the issue with tinc and 4.1 gppylib
        cmd = Command(name='run gpstop',
                      cmdStr='source %s/greenplum_path.sh;\
            gpstart -a' % os.environ['GPHOME'])
        cmd.run(validateAfter=True)
Example #46
 def test_file_with_gucs(self):
     test_case = MockSQLTestCase('test_test_file_with_gucs')
     result = test_case.run_test()
     self.assertTrue(
         os.path.exists(
             os.path.join(test_case.get_out_dir(),
                          'test_file_with_gucs.sql')))
     self._check_gucs_exist_in_file(
         os.path.join(test_case.get_out_dir(), 'test_file_with_gucs.sql'),
         test_case.gucs)
     self.assertTrue(
         os.path.exists(
             os.path.join(test_case.get_out_dir(),
                          'test_file_with_gucs.out')))
     self.assertTrue(
         Gpdiff.are_files_equal(
             os.path.join(test_case.get_out_dir(),
                          'test_file_with_gucs.sql'),
             local_path('gucs/test_file_with_gucs.sql')))
Example #47
    def test_ddboost_23_full_exclude_schema_file(self):
        '''exclude schemas listed in a file'''
        tinctest.logger.info("Test-ddboost-23: Test for exclude schema file with --ddboost")

        self.run_workload("backup_dir_simple_db", 'bkdbbb')
        exclude_schema_file = local_path('%s/exclude_schema_file' % ('filter_file'))
        self.run_full_backup(dbname = 'bkdbbb', option = "--exclude-schema-file %s --ddboost" % exclude_schema_file)

        self.get_data_to_file('bkdbbb', 'backup1') #Create a copy of all the tables in database before the last backup

        #truncate another table in the database before restoring it from the dump file
        cmdTrunc = 'psql -d bkdbbb -c "truncate table schema_ao.ao_table1;"'
        self.run_command(cmdTrunc)

        #restore from backup
        self.cleanup()
        self.run_restore('bkdbbb', option = ' -s bkdbbb --ddboost')
        self.compare_table_data('bkdbbb')
        self.drop_database('bkdbbb')
Example #48
    def init_with_prompt(self, filespace_loc):
        standby = self.get_standbyhostnode()
        gp_cmd = "/bin/bash -c 'gpinitstandby -s %s -a'" % (standby)
        logfile = open(local_path('install2.log'), 'w')

        child = pexpect.spawn(gp_cmd, timeout=400)
        child.logfile = logfile
        sleep(5)
        check = child.expect(
            ['.* Enter standby filespace location for filespace.*', ' '])
        child.sendline(filespace_loc)

        sleep(10)
        check = child.expect(['.*Successfully created standby master.*'])
        if check != 0:
            tinctest.logger.error('gpinitstandby failed')
            return False
        child.close()
        return True
Example #49
    def doTest(self, num=None, filename='query', default='-a', match_sub=[]):
        # get file path to queryXX.sql

        if num == None:
            sql_file = local_path('%s.sql' % (filename))
            out_file = local_path('%s.out' % (filename))
            ans_file = local_path('%s.ans' % (filename))
        else:
            sql_file = local_path('%s%02d.sql' % (filename, num))
            out_file = local_path('%s%02d.out' % (filename, num))
            ans_file = local_path('%s%02d.ans' % (filename, num))
        # run psql on the sql file, and check the result
        PSQL.run_sql_file(sql_file = sql_file, out_file = out_file, flags=default)
        self.assertTrue(Gpdiff.are_files_equal(out_file=out_file, ans_file=ans_file, match_sub=match_sub))
Example #50
 def validate_test_CatalogCheck(self, action,storage):
     file_name =action+'_'+storage
     out_file = self.base_dir+ "/sql/"+file_name+'.out'
     ans_file = self.base_dir+ "/expected/"+file_name+'.ans'
     tinctest.logger.info( 'out-file == %s \n' % out_file)
     tinctest.logger.info( 'ans-file == %s \n' % ans_file)
     # Validate Ans file
     self.validate_sql(ans_file,out_file)
     if storage == 'multisegfiles':
         ''' check if multi_segfile_tab file has  multiple segfiles per column '''
         tablename='multi_segfile_tab'
         relid = self.get_relid(file_name=tablename )
         utilitymodeinfo=self.get_utilitymode_conn_info( relid=relid)
         u_port=utilitymodeinfo[0]
         u_host=utilitymodeinfo[1]
         assert(1 < int(self.get_segment_cnt(relid=relid,host=u_host,port= u_port)))
     # Check Correctness of the catalog
     self.dbstate = DbStateClass('run_validation')
     outfile = local_path("gpcheckcat_"+datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')+".out")
     self.dbstate.check_catalog(outputFile=outfile)
Example #51
def create_runaway_udf(dbname=None):
    build_udf()

    SQLTemplateTestCase.perform_transformation_on_sqlfile(
        local_path('./udfs/runaway_test.sql.in'),
        local_path('./udfs/runaway_test.sql'),
        {'@source@': local_path('./udfs/runaway_test.so')})

    tinctest.logger.info(
        'Creating Runaway Query Termination testing UDFs by running sql file %s'
        % local_path('./udfs/runaway_test.sql'))

    PSQL.run_sql_file(sql_file=local_path('./udfs/runaway_test.sql'),
                      out_file=local_path('./udfs/runaway_test.out'),
                      dbname=dbname)
Example #52
def search_log(cmd, key, segdir, logname, start=""):
    """
    Search using gplogfilter. Returns True if a match is found -
    i.e. the 'match:' line does not report '0 lines'.
    """
    logfilename = local_path(logname)
    GPHOME = os.getenv("GPHOME")
    if len(start) > 0:
        start = "-b %s" % start
    cmd_str = ("gpssh %s -e \"source %s/greenplum_path.sh; "
               "gplogfilter %s -m %s %s\" > %s 2>&1" %
               (cmd, GPHOME, start, key, segdir, logfilename))
    cmd = Command("search_log", cmdStr=cmd_str)
    cmd.run(validateAfter=False)
    f = open(logfilename, "r")
    for line in f.readlines():
        line = line.strip()
        if line.find("match:") > 0:
            if line.find("0 lines") < 0:
                return True
    return False
Example #53
    def run_crash_and_recovery_fast(self,
                                    test_dir,
                                    pass_num,
                                    cluster_state,
                                    test_type,
                                    ddl_type,
                                    aborting_create_needed=False):
        if pass_num == 0:
            self.wait_till_all_sqls_done()
        else:
            mydir = local_path(test_dir) + '/trigger_sql/sql/'
            tinctest.logger.info('mydir = %s ' % mydir)
            trigger_count = len(glob.glob1(mydir, "*trigger.sql"))
            tinctest.logger.info('*** Count of trigger : %s *** ' %
                                 (trigger_count))
            if test_dir == "abort_create_tests":
                ''' The vacuum full sql doesn't hit the suspend fault. '''
                trigger_count = trigger_count - 1
            if test_dir == "abort_create_needed_tests":
                ''' Not all SQLs hit the fault for this case, hence wait for them to complete and then others to hit the fault'''
                self.wait_till_all_sqls_done(8 + 1)
                trigger_count = 8
            if test_dir == "abort_abort_create_needed_tests":
                ''' Not all SQLs hit the fault for this case, hence wait for them to complete and then others to hit the fault'''
                self.wait_till_all_sqls_done(6 + 1)
                trigger_count = 6
            fault_type = self.get_faults_before_executing_trigger_sqls(
                pass_num,
                cluster_state,
                test_type,
                ddl_type,
                aborting_create_needed=False)
            fault_hit = self.fileutil.check_fault_status(
                fault_name=fault_type,
                status="triggered",
                num_times_hit=trigger_count)
            if not fault_hit:
                raise Exception('Fault not hit expected number of times')

        self.stop_start_validate(cluster_state)
Example #54
    def test_reindex_pg_class(self):
        tinctest.logger.info("create checkpoint")
        results = {'rc': 0, 'stdout': '', 'stderr': ''}
        PSQL.run_sql_command("checkpoint", results=results, dbname=self.dbname)
        assert results['rc'] == 0, results['stderr']

        tinctest.logger.info("inject fault to skip checkpoints")
        cmd = Command(
            "skip checkpoint on primaries",
            "gpfaultinjector -f checkpoint -m async -y skip -o 0"
            " -H ALL -r primary")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        cmd = Command(
            "skip checkpoint on master",
            "gpfaultinjector -f checkpoint -m async -y skip -o 0 -s 1")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("reindex pg_class indexes")
        assert PSQL.run_sql_file(local_path('reindex_pg_class.sql'),
                                 dbname=self.dbname)

        tinctest.logger.info("shutdown immediate")
        cmd = Command("shutdown immediate", "gpstop -ai")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("trigger recovery")
        cmd = Command("restart the cluster", "gpstart -a")
        cmd.run(validateAfter=True)
        tinctest.logger.info(cmd.get_results().printResult())

        tinctest.logger.info("validate recovery succeeded")
        results = {'rc': 0, 'stdout': '', 'stderr': ''}
        PSQL.run_sql_command("DROP TABLE reindex_pg_class_test",
                             results=results,
                             dbname=self.dbname)
        assert results['rc'] == 0, results['stderr']
Example #55
    def run_gpstate(self, type, phase):
        tinctest.logger.info("running gpstate")

        gphome = os.environ.get('GPHOME')
        masterdd = os.environ.get('MASTER_DATA_DIRECTORY')
        res = {'rc':0, 'stdout':'', 'stderr':''}

        cmd = "gpstate --printSampleExternalTableSql -q -d %s" % (masterdd)
        ok  = run_shell_command(cmd, results=res)
        if res['rc']>0:
            raise Exception("Failed to query gpstate --printSampleExternalTableSql");
        
        out = PSQL.run_sql_command(res['stdout'])
        if out.find('CREATE EXTERNAL TABLE') == -1 :
            raise Exception("Failed to create the external table gpstate_segment_status")

        gpstate_outfile = local_path("gpstate_out")
        cmd = "gpstate -s -a > %s 2>&1" % (gpstate_outfile)

        ok  = run_shell_command(cmd)
        self.check_gpstate(type, phase)
        return ok
Example #56
 def read_config(self, param):
     """Function to read configuration file and get parameters needed for test"""
     configParser = ConfigParser.RawConfigParser()
     configFilePath = local_path(
         "../gpdb/tests/utilities/commandcenter/configs/config.txt")
     configParser.read(configFilePath)
     if (param == 'metadata'):
         tinctest.logger.info('Fetching metadata')
         instance_name = configParser.get('valid', 'instance_name')
         machine = configParser.get('metadata', 'machine')
         pgport = configParser.get('metadata', 'pgport')
         port = configParser.get('metadata', 'port')
         mdd = configParser.get('metadata', 'mdd')
         source = configParser.get('metadata', 'source')
         cc_source = configParser.get('metadata', 'cc_source')
         return (machine, pgport, port, mdd, source, instance_name,
                 cc_source)
     else:
         tinctest.logger.info('Fetching username and password')
         username = configParser.get(param, 'username')
         password = configParser.get(param, 'password')
         return (username, password)
Example #57
    def test_ic_fc_kill_recv_process(self):
        if (self.cluster_platform.lower().find('red hat enterprise linux server') < 0):
            self.skipTest('Test only applies to RHEL platform.')
        
        cmd_str = "psql -a -f " + local_path(self.faultTolerance_sql + str(self._testMethodName) + '.sql') + " &> output_killrecv &"
        out = runShellCommand(cmd_str)
        
        # sleep 1 sec to ensure that some query related to interconnect is running when killing receiver process
        sleep(1)    
    
        (hostname, processID) = self.getHostProcess("receiver")

        if processID == "":
            self.skipTest("No receiver process found, skip this case")

        self.killHostProcess(hostname, processID)

        # sleep 50 sec to ensure that the sql returns some results
        sleep(50)

        # Check the output file to see whether the expected error info is returned.
        out = self.checkOutputPattern("output", "server closed the connection unexpectedly")
        self.assertTrue(out)
Example #58
    def login(self, param):
        """ Function to login to the CC to execute other tests """
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('Login operation')
        tinctest.logger.info("-------------------------------")

        (machine, pgport, port, mdd, source, instance_name,
         cc_source) = self.read_config('metadata')
        host = machine + ":" + port
        h = httplib.HTTPConnection(host)
        url = "http://" + machine + ":" + port + "/"

        tinctest.logger.info('Fetching username and password from config file')

        (username, password) = self.read_config(param)

        tinctest.logger.info('In function login')
        tinctest.logger.debug('Establishing connection')
        data = {'username': username, 'password': password}
        encoded_data = urllib.urlencode(data)

        tinctest.logger.debug('Opening url' + url)
        request = urllib2.Request(url + "logon", None)
        rs = urllib2.urlopen(request, encoded_data)
        h.request('POST', "/logon", encoded_data)

        login_msg = rs.read()
        # write the xml returned to a file for further processing
        login_status_xml = local_path(
            "../gpdb/tests/utilities/commandcenter/data/login_status")
        f = open(login_status_xml, 'w')
        f.write(login_msg)
        f.close()

        # Call the get_sessionid function to get the session_id
        session_id = self.get_sessionid(login_status_xml)
        return session_id
Example #59
    def setUpClass(cls):
        super(EtablefuncGppcTestCase, cls).setUpClass()
        """
        compile tablefunc_gppc_demo.c and install the tablefunc_gppc_demo.so
        """
        gppkg = Gppkg()
        gpccutil.pre_process(product_version)
        result = gppkg.gppkg_install(product_version, 'libgppc')
        #makeLog = local_path('test00MakeLog.out')
        if result:
            cmdMakeInstall = 'cd ' + local_path(
                'data'
            ) + ' && make clean && make CPPFLAGS=-D_GNU_SOURCE && make install'
            res = {'rc': 0, 'stderr': '', 'stdout': ''}
            run_shell_command(cmdMakeInstall, 'compile tablefunc_gppc_demo.c',
                              res)

            # Current make file works for linux, but not for Solaris or OSX.
            # If compilation fails or installation fails, force system quit: os._exit(1)
            if res['rc']:
                os._exit(
                    1
                )  # This will exit the test including the next test suites
            sharedObj = '%s/tabfunc_gppc_demo.so' % (LIBDIR)
            if not os.path.isfile(sharedObj):
                os._exit(1)

            # For multinode cluster, need to copy shared object tabfunc_gppc_demo.so to all primary segments
            if gpdbconfig.is_multinode():
                res = {'rc': 0, 'stderr': '', 'stdout': ''}
                hosts = gpdbconfig.get_hosts(segments=True)
                scp_cmd = 'gpscp  -h ' + ' -h '.join(map(
                    str, hosts)) + ' ' + sharedObj + ' =:%s' % LIBDIR
                run_shell_command(scp_cmd, 'scp shared object to all segments', res)
                if res['rc']:
                    raise Exception(
                        'Could not copy shared object to primary segment')
Example #60
 def setUpClass(cls):
     hadoop_type = "cdh"
     # Url of the CDH distribution
     hadoop_artifact_url = os.getenv("HADOOP_ARTIFACT_URL")
     if not hadoop_artifact_url:
         raise HadoopIntegrationException(
             "HADOOP_ARTIFACT_URL is not set!!")
     # hadoop installation directory; where CDH tar ball is untared
     hadoop_install_dir = os.getenv("HADOOP_INSTALL_PATH", "./installation")
     # path of the hadoop data directories and log directories
     hadoop_data_dir = os.getenv("HADOOP_DATA_DIR", "/data")
     template_conf_dir = local_path("./configs")
     secure_hadoop = os.getenv("ENABLE_SECURITY", True)
     # provide list of nodes if cluster is multinode - would be used in future, when multinode is enabled
     node_list = os.getenv("NODES_LIST", [])
     gphdfs_connector = os.getenv("GPHDFS_CONNECTOR", "cdh4.1-gnet-1.2.0.0")
     cls.integration = HadoopIntegration(hadoop_type, gphdfs_connector,
                                         hadoop_artifact_url,
                                         hadoop_install_dir,
                                         hadoop_data_dir, template_conf_dir,
                                         secure_hadoop, node_list)
     cls.integration.integrate()
     cls.test_failures = False
     super(CDHIntegrationTest, cls).setUpClass()