Example #1
0
    def test_uao_crash_vacuum_with_ins_fault(self):
        setup_file = self.get_sql_files("uaocs_crash_update_setup")[0]
        (sql_file1, out_file1,
         ans_file1) = self.get_sql_files("uao_crash_vacuum1")
        (sql_file2, out_file2,
         ans_file2) = self.get_sql_files("uao_crash_vacuum2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))
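        # Shell commands for gpfaultinjector: inject a 'panic' at the appendonly_insert
        # fault point on the segment with dbid 2, plus a matching 'reset' to clear it later.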
        set_fault_in_seg_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_insert -t foo -y panic --seg_dbid 2' % (
            os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_seg_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_insert -t foo -y reset --seg_dbid 2' % (
            os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'

        PSQL.run_sql_file(setup_file)
        gpfaultinjector = Command(cmd_type, set_fault_in_seg_panic)

        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)

        result1 = Gpdiff.are_files_equal(out_file1, ans_file1)
        # The connection is accepted before it is actually ready, leading to panic messages
        # Therefore we have to sleep here.
        sleep(5)

        gpfaultinjector = Command(cmd_type, set_fault_in_seg_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2)
        self.assertTrue(result1)
        self.assertTrue(result2)
Example #2
0
    def test_uao_crash_vacuum_with_ins_fault(self):
        setup_file = self.get_sql_files("uaocs_crash_update_setup")[0]
        (sql_file1, out_file1,ans_file1) = self.get_sql_files("uao_crash_vacuum1")
        (sql_file2, out_file2, ans_file2) = self.get_sql_files("uao_crash_vacuum2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))
        set_fault_in_seg_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_insert -t foo -y panic --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_seg_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_insert -t foo -y reset --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'

        PSQL.run_sql_file(setup_file)
        gpfaultinjector = Command(cmd_type, set_fault_in_seg_panic)

        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
    
        result1 = Gpdiff.are_files_equal(out_file1, ans_file1, match_sub=[gpdiff_init_file])

        PSQL.wait_for_database_up()

        gpfaultinjector = Command(cmd_type, set_fault_in_seg_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2, match_sub=[gpdiff_init_file])

        self.assertTrue(result1)
        self.assertTrue(result2)
Example #3
0
    def test_uao_crash_compaction_before_cleanup_phase_master_with_aocs(self):
        setup_file = self.get_sql_files("uao_crash_compaction_before_cleanup_phase_master_with_aocs_setup")[0]
        (sql_file1, out_file1,ans_file1) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase_master_with_aocs1")
        (sql_file2, out_file2, ans_file2) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase_master_with_aocs2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))
        set_fault_in_master_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y panic --seg_dbid 1'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_master_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y reset --seg_dbid 1'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'

        PSQL.run_sql_file(setup_file)
        gpfaultinjector = Command(cmd_type, set_fault_in_master_panic)

        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
    
        result1 = Gpdiff.are_files_equal(out_file1, ans_file1, match_sub=[gpdiff_init_file])

        PSQL.wait_for_database_up()

        gpfaultinjector = Command(cmd_type, set_fault_in_master_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2, match_sub=[gpdiff_init_file])

        self.assertTrue(result1)
        self.assertTrue(result2)
Example #4
0
    def test_uao_crash_compaction_before_cleanup_phase(self):
        setup_file = self.get_sql_files("uao_crash_compaction_before_cleanup_phase_setup")[0]
        (sql_file1, out_file1, ans_file1) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase1")
        (sql_file2, out_file2, ans_file2) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))

        PSQL.run_sql_file(setup_file)
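        # gpfaultinjector commands: panic at the compaction_before_cleanup_phase fault
        # point on the segment with dbid 2, and a reset command to clear the fault.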
        set_fault_in_seg_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y panic --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_seg_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y reset --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'
        gpfaultinjector = Command(cmd_type, set_fault_in_seg_panic)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
        result1 = Gpdiff.are_files_equal(out_file1, ans_file1)

        gpfaultinjector = Command(cmd_type, set_fault_in_seg_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2)

        self.assertTrue(result1)        
        self.assertTrue(result2)
Example #5
0
    def test_uao_crash_compaction_before_cleanup_phase_master_with_aocs(self):
        setup_file = self.get_sql_files("uao_crash_compaction_before_cleanup_phase_master_with_aocs_setup")[0]
        (sql_file1, out_file1,ans_file1) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase_master_with_aocs1")
        (sql_file2, out_file2, ans_file2) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase_master_with_aocs2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))
        set_fault_in_master_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y panic --seg_dbid 1'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_master_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y reset --seg_dbid 1'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'

        PSQL.run_sql_file(setup_file)
        gpfaultinjector = Command(cmd_type, set_fault_in_master_panic)

        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
    
        result1 = Gpdiff.are_files_equal(out_file1, ans_file1)
        # The connection is accepted before it is actually ready, leading to panic messages
        # Therefore we have to sleep here.
        sleep(5)

        gpfaultinjector = Command(cmd_type, set_fault_in_master_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2)
        self.assertTrue(result1)
        self.assertTrue(result2)
Example #6
0
    def test_uao_crash_compaction_before_cleanup_phase(self):
        setup_file = self.get_sql_files(
            "uao_crash_compaction_before_cleanup_phase_setup")[0]
        (sql_file1, out_file1, ans_file1
         ) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase1")
        (sql_file2, out_file2, ans_file2
         ) = self.get_sql_files("uao_crash_compaction_before_cleanup_phase2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))

        PSQL.run_sql_file(setup_file)
        set_fault_in_seg_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y panic --seg_dbid 2' % (
            os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_seg_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_cleanup_phase -y reset --seg_dbid 2' % (
            os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'
        gpfaultinjector = Command(cmd_type, set_fault_in_seg_panic)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
        result1 = Gpdiff.are_files_equal(out_file1,
                                         ans_file1,
                                         match_sub=[gpdiff_init_file])

        gpfaultinjector = Command(cmd_type, set_fault_in_seg_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2,
                                         ans_file2,
                                         match_sub=[gpdiff_init_file])

        self.assertTrue(result1)
        self.assertTrue(result2)
Example #7
0
    def test_uao_crash_vacuum_with_ins_fault(self):
        setup_file = self.get_sql_files("uaocs_crash_update_setup")[0]
        (sql_file1, out_file1,ans_file1) = self.get_sql_files("uao_crash_vacuum1")
        (sql_file2, out_file2, ans_file2) = self.get_sql_files("uao_crash_vacuum2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))
        set_fault_in_seg_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_insert -t foo -y panic --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_seg_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_insert -t foo -y reset --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'

        PSQL.run_sql_file(setup_file)
        gpfaultinjector = Command(cmd_type, set_fault_in_seg_panic)

        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
    
        result1 = Gpdiff.are_files_equal(out_file1, ans_file1, match_sub=[gpdiff_init_file])

        PSQL.wait_for_database_up()

        gpfaultinjector = Command(cmd_type, set_fault_in_seg_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2, match_sub=[gpdiff_init_file])

        self.assertTrue(result1)
        self.assertTrue(result2)
Example #8
0
    def test_insert_commit_before_truncate(self):
        '''
        @description We suspend the vacuum on master after the first
                     transaction, and connect to segment.  Modify the
                     relation in vacuum and commit the segment local
                     transaction before the truncate transaction starts.
        '''
        fault_name = 'vacuum_relation_end_of_first_round'

        gpdbconfig = GPDBConfig()
        seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
        filereputil = Filerepe2e_Util()
        filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')

        # run vacuum in background, it'll be blocked.
        sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
        psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
        thread1 = threading.Thread(target=self.run_psql, args=(psql1,))
        thread1.start()

        self.check_fault_triggered(fault_name)

        sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
        # utility to seg0
        psql2 = PSQL(sql_file=sql_file2, out_file=out_file2,
                     host=seghost, port=segport,
                     PGOPTIONS='-c gp_session_role=utility')
        self.run_psql(psql2)

        # resume vacuum
        filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')
        thread1.join()
        self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
        self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
Example #9
0
 def do_test_fixture(self, fixture):
     """
     @summary: Runs a setup or teardown routine
     
     @param fixture: Set to either 'setup' or 'teardown'. Used to determine sql file suffix.
     """
     testcase1 = inspect.stack()[1][3] 
     testcase = self.id().split(".")[2]
     init_file = local_path('init_file') 
     init_file_list = []
     init_file_list.append(init_file)
     if fixture == 'setup':
         sqlfile = local_path(testcase + "_setup.sql")
         outfile = local_path(testcase + "_setup.out")
         ansfile = local_path(testcase + "_setup.ans")
     elif fixture == 'teardown':
         sqlfile = local_path(testcase + "_teardown.sql")
         outfile = local_path(testcase + "_teardown.out")
         ansfile = local_path(testcase + "_teardown.ans")
     else:
         raise Exception("do_test_fixture(): Invalid value for fixture. Acceptable values are 'setup' or 'teardown'")
     
     # check if setup sql file exists
     if os.path.isfile(sqlfile):
         # if exists, run setup sql, and validate result
         PSQL.run_sql_file(sql_file = sqlfile, out_file = outfile)            
         Gpdiff.are_files_equal(outfile, ansfile, match_sub=init_file_list)
     else:
         pass
Example #10
0
    def checkResult(self, ifile, optionalFlags="", ignoreInfoLines = False):

        """
        PURPOSE: compare the actual and expected output files and report an 
            error if they don't match.
        PARAMETERS:
            ifile: the name of the .sql file whose actual and expected outputs 
                we want to compare.  You may include the path as well as the 
                filename.  This function will process this file name to 
                figure out the proper names of the .out and .ans files.
            optionalFlags: command-line options (if any) for diff.  
                For example, pass " -B " (with the blank spaces) to ignore 
                blank lines.
            ignoreInfoLines: set to True to ignore lines from gpcheckcat (and 
                possibly other sources) that contain "[INFO]" so we get diffs 
                only for errors, warnings, etc.
        LAST MODIFIED: 
            2010-05-06 mgilkey
                I added the ignoreInfoLines option so that we can ignore 
                differences in gpcheckcat output that are not significant.
            2010-02-17 mgilkey
                I added the "optionalFlags" parameter.
        """

        f1 = self.outFile(ifile)
        f2 = ansFile(ifile)
        self.appendMatchSub(f1)
        self.appendMatchSub(f2)
        return Gpdiff.are_files_equal(out_file=f1, ans_file=f2)
Example #11
0
    def checkResult(self, ifile, optionalFlags="", ignoreInfoLines=False):
        """
        PURPOSE: compare the actual and expected output files and report an 
            error if they don't match.
        PARAMETERS:
            ifile: the name of the .sql file whose actual and expected outputs 
                we want to compare.  You may include the path as well as the 
                filename.  This function will process this file name to 
                figure out the proper names of the .out and .ans files.
            optionalFlags: command-line options (if any) for diff.  
                For example, pass " -B " (with the blank spaces) to ignore 
                blank lines.
            ignoreInfoLines: set to True to ignore lines from gpcheckcat (and 
                possibly other sources) that contain "[INFO]" so we get diffs 
                only for errors, warnings, etc.
        LAST MODIFIED: 
            2010-05-06 mgilkey
                I added the ignoreInfoLines option so that we can ignore 
                differences in gpcheckcat output that are not significant.
            2010-02-17 mgilkey
                I added the "optionalFlags" parameter.
        """

        f1 = self.outFile(ifile)
        f2 = ansFile(ifile)
        self.appendMatchSub(f1)
        self.appendMatchSub(f2)
        return Gpdiff.are_files_equal(out_file=f1, ans_file=f2)
Example #12
0
    def test_uao_crash_compaction_before_drop_master_with_ao(self):
        setup_file = self.get_sql_files("uao_crash_compaction_before_drop_master_with_aocs_setup")[0]
        (sql_file1, out_file1,ans_file1) = self.get_sql_files("uao_crash_compaction_before_drop_master_with_ao1")
        (sql_file2, out_file2, ans_file2) = self.get_sql_files("uao_crash_compaction_before_drop_master_with_ao2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))
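        # gpfaultinjector commands: panic at the compaction_before_segmentfile_drop fault
        # point on the master (dbid 1), and a reset command to clear the fault.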
        set_fault_in_master_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_segmentfile_drop -y panic --seg_dbid 1'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_master_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f compaction_before_segmentfile_drop -y reset --seg_dbid 1'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'

        PSQL.run_sql_file(setup_file)
        gpfaultinjector = Command(cmd_type, set_fault_in_master_panic)

        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
    
        result1 = Gpdiff.are_files_equal(out_file1, ans_file1, match_sub=[gpdiff_init_file])

        PSQL.wait_for_database_up()

        gpfaultinjector = Command(cmd_type, set_fault_in_master_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2, match_sub=[gpdiff_init_file])

        self.assertTrue(result1)
        self.assertTrue(result2)
Example #13
0
    def test_uaocs_crash_alterdropcol(self):
        setup_file = self.get_sql_files("uaocs_crash_update_setup")[0]
        (sql_file1, out_file1,ans_file1) = self.get_sql_files("uaocs_crash_alterdropcol1")
        (sql_file2, out_file2, ans_file2) = self.get_sql_files("uaocs_crash_alterdropcol2")
        if not os.path.exists(os.path.dirname(out_file1)):
            os.mkdir(os.path.dirname(out_file1))
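        # gpfaultinjector commands: panic at the appendonly_delete fault point on the
        # segment with dbid 2, and a reset command to clear the fault.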
        set_fault_in_seg_panic = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_delete -t foo -y panic --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        set_fault_in_seg_reset = 'source %s/greenplum_path.sh;gpfaultinjector -p %s -f appendonly_delete -t foo -y reset --seg_dbid 2'  % (os.getenv('GPHOME'), os.getenv('PGPORT'))
        cmd_type = 'fault injector'

        PSQL.run_sql_file(setup_file)
        gpfaultinjector = Command(cmd_type, set_fault_in_seg_panic)

        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file1, out_file=out_file1)
    
        result1 = Gpdiff.are_files_equal(out_file1, ans_file1)
        # The connection is accepted before it is actually ready, leading to panic messages
        # Therefore we have to sleep here.
        sleep(5)

        gpfaultinjector = Command(cmd_type, set_fault_in_seg_reset)
        gpfaultinjector.run()

        PSQL.run_sql_file(sql_file2, out_file=out_file2)
        result2 = Gpdiff.are_files_equal(out_file2, ans_file2)
        self.assertTrue(result1)
        self.assertTrue(result2)
Example #14
0
    def test_insert_unlock_before_truncate(self):
        '''
        @description This is rather complicated.  We suspend the vacuum on
                     master after the first transaction, and connect to
                     segment, modify the relation in question, and release the
                     lock, keep the transaction.  To release the lock, we need
                     a special UDF.  Vacuum is supposed to skip truncate if it
                     sees such in-progress transaction.  Usually this should
                     not happen, but it rather simulates catalog DDL.
        '''
        fault_name = 'vacuum_relation_end_of_first_round'

        gpdbconfig = GPDBConfig()
        seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
        filereputil = Filerepe2e_Util()
        filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')

        PSQL.run_sql_command(
            sql_cmd=
            'drop table if exists sync_table; create table sync_table(a int)')
        # Use pygresql to keep the connection and issue commands seprately.
        # thread2 will wait on sync_table before finish its work, so we
        # can keep the transaction open until the vacuum completes its work.
        conn = pygresql.pg.connect(host=seghost,
                                   port=int(segport),
                                   opt='-c gp_session_role=utility')
        conn.query('begin')
        conn.query('lock sync_table in access exclusive mode')

        # run vacuum background, it'll be blocked.
        sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
        psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
        thread1 = threading.Thread(target=self.run_psql, args=(psql1, ))
        thread1.start()

        self.check_fault_triggered(fault_name)

        sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
        # utility to seg0
        psql2 = PSQL(sql_file=sql_file2,
                     out_file=out_file2,
                     host=seghost,
                     port=segport,
                     PGOPTIONS='-c gp_session_role=utility')
        thread2 = threading.Thread(target=self.run_psql, args=(psql2, ))
        thread2.start()

        # resume vacuum
        filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')

        # Once thread1 finishes, we can now release the lock on sync_table,
        # so that thread2 can proceed.
        thread1.join()
        conn.query('commit')
        thread2.join()

        self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
        self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
Example #15
0
    def run_test(self):
        sql_file = self.sql_file
        ans_file = self.ans_file
        
        source_file = sys.modules[self.__class__.__module__].__file__
        source_dir = os.path.dirname(source_file)
        out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))


        guc_sql_file = self._add_gucs_to_sql_file(sql_file)
        adj_ans_file = self._add_opt_diff_too_ans_file(ans_file)
        PSQL.run_sql_file(guc_sql_file, dbname = self.db_name, out_file = out_file)

        init_files = []
        init_file_path = os.path.join(self.get_sql_dir(), 'init_file')
        if os.path.exists(init_file_path):
            init_files.append(init_file_path)


        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        result = True
        if adj_ans_file is not None:
            if (self.executemode == 'normal'):
                result = Gpdiff.are_files_equal(out_file, ans_file, match_sub = init_files)
            elif (self.executemode == 'ORCA_PLANNER_DIFF'):
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
                new_ans_file = os.path.join(self.get_out_dir(), os.path.basename(ans_file).replace('.ans', '_mod.ans'))
                guc_sql_file = self._add_gucs_to_sql_file(sql_file)
                self.gucs.add('optimizer=off')
                self.gucs.add('optimizer_log=off')
                guc_off_sql_file = self._add_gucs_to_sql_file(sql_file)

                PSQL.run_sql_file(guc_off_sql_file, dbname = self.db_name, out_file = new_ans_file)
                PSQL.run_sql_file(guc_sql_file, dbname = self.db_name, out_file = out_file)

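                # Mask run-to-run differences (transaction/lock identifiers and
                # pg_aoseg_<oid> relation names) in both outputs before diffing them.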
                sedcmd = "sed -i -e 's/transactionid,,,,,[[:digit:]]\+,,,,[[:digit:]]\+,[[:digit:]]\+,ExclusiveLock,t,[[:digit:]]\+,/transactionid,,,,,XXXXX,,,,XXXXX,XXXXX,ExclusiveLock,t,XXXXX,/' " +new_ans_file
                sedcmd2 = "sed -i -e 's/transactionid,,,,,[[:digit:]]\+,,,,[[:digit:]]\+,[[:digit:]]\+,ExclusiveLock,t,[[:digit:]]\+,/transactionid,,,,,XXXXX,,,,XXXXX,XXXXX,ExclusiveLock,t,XXXXX,/' " +out_file
                sedcmd3 = "sed -i -e 's/pg_aoseg_[[:digit:]]\+/pg_aoseg_XXXXX/' " +new_ans_file
                sedcmd4 = "sed -i -e 's/pg_aoseg_[[:digit:]]\+/pg_aoseg_XXXXX/' " +out_file
                run_shell_command(sedcmd, "replace dynamic values in planner output with XXXXX")
                run_shell_command(sedcmd2, "replace dynamic values in ORCA output with XXXXX")
                run_shell_command(sedcmd3, "replace dynamic values in pg_aoseg.pg_aoseg_")
                run_shell_command(sedcmd4, "replace dynamic values in pg_aoseg.pg_aoseg_")

                result = Gpdiff.are_files_equal(out_file, new_ans_file, match_sub = init_files)
            else:
                result = Gpdiff.are_files_equal(out_file, adj_ans_file, match_sub = init_files)

            if result == False:
                self.test_artifacts.append(out_file.replace('.out', '.diff'))

        return result
Example #16
0
    def test_insert_unlock_before_truncate(self):
        '''
        @description This is rather complicated.  We suspend the vacuum on
                     master after the first transaction, and connect to
                     segment, modify the relation in question, and release the
                     lock, keep the transaction.  To release the lock, we need
                     a special UDF.  Vacuum is supposed to skip truncate if it
                     sees such in-progress transaction.  Usually this should
                     not happen, but it rather simulates catalog DDL.
        '''
        fault_name = 'vacuum_relation_end_of_first_round'

        gpdbconfig = GPDBConfig()
        seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
        filereputil = Filerepe2e_Util()
        filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')

        PSQL.run_sql_command(sql_cmd='drop table if exists sync_table; create table sync_table(a int)')
        # Use pygresql to keep the connection and issue commands seprately.
        # thread2 will wait on sync_table before finish its work, so we
        # can keep the transaction open until the vacuum completes its work.
        conn = pygresql.pg.connect(host=seghost, port=int(segport), opt='-c gp_session_role=utility')
        conn.query('begin')
        conn.query('lock sync_table in access exclusive mode')

        # run vacuum background, it'll be blocked.
        sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
        psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
        thread1 = threading.Thread(target=self.run_psql, args=(psql1,))
        thread1.start()

        self.check_fault_triggered(fault_name)

        sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
        # utility to seg0
        psql2 = PSQL(sql_file=sql_file2, out_file=out_file2,
                     host=seghost, port=segport,
                     PGOPTIONS='-c gp_session_role=utility')
        thread2 = threading.Thread(target=self.run_psql, args=(psql2,))
        thread2.start()

        # resume vacuum
        filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')

        # Once thread1 finishes, we can now release the lock on sync_table,
        # so that thread2 can proceed.
        thread1.join()
        conn.query('commit')
        thread2.join()

        self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
        self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
Example #17
0
    def test_ignore_settings_and_rows_in_plans(self):
        """
        Test whether gpdiff automatically ignores Settings: and number of rows in plan diffs.
        Two plans differing only in 'Settings:' and 'number of rows' should be considered the same.
        """
        file1 = os.path.join(os.path.dirname(__file__), 'explain_with_settings.out')
        file2 = os.path.join(os.path.dirname(__file__), 'explain_without_settings.out')

        self.assertTrue(Gpdiff.are_files_equal(file1, file2))

        # However other diffs should be considered
        file3 = os.path.join(os.path.dirname(__file__), 'explain_with_optimizer.out')
        self.assertFalse(Gpdiff.are_files_equal(file1, file3))
        self.assertFalse(Gpdiff.are_files_equal(file2, file3))
Example #18
0
    def _run_and_measure_sql_file(self, sql_file, iteration, ans_file=None):
        """
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
        against the test case database (self.db_name) and verifies the output with the ans file.
        """
        result = True

        self.test_artifacts.append(sql_file)
        out_file = os.path.join(
            self.get_out_dir(),
            os.path.basename(sql_file).replace(".sql",
                                               "_iter_%s.out" % iteration))
        self.test_artifacts.append(out_file)

        PSQL.run_sql_file(sql_file, dbname=self.db_name, out_file=out_file)

        if ans_file is not None:
            self.test_artifacts.append(ans_file)
            result = Gpdiff.are_files_equal(out_file, ans_file)
            if result == False:
                self.test_artifacts.append(out_file.replace('.out', '.diff'))
                self.fail('Diff failed between %s and %s' %
                          (out_file, ans_file))

        return self._get_runtime(out_file)
Example #19
0
 def test_01(self):
     "SPI: plpgsql"
     sql_file = local_path("query01.sql")
     out_file = local_path("query01.out")
     ans_file = local_path("query01.ans")
     PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #20
0
    def test_13_gp_persistent_relation_node_uaotable_del(self):
        tablename = "uao_table_test13"
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info(
            "test_13 Verify the eof mark in pg_aoseg and gp_persistant_rel_node table for uao relation after delete "
        )
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath, "create_tab_gp_persistent_relation_node_uaotable_del_13.out")
        sql_file = os.path.join(self.sqlpath, "create_tab_gp_persistent_relation_node_uaotable_del_13.sql")
        ans_file = os.path.join(self.anspath, "create_tab_gp_persistent_relation_node_uaotable_del_13.ans")

        # create uao table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        # get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        # get utility mode connection info
        utilitymodeinfo = self.get_utilitymode_conn_info(relid=relid)
        u_port = utilitymodeinfo[0]
        u_host = utilitymodeinfo[1]

        # get eof info before delete
        assert self.is_same_eof_uao_on_segment(relid=relid, host=u_host, port=u_port)

        # delete 1 row
        sql_cmd3 = "delete from %s  where i = (select min(i) from %s );" % (tablename, tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd3, host=u_host, port=u_port, flags="-q -t")
        self.vacuum_full(tablename=tablename)

        # get eof info after delete
        # eof_pg_aoseg_aft=self.get_eofuncompressed_pgaoseg_on_segment(relid=relid,host=u_host,port= u_port)
        # eof_gp_persistant_rel_node_aft=self.get_eof_gp_persistent_relation_node_on_segment(relid=relid,host=u_host,port= u_port)
        # assert (int(eof_pg_aoseg_aft) == (int(eof_gp_persistant_rel_node_aft)))
        assert self.is_same_eof_uao_on_segment(relid=relid, host=u_host, port=u_port)
Example #21
0
    def test_21_use_udf_gp_aovisimap_hidden_info_uao_upd_vacuum(self):
        tablename ='uao_table_test11'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_21 Verify the hidden tup_count using UDF gp_aovisimap_hidden_info(oid)  for uao relation after update_vacuum')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.sql')
        ans_file= os.path.join(self.anspath,'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.ans')

        #create uao table and insert 10 rows
        sql_out=PSQL.run_sql_file(sql_file = sql_file,out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        #get utility mode connection info
        utilitymodeinfo=self.get_utilitymode_conn_info( relid=relid)
        u_port=utilitymodeinfo[0]
        u_host=utilitymodeinfo[1]

        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))

        # update  rows
        sql_cmd3="update %s set j = 'test11' ;" % ( tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd= sql_cmd3,host=u_host, port=u_port,flags='-q -t')

        assert(int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)) > 0)

        self.vacuum_full(tablename=tablename)
        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))
Example #22
0
 def test_06_deletetable_visimap_for_uao_tables(self):
     tinctest.logger.info("-------------------------------")
     tinctest.logger.info(
         'test_06 Verify that the visimap updates with delete row in uao table test run'
     )
     tinctest.logger.info("-------------------------------\n")
     out_file = os.path.join(self.outpath, 'deletetablevisimapinfo_06.out')
     sql_file = os.path.join(self.sqlpath, 'deletetablevisimapinfo_06.sql')
     ans_file = os.path.join(self.anspath, 'deletetablevisimapinfo_06.ans')
     sql_cmd1 = "drop table if exists uao_visimap_test06 ;create table uao_visimap_test06 (i int, j varchar(20), k int ) with (appendonly=true) DISTRIBUTED BY (i);\n"
     sql_out = PSQL.run_sql_command(sql_cmd=sql_cmd1)
     self.assertIsNotNone(re.search('CREATE TABLE', sql_out))
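      # Fetch the table's relfilenode; it is interpolated below into the
      # pg_aoseg.pg_aovisimap_<relid> name queried through gp_dist_random.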
     sql_cmd2 = "\\pset tuples_only\n\\pset footer off\nSELECT relfilenode FROM pg_class WHERE relname='uao_visimap_test06';\n"
     with open(sql_file, 'w') as f:
         f.write(sql_cmd2)
     sql_out = PSQL.run_sql_file(sql_file=sql_file,
                                 out_file=out_file,
                                 flags='-q')
     with open(out_file, 'r') as f:
         relid = f.read()
     aovisimap_cmd = "select * from gp_dist_random('pg_aoseg.pg_aovisimap_%s');\n" % relid.strip(
     )
     sql_cmd3 = "select * from uao_visimap_test06;\n" + aovisimap_cmd + "insert into uao_visimap_test06 select i,'aa'||i,i+10 from generate_series(1,5) as i;\ndelete from uao_visimap_test06 where i=3;\nselect * from uao_visimap_test06;\n" + aovisimap_cmd
     with open(sql_file, 'w') as f:
         f.write(sql_cmd3)
     sql_out = PSQL.run_sql_file(sql_file=sql_file,
                                 out_file=out_file,
                                 flags='-q')
     assert Gpdiff.are_files_equal(out_file, ans_file)
Example #23
0
 def doQuery(self, sqlfile, default=''):
     sql_file = local_path(sqlfile)
     filename_prefix = sqlfile.split('.sql')[0]
     out_file = local_path(filename_prefix + '.out')
     ans_file = local_path(filename_prefix + '.ans')
     PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #24
0
    def test_20_use_udf_gp_aovisimap_hidden_info_uaocs_del(self):
        tablename ='uaocs_table_test14'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_20 Verify the hidden tup_count using UDF gp_aovisimap_hidden_info(oid)  for uaocs relation after delete ')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.sql')
        ans_file= os.path.join(self.anspath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.ans')
        #create uaocs table and insert 10 rows
        sql_out=PSQL.run_sql_file(sql_file = sql_file,out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename )
        #get utility mode connection info
        utilitymodeinfo=self.get_utilitymode_conn_uaocs_info( relid=relid)
        u_port=utilitymodeinfo[0]
        u_host=utilitymodeinfo[1]

        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))

        # delete 1 row
        sql_cmd3="delete from  %s  where i = (select min(i) from %s );" % (tablename, tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd= sql_cmd3,host=u_host, port=u_port,flags='-q -t')

        assert(1 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))

        self.vacuum_full(tablename=tablename)
        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))
Example #25
0
    def test_09_call_udf_gp_aovisimap_forupdate(self):
        tablename='uao_visimap_test09'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_09 Verify the usage of UDF gp_aovisimap in utility mode for update tuple')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_gp_aovisimap_upd_09.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_gp_aovisimap_upd_09.sql')
        ans_file= os.path.join(self.anspath,'create_tab_gp_aovisimap_upd_09.ans')

        #create uao table and insert 10 rows
        sql_out=PSQL.run_sql_file(sql_file = sql_file,out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        utilitymodeinfo=self.get_utilitymode_conn_info(relid=relid)
        u_port=utilitymodeinfo[0]
        u_host=utilitymodeinfo[1]
        #login to segment in utility mode and execute the gp_aovisimap(relid) UDF
        before_tablerowcnt=self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host,port=u_port)
        before_visimaprowcnt=self.get_visimap_cnt_on_segment(relid=relid,host=u_host,port=u_port)
        assert(int(before_visimaprowcnt) == 0)
        sql_cmd="update %s set j = j || '_9';" % (tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd,host=u_host, port=u_port,flags='-q -t')
        after_visimaprowcnt=self.get_visimap_cnt_on_segment(relid=relid,host=u_host,port=u_port)
        assert(int(after_visimaprowcnt) > 0)
Example #26
0
    def test_MPP24237(self):

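        # Cleanup pipeline: for every database except template0, drop any leftover
        # pg_temp schemas (on the master and, via gp_dist_random, on the segments)
        # that do not belong to an active session.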
        cmd_cleanup = "psql -Atc \"select datname from pg_database where datname != \'template0\'\" | while read a; do echo \"check for ${a}\";psql -Atc \"select \'drop schema if exists \' || nspname || \' cascade;\' from (select nspname from pg_namespace where nspname like \'pg_temp%\' union select nspname from gp_dist_random(\'pg_namespace\') where nspname like \'pg_temp%\' except select \'pg_temp_\' || sess_id::varchar from pg_stat_activity) as foo\" ${a}; done"

        res = {'rc':0, 'stderr':'', 'stdout':''}
        run_shell_command(cmd_cleanup, 'do_clean', res)

        if res['rc'] > 0:
            raise Exception("Failed to do cleanup %s" %res[stderr])

        PSQL.run_sql_file(local_path('pre_script.sql'), out_file=local_path('pre_script.out'))
        self.assertTrue(Gpdiff.are_files_equal(local_path('pre_script.out'), local_path('pre_script.ans')))

        cmd = "select count(*) from pg_tables where schemaname like 'pg_temp%';"
        out = PSQL.run_sql_command(cmd, flags ='-q -t')

        if int(out) != 0:
            tinctest.logger.info("temp tables found")
            tinctest.logger.info(PSQL.run_sql_command("select * from pg_tables where schemaname like 'pg_temp%';"))
            self.fail("temp tables were found")
            PSQL.run_sql_file(local_path('clean_script.sql'))

        PSQL.run_sql_file(local_path('clean_script.sql'))

        run_shell_command(cmd_cleanup, 'do_clean', res)
        if res['rc'] > 0:
            raise Exception("Failed to do cleanup %s" %res[stderr])
Example #27
0
 def setUpClass(cls):
     super(AOCSAlterColumnTestCase, cls).setUpClass()
     '''
     Create tables with multiple varblocks and multiple segfiles upfront. They will be used in the tests .
     '''
     base_dir = os.path.dirname(sys.modules[cls.__module__].__file__)
     crtable_name = [
         'create_tabfor_utility_mode', 'create_multivarblock_table',
         'create_multisegfile_table', 'create_large_table'
     ]
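      # Run each fixture .sql file and diff its output against the expected .ans,
      # using init_file for the match/substitution rules.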
     for sname in crtable_name:
         aoco_alter_sql = ''
         aoco_alter_sql = os.path.join(
             os.path.dirname(sys.modules[cls.__module__].__file__),
             "sql") + '/' + sname + '.sql'
         aoco_alter_ans = os.path.join(
             os.path.dirname(sys.modules[cls.__module__].__file__),
             "expected") + '/' + sname + '.ans'
         aoco_alter_out = os.path.join(
             os.path.dirname(sys.modules[cls.__module__].__file__),
             "sql") + '/' + sname + '.out'
         tinctest.logger.info('\n Creating TABLE :  %s' % aoco_alter_sql)
         res = PSQL.run_sql_file(sql_file=aoco_alter_sql,
                                 out_file=aoco_alter_out)
         init_file = os.path.join(base_dir, "sql", 'init_file')
         result = Gpdiff.are_files_equal(aoco_alter_out,
                                         aoco_alter_ans,
                                         match_sub=[init_file])
         errmsg = 'Gpdiff are not equal for file ' + sname
         assert result, errmsg
Example #28
0
 def test_01(self):
     "SPI: plpgsql"
     sql_file = local_path('query01.sql')
     out_file = local_path('query01.out')
     ans_file = local_path('query01.ans')
     PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #29
0
    def do_test(self, timeout=0, sqlfile=None, host=None, port=None, username=None, password=None, flags='-a', ans_version=False):
        """
        @summary: Run a test case
        
        @param timeout: Number of seconds to run sql file before timing out
        @param sqlfile: The path to sql file (relative to TEST.py directory)
        @param host: The GPDB master host name to use to connect to database
        @param port: The GPDB port used to make connections to the database
        @param username: The database username to use to connect to the database
        @param password: The password for the database user used to connect to database
        """
        (gpdb_version, build) = self.gpdb.GetGpdbVersion()
        if sqlfile is None:
            testcase = inspect.stack()[1][3]
            filename = testcase.split('test_')[1]
            sql_file = local_path(filename +".sql")
            out_file = local_path(filename + ".out")
            ans_file = local_path(filename + ".ans")
        else:
            sql_file = local_path(sqlfile)
            out_file = local_path(sqlfile.split('.')[0] + '.out')
            ans_file = local_path(sqlfile.split('.')[0] + '.ans')
        if ans_version:
            (gpdb_version, _) = self.gpdb.GetGpdbVersion()
            if gpdb_version.startswith('4.3'):
                ans_file = ans_file+'.4.3'

        init_file = local_path('init_file')
        init_file_list = []
        init_file_list.append(init_file)

        # run psql on file, and check result
        PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, timeout=timeout, host=host, port=port, username=username, password=password,flags=flags)
        self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file, match_sub=init_file_list))
Example #30
0
 def setUpClass(cls):
     super(UDFTestCase, cls).setUpClass()
     '''
     Create UDF to exercise subtransactions upfront. They will be used in the tests .
     '''
     base_dir = os.path.dirname(sys.modules[cls.__module__].__file__)
     udf_name = [
         'test_excep', 'test_protocol_allseg', 'setup_sql_exceptionhandling'
     ]
     for uname in udf_name:
         udfsql = ''
         udfsql = os.path.join(
             os.path.dirname(sys.modules[cls.__module__].__file__),
             "sql") + '/' + uname + '.sql'
         udfans = os.path.join(
             os.path.dirname(sys.modules[cls.__module__].__file__),
             "expected") + '/' + uname + '.ans'
         udfout = os.path.join(
             os.path.dirname(sys.modules[cls.__module__].__file__),
             "sql") + '/' + uname + '.out'
         tinctest.logger.info('\n Creating UDF :  %s' % udfsql)
         res = PSQL.run_sql_file(sql_file=udfsql, out_file=udfout)
         init_file = os.path.join(base_dir, "sql", 'init_file')
         result = Gpdiff.are_files_equal(udfout,
                                         udfans,
                                         match_sub=[init_file])
         assert result, 'Gpdiff are not equal'
Example #31
0
 def doQuery(self, sqlfile, default=''):
     sql_file = local_path(sqlfile)
     filename_prefix = sqlfile.split('.sql')[0]
     out_file = local_path(filename_prefix + '.out')
     ans_file = local_path(filename_prefix + '.ans')
     PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #32
0
    def run_test(self, data_file, sql_file, out_file, ans_file, table):
        base_dir = os.path.dirname(sys.modules[self.__module__].__file__)
        out_file = os.path.join(base_dir, 'output', out_file)
        ans_file = os.path.join(base_dir, 'expected', ans_file)
        sql_file = os.path.join(base_dir, 'sql', sql_file)

        data_out_file = os.path.join(base_dir, 'output', data_file.replace('.sql', '.out'))
        data_ans_file = os.path.join(base_dir, 'expected', data_file.replace('.sql', '.ans'))
        data_file = os.path.join(base_dir, 'sql', data_file) 

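        # Load the data file first and verify it against its own answer file, then run
        # the query file in utility mode against the host/port serving the target table.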
        PSQL.run_sql_file(data_file, out_file=data_out_file)
        self.assertTrue(Gpdiff.are_files_equal(data_out_file, data_ans_file))

        host, port = self._get_host_and_port_for_table(table)
        PSQL.run_sql_file_utility_mode(sql_file, host=host, port=port, out_file=out_file)
        self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #33
0
    def test_08_call_udf_gp_aovisimap_fordelete(self):
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_08 Verify the usage of UDF gp_aovisimap in utility mode for deleted tuple')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_gp_aovisimap_del_08.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_gp_aovisimap_del_08.sql')
        ans_file= os.path.join(self.anspath,'create_tab_gp_aovisimap_del_08.ans')
        tablename = 'uao_visimap_test08'

        # create uao table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        # get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        # get the segment_id where we'll log in utility mode and then get the hostname and port for this segment
        utilitymodeinfo = self.get_utilitymode_conn_info(relid=relid)
        u_port = utilitymodeinfo[0]
        u_host = utilitymodeinfo[1]

        before_tablerowcnt = self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host, port=u_port)
        before_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
        assert(int(before_visimaprowcnt) == 0)
        sql_cmd = "delete from uao_visimap_test08 ;"
        PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd, host=u_host, port=u_port, flags='-q -t')
        after_tablerowcnt = self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host, port=u_port)
        after_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
        assert(int(after_tablerowcnt) == 0)
Example #34
0
 def test_43_alter_table_with_oid(self):
     '''MPP-13870: Alter table Set Without Oids fails in case of inheritance'''
     sql_file = local_path('alter_table_with_oid.sql')
     out_file = local_path('alter_table_with_oid.out')
     ans_file = local_path('alter_table_with_oid.ans')
     PSQL.run_sql_file(sql_file = sql_file, out_file = out_file)
     self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #35
0
    def test_18_gp_persistent_relation_node_uaocs_table_eof_upd(self):
        tablename ='uaocs_table_test14'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_18 Verify the eof mark in pg_aoseg and gp_persistant_rel_node table for uaocs relation after update ')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.sql')
        ans_file= os.path.join(self.anspath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.ans')
        #create uaocs table and insert 10 rows
        sql_out=PSQL.run_sql_file(sql_file = sql_file,out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename )
        #get utility mode connection info
        utilitymodeinfo=self.get_utilitymode_conn_uaocs_info( relid=relid)
        u_port=utilitymodeinfo[0]
        u_host=utilitymodeinfo[1]

        assert (self.is_same_eof_uaocs_on_segment(relid=relid,host=u_host,port= u_port))

        # delete 1 row
        sql_cmd3="delete from  %s  where i = (select min(i) from %s );" % (tablename, tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd= sql_cmd3,host=u_host, port=u_port,flags='-q -t')
        self.vacuum_full(tablename=tablename)

        assert (self.is_same_eof_uaocs_on_segment(relid=relid,host=u_host,port= u_port))
Example #36
0
 def validate_sql_file(self, file):
     out_file = file.replace('.sql', '.out')
     ans_file = file.replace('.sql', '.ans')
     if os.path.exists(ans_file):
         assert Gpdiff.are_files_equal(out_file, ans_file)
     else:
         raise Exception("No .ans file exists for %s " % out_file)
Example #37
0
    def test_17_gp_persistent_relation_node_uaocs_table_eof_del(self):
        tablename = "uaocs_table_test14"
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info(
            "test_17 Verify the eof mark in pg_aoseg and gp_persistant_rel_node table for uaocs relation after delete "
        )
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath, "create_tab_gp_persistent_relation_node_uaocs_table_upd_14.out")
        sql_file = os.path.join(self.sqlpath, "create_tab_gp_persistent_relation_node_uaocs_table_upd_14.sql")
        ans_file = os.path.join(self.anspath, "create_tab_gp_persistent_relation_node_uaocs_table_upd_14.ans")
        # create uaocs table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        # get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        # get utility mode connection info
        utilitymodeinfo = self.get_utilitymode_conn_uaocs_info(relid=relid)
        u_port = utilitymodeinfo[0]
        u_host = utilitymodeinfo[1]
        assert self.is_same_eof_uaocs_on_segment(relid=relid, host=u_host, port=u_port)

        # update 1 row
        sql_cmd3 = "update %s set k = k+ 100 where i = (select min(i) from %s );" % (tablename, tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd3, host=u_host, port=u_port, flags="-q -t")
        self.vacuum()
        # self.vacuum(tablename=tablename)

        assert self.is_same_eof_uaocs_on_segment(relid=relid, host=u_host, port=u_port)
Example #38
0
 def test_ao_read_check_subtransaction(self):
     sql_file = os.path.join(self.sql_dir, 'sub_transaction.sql') 
     ans_file = os.path.join(self.ans_dir, 'sub_transaction.ans') 
     out_file = os.path.join(self.output_dir, 'sub_transaction.out')
     PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
     if not Gpdiff.are_files_equal(out_file, ans_file):
         raise Exception('Subtransaction tests failed !')
Example #39
0
 def test_vacuum_appendonly(self):
     out_file = os.path.join(self.output_dir, 'vacuum_ao_co.out')
     ans_file = os.path.join(self.ans_dir, 'vacuum_ao_co.ans')
     sql_file = os.path.join(self.sql_dir, 'vacuum_ao_co.sql')
     PSQL.run_sql_file(sql_file, out_file=out_file)
     if not Gpdiff.are_files_equal(out_file, ans_file):
         raise Exception('Vacuum table failed for append only tables !')
Example #40
0
    def test_08_call_udf_gp_aovisimap_fordelete(self):
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_08 Verify the usage of UDF gp_aovisimap in utility mode for deleted tuple')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_gp_aovisimap_del_08.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_gp_aovisimap_del_08.sql')
        ans_file= os.path.join(self.anspath,'create_tab_gp_aovisimap_del_08.ans')
        tablename = 'uao_visimap_test08'

        # create uao table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        # get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        # get the segment_id where we'll log in utility mode and then get the hostname and port for this segment
        utilitymodeinfo = self.get_utilitymode_conn_info(relid=relid)
        u_port = utilitymodeinfo[0]
        u_host = utilitymodeinfo[1]

        before_tablerowcnt = self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host, port=u_port)
        before_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
        assert(int(before_visimaprowcnt) == 0)
        sql_cmd = "delete from uao_visimap_test08 ;"
        PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd, host=u_host, port=u_port, flags='-q -t')
        after_tablerowcnt = self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host, port=u_port)
        after_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
        assert(int(after_tablerowcnt) == 0)
Example #41
0
    def do_test(self, timeout=0, sqlfile=None, host=None, port=None, username=None, password=None, flags='-a', usetemplate=False):
        """
        @summary: Run a PostGIS test case
        
        @param timeout: Number of seconds to run sql file before timing out
        @param sqlfile: The path to sql file (relative to TEST.py directory)
        @param host: The GPDB master host name to use to connect to database
        @param port: The GPDB port used to make connections to the database
        @param username: The database username to use to connect to the database
        @param password: The password for the database user used to connect to database
        """
        if sqlfile is None:
            testcase = inspect.stack()[1][3].split('test_')[1]
            
            #file = mkpath(testcase +".sql")
            file = local_path(testcase +".sql")
        else:
            #file = mkpath(sqlfile)
            file = local_path(sqlfile)
        # run psql on file, and check result
        #psql.runfile(file,timeout=timeout,host=host,port=port,username=username,password=password,flag=flags)
        #self.checkResult(ifile=file, optionalFlags=" -B")

        out_file = file.replace('.sql', '.out')
        ans_file = file.replace('.sql', '.ans')
        PSQL.run_sql_file(sql_file = file, out_file = out_file)
        self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
Example #42
0
    def test_18_gp_persistent_relation_node_uaocs_table_eof_upd(self):
        tablename = 'uaocs_table_test14'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_18 Verify the eof mark in pg_aoseg and gp_persistent_relation_node table for uaocs relation after update')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath, 'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.out')
        sql_file = os.path.join(self.sqlpath, 'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.sql')
        ans_file = os.path.join(self.anspath, 'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.ans')
        #create uaocs table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        #get utility mode connection info
        utilitymodeinfo = self.get_utilitymode_conn_uaocs_info(relid=relid)
        u_port = utilitymodeinfo[0]
        u_host = utilitymodeinfo[1]

        assert self.is_same_eof_uaocs_on_segment(relid=relid, host=u_host, port=u_port)

        # delete 1 row
        sql_cmd3 = "delete from %s where i = (select min(i) from %s);" % (tablename, tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd3, host=u_host, port=u_port, flags='-q -t')
        self.vacuum_full(tablename=tablename)

        assert self.is_same_eof_uaocs_on_segment(relid=relid, host=u_host, port=u_port)
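vacuum_full is another helper the snippet assumes. It presumably just issues VACUUM FULL against the master, which compacts the append-only segment files before the eof values are compared again; a one-line sketch under that assumption:

    def vacuum_full(self, tablename=None):
        # Compact the append-only table so stale segment data is reclaimed
        # before the eof / hidden-tuple checks are repeated.
        PSQL.run_sql_command(sql_cmd='vacuum full %s;' % tablename, flags='-q -t')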
Example #43
0
    def verify_out_file(self, out_file, ans_file):
        """
        The ans file might be replaced by a customized ans
        file.
        """
        def check_valid_suffix(suffix):
            if not re.match("[a-zA-Z0-9]+$", suffix):
                raise Exception("Invalid ans file suffix %s" % suffix)

        # Modify the ans file based on the suffix
        suffix = self.get_ans_suffix()
        if suffix:
            check_valid_suffix(suffix)
            new_ans_file = ans_file[:-4] + "_" + suffix + ".ans"
            if os.path.exists(new_ans_file):
                tinctest.logger.debug(
                    "Using customized ans file %s for this test" %
                    new_ans_file)
                ans_file = new_ans_file

        if ans_file is not None:
            self._transform_output_file(out_file)

            self.test_artifacts.append(ans_file)
            # Check if an init file exists in the same location as the sql file
            init_files = []
            init_file_path = os.path.join(self.get_sql_dir(), 'init_file')
            if os.path.exists(init_file_path):
                init_files.append(init_file_path)
            result = Gpdiff.are_files_equal(out_file,
                                            ans_file,
                                            match_sub=init_files)
            if not result:
                self.test_artifacts.append(out_file.replace('.out', '.diff'))
        return result
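The suffix mechanism above lets one test ship several answer files for the same output. For example, if get_ans_suffix returned 'optimizer' (a purely hypothetical value), foo.ans would be swapped for foo_optimizer.ans whenever that file exists. The renaming rule in isolation:

ans_file = 'expected/create_table.ans'
suffix = 'optimizer'  # hypothetical value returned by get_ans_suffix()
new_ans_file = ans_file[:-4] + '_' + suffix + '.ans'
# new_ans_file == 'expected/create_table_optimizer.ans'; it is only used when
# it exists on disk, otherwise the original .ans file is kept.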
Example #44
0
    def test_21_use_udf_gp_aovisimap_hidden_info_uao_upd_vacuum(self):
        tablename ='uao_table_test11'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_21 Verify the hidden tup_count using UDF gp_aovisimap_hidden_info(oid)  for uao relation after update_vacuum')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.sql')
        ans_file= os.path.join(self.anspath,'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.ans')

        #create uao table and insert 10 rows
        sql_out=PSQL.run_sql_file(sql_file = sql_file,out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        #get utility mode connection info
        utilitymodeinfo=self.get_utilitymode_conn_info(relid=relid)
        u_port=utilitymodeinfo[0]
        u_host=utilitymodeinfo[1]

        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))

        # update  rows
        sql_cmd3="update %s set j = 'test11' ;" % ( tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd= sql_cmd3,host=u_host, port=u_port,flags='-q -t')

        assert(int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)) > 0)

        self.vacuum_full(tablename=tablename)
        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))
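get_hidden_tup_cnt is not shown either. It presumably sums the hidden tuple counts reported by gp_aovisimap_hidden_info(oid) on the chosen segment; the column name and the assumption that the utility-mode PSQL wrapper returns psql's stdout are guesses, not taken from this snippet:

    def get_hidden_tup_cnt(self, relid=0, host=None, port=None):
        # Sum the per-segment-file hidden tuple counts for the relation.
        sql_cmd = ("select coalesce(sum(hidden_tupcount), 0) "
                   "from gp_aovisimap_hidden_info(%s);" % relid)
        out = PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd, host=host,
                                                port=port, flags='-q -t')
        return out.strip()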
Example #45
0
 def validate_sql(self, ans_file, out_file):
     ''' Compare the out and ans files '''
     init_file = os.path.join(self.base_dir, "sql", 'init_file')
     result1 = Gpdiff.are_files_equal(out_file,
                                      ans_file,
                                      match_sub=[init_file])
     self.assertTrue(result1, 'Gpdiff.are_files_equal')
Example #46
0
    def test_09_call_udf_gp_aovisimap_forupdate(self):
        tablename = 'uao_visimap_test09'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_09 Verify the usage of UDF gp_aovisimap in utility mode for update tuple')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath, 'create_tab_gp_aovisimap_upd_09.out')
        sql_file = os.path.join(self.sqlpath, 'create_tab_gp_aovisimap_upd_09.sql')
        ans_file = os.path.join(self.anspath, 'create_tab_gp_aovisimap_upd_09.ans')

        #create uao table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename)
        utilitymodeinfo = self.get_utilitymode_conn_info(relid=relid)
        u_port = utilitymodeinfo[0]
        u_host = utilitymodeinfo[1]
        #login to segment in utility mode and execute the gp_aovisimap(relid) UDF
        before_tablerowcnt = self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host, port=u_port)
        before_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
        assert int(before_visimaprowcnt) == 0
        sql_cmd = "update %s set j = j || '_9';" % (tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd, host=u_host, port=u_port, flags='-q -t')
        after_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
        assert int(after_visimaprowcnt) > 0
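The two visimap tests also depend on get_rowcnt_table_on_segment and get_visimap_cnt_on_segment. Plausible sketches, assuming each row returned by gp_aovisimap(oid) describes one invisible tuple and that the utility-mode wrapper returns psql's stdout:

    def get_rowcnt_table_on_segment(self, tablename=None, host=None, port=None):
        # Count the table's rows that live on this particular segment.
        sql_cmd = 'select count(*) from %s;' % tablename
        return PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd, host=host,
                                                 port=port, flags='-q -t').strip()

    def get_visimap_cnt_on_segment(self, relid=0, host=None, port=None):
        # Number of tuples currently marked invisible in the visibility map.
        sql_cmd = 'select count(*) from gp_aovisimap(%s);' % relid
        return PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd, host=host,
                                                 port=port, flags='-q -t').strip()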
Example #47
0
 def test_vacuum_appendonly(self):
     out_file = os.path.join(self.output_dir, 'vacuum_ao_co.out')
     ans_file = os.path.join(self.ans_dir, 'vacuum_ao_co.ans')
     sql_file = os.path.join(self.sql_dir, 'vacuum_ao_co.sql')
     PSQL.run_sql_file(sql_file, out_file=out_file)
     if not Gpdiff.are_files_equal(out_file, ans_file):
         raise Exception('Vacuum table failed for append only tables !')
Example #48
0
    def test_20_use_udf_gp_aovisimap_hidden_info_uaocs_del(self):
        tablename ='uaocs_table_test14'
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info('test_20 Verify the hidden tup_count using UDF gp_aovisimap_hidden_info(oid)  for uaocs relation after delete ')
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.out')
        sql_file = os.path.join(self.sqlpath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.sql')
        ans_file= os.path.join(self.anspath,'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.ans')
        #create uaocs table and insert 10 rows
        sql_out=PSQL.run_sql_file(sql_file = sql_file,out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        #get relid for newly created table
        relid = self.get_relid(file_name=tablename )
        #get utility mode connection info
        utilitymodeinfo=self.get_utilitymode_conn_uaocs_info( relid=relid)
        u_port=utilitymodeinfo[0]
        u_host=utilitymodeinfo[1]

        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))

        # delete 1 row
        sql_cmd3="delete from  %s  where i = (select min(i) from %s );" % (tablename, tablename)
        PSQL.run_sql_command_utility_mode(sql_cmd= sql_cmd3,host=u_host, port=u_port,flags='-q -t')

        assert(1 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))

        self.vacuum_full(tablename=tablename)
        assert(0 == int(self.get_hidden_tup_cnt(relid=relid,host=u_host,port= u_port)))
Example #49
0
 def validate_sql_file(self, file):
     out_file = file.replace('.sql', '.out')
     ans_file = file.replace('.sql', '.ans')
     if os.path.exists(ans_file):
         assert Gpdiff.are_files_equal(out_file, ans_file)
     else:
         raise Exception("No .ans file exists for %s " % out_file)
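A typical call site for validate_sql_file, assuming the test first runs the SQL file and writes the matching .out next to it (the test and file names are illustrative):

 def test_basic_queries(self):
     # hypothetical test using validate_sql_file
     sql_file = local_path('basic_queries.sql')
     PSQL.run_sql_file(sql_file, out_file=sql_file.replace('.sql', '.out'))
     self.validate_sql_file(sql_file)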
Example #50
0
    def do_test(self,
                timeout=0,
                sqlfile=None,
                host=None,
                port=None,
                username=None,
                password=None,
                flags='-a',
                usetemplate=False):
        """
        @summary: Run a PostGIS test case
        
        @param timeout: Number of seconds to run sql file before timing out
        @param sqlfile: The path to sql file (relative to TEST.py directory)
        @param host: The GPDB master host name to use to connect to database
        @param port: The GPDB port used to make connections to the database
        @param username: The database username to use to connect to the database
        @param password: The password for the database user used to connect to database
        """
        if sqlfile is None:
            testcase = inspect.stack()[1][3].split('test_')[1]

            #file = mkpath(testcase +".sql")
            file = local_path(testcase + ".sql")
        else:
            # derive the test case name from the explicit sql file so the
            # .out/.ans paths below can still be constructed
            testcase = os.path.splitext(os.path.basename(sqlfile))[0]
            #file = mkpath(sqlfile)
            file = local_path(sqlfile)
        # run psql on file, and check result
        #psql.runfile(file,timeout=timeout,host=host,port=port,username=username,password=password,flag=flags)
        #self.checkResult(ifile=file, optionalFlags=" -B")

        out_file = local_path(testcase + ".out")
        ans_file = local_path(testcase + ".ans")
        PSQL.run_sql_file(sql_file=file, out_file=out_file)
        self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
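With do_test in place, each PostGIS case in a subclass reduces to either relying on the test method's name or passing an explicit file (both test and file names below are hypothetical):

    def test_geometry_basic(self):
        # runs geometry_basic.sql and diffs geometry_basic.out against its .ans
        self.do_test()

    def test_with_explicit_file(self):
        self.do_test(sqlfile='spatial_index.sql')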
Example #51
0
    def test_11_tupcount_in_pg_aoseg_uaotable_upd(self):
        tablename = "uao_table_test11"
        tinctest.logger.info("-------------------------------")
        tinctest.logger.info("test_11 Verify the tupcount update to pg_aoseg wile updating uao table ")
        tinctest.logger.info("-------------------------------\n")
        out_file = os.path.join(self.outpath, "create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.out")
        sql_file = os.path.join(self.sqlpath, "create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.sql")
        ans_file = os.path.join(self.anspath, "create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.ans")

        # create uao table and insert 10 rows
        sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
        assert Gpdiff.are_files_equal(out_file, ans_file)
        # get relid for newly created table
        relid = self.get_relid(file_name=tablename)

        # get the count from pg_aoseg before the update is performed
        pgaoseg_tupcount = self.get_tupcount_pgaoseg(relid=relid)
        tab_rowcount_bfrupd = self.get_rowcnt_table(tablename=tablename)

        sql_cmd3 = "update %s set j=j||'test11' where i = 1;" % tablename
        PSQL.run_sql_command(sql_cmd=sql_cmd3, flags="-q -t")

        # get the count from pg_aoseg after  the update is performed
        pgaoseg_tupcount_aftupd = self.get_tupcount_pgaoseg(relid=relid)
        tab_rowcount_aftupd = self.get_rowcnt_table(tablename=tablename)

        assert int(pgaoseg_tupcount_aftupd) == (int(tab_rowcount_aftupd)) + 1
        assert int(tab_rowcount_bfrupd) == (int(tab_rowcount_aftupd))
        assert int(pgaoseg_tupcount_aftupd) == (int(pgaoseg_tupcount)) + 1
        # vacuum
        self.vacuum_full(tablename=tablename)

        pgaoseg_tupcount_aftvacuum = self.get_tupcount_pgaoseg(relid=relid)
        tab_rowcount_aftvacuum = self.get_rowcnt_table(tablename=tablename)
        assert int(pgaoseg_tupcount_aftvacuum) == (int(tab_rowcount_aftvacuum))
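get_tupcount_pgaoseg and get_rowcnt_table are again assumed helpers. The sketch below guesses that the row-oriented AO segment catalog pg_aoseg.pg_aoseg_<oid> is summed over its tupcount column; treat the schema, table, and column names as assumptions rather than facts from this snippet:

    def get_tupcount_pgaoseg(self, relid=0):
        # Total tuple count recorded across all segment files of the AO table.
        sql_cmd = "select sum(tupcount) from pg_aoseg.pg_aoseg_%s;" % relid
        return PSQL.run_sql_command(sql_cmd=sql_cmd, flags='-q -t').strip()

    def get_rowcnt_table(self, tablename=None):
        # Visible row count as seen through an ordinary scan on the master.
        sql_cmd = "select count(*) from %s;" % tablename
        return PSQL.run_sql_command(sql_cmd=sql_cmd, flags='-q -t').strip()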
Example #52
0
    def verify_out_file(self, out_file, ans_file):
        """
        The ans file might be replaced by a customized ans
        file.
        """

        def check_valid_suffix(suffix):
            if not re.match("[a-zA-Z0-9]+$", suffix):
                raise Exception("Invalid ans file suffix %s" % suffix)

        # Modify the ans file based on the suffix
        suffix = self.get_ans_suffix()
        if suffix:
            check_valid_suffix(suffix)
            new_ans_file = ans_file[:-4] + "_" + suffix + ".ans"
            if os.path.exists(new_ans_file):
                tinctest.logger.debug("Using customized ans file %s for this test" % new_ans_file)
                ans_file = new_ans_file

        if ans_file is not None:
            self._transform_output_file(out_file)

            self.test_artifacts.append(ans_file)
            # Check if an init file exists in the same location as the sql file
            init_files = []
            init_file_path = os.path.join(self.get_sql_dir(), "init_file")
            if os.path.exists(init_file_path):
                init_files.append(init_file_path)
            result = Gpdiff.are_files_equal(out_file, ans_file, match_sub=init_files)
            if not result:
                self.test_artifacts.append(out_file.replace(".out", ".diff"))
        return result
Example #53
0
 def test_ao_read_check_subtransaction(self):
     sql_file = os.path.join(self.sql_dir, 'sub_transaction.sql')
     ans_file = os.path.join(self.ans_dir, 'sub_transaction.ans')
     out_file = os.path.join(self.output_dir, 'sub_transaction.out')
     PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
     if not Gpdiff.are_files_equal(out_file, ans_file):
         raise Exception('Subtransaction tests failed !')
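Several snippets above reference self.sql_dir, self.ans_dir, and self.output_dir without showing their setup. One plausible way to wire them up, assuming the sql/, expected/, and output/ directories sit next to the test module (the directory names are assumptions):

    @classmethod
    def setUpClass(cls):
        # Resolve the test's data directories relative to the module so the
        # suite can be launched from any working directory.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        cls.sql_dir = os.path.join(base_dir, 'sql')
        cls.ans_dir = os.path.join(base_dir, 'expected')
        cls.output_dir = os.path.join(base_dir, 'output')
        if not os.path.exists(cls.output_dir):
            os.mkdir(cls.output_dir)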