Code example #1
    def template0_wrap_around(self):
        """
        Raise next xid so that age(template0) suffers a wrap around and
        becomes negative.  Create a new database off template0, which
        also suffers wrap around.  Reset the new db's age.  Sanity
        must succeed on the new db.

        """
        self._raise_template0_age(self.WRAP_LIMIT, self.gparray.master)
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        # Xid wrap-around should cause template0 and newdb's age to be negative.
        self.assertTrue(age_newdb < 0)
        # All xids in newdb are frozen at this point.  Therefore, we
        # can reset its age so that it is not negative.
        self._reset_age("newdb")
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        self.assertTrue(age_newdb > 0)
        # Verify that normal operations can be performed on newdb post recovery
        # from wraparound.
        self._basic_sanity_check("clean", {"dbname":"newdb"})
        logger.info("Sanity succeeded on newdb, dropping it.")
        PSQL.drop_database(dbname="newdb")
Code example #2
    def test_autovacuum_signaling_on_segment(self):
        """
        Same as above, but on a segment.
        """
        # connect to the master to build gparray
        primary, _ = self._get_primary_mirror_pair()
        logger.info('Isolated segment %d at %s:%d' % (primary.dbid, primary.hostname, primary.port))
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)

        with dbconn.connect(dburl, utility=True) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising segment xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        with dbconn.connect(dburl, utility=True) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ssh %s ps -ef | grep -v grep | grep postgres | grep autovacuum' % primary.hostname)
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
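
The xid_sum helper used above (and in several later examples) is not shown on this page. A minimal sketch of what such a helper might look like, assuming plain modulo-2**32 arithmetic that skips the three reserved XIDs; the project's actual implementation may differ:

FIRST_NORMAL_XID = 3  # PostgreSQL reserves XIDs 0-2 (invalid, bootstrap, frozen)

def xid_sum(xid, delta):
    # Hypothetical sketch: wrap within the 32-bit XID space while skipping
    # the reserved values below FIRST_NORMAL_XID.
    normal_span = 2**32 - FIRST_NORMAL_XID
    return (xid - FIRST_NORMAL_XID + delta) % normal_span + FIRST_NORMAL_XID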
Code example #3
 def template0_wrap_around_on_segment(self, primary):
     """
     Same as template0_wrap_around, but on segment.
     """
     logger.info("template0_wrap_around_on_segment: dbid(%d) %s:%d'" %
                 (primary.dbid, primary.hostname, primary.port))
     self._raise_template0_age(self.WRAP_LIMIT, primary)
     PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
         validateAfter=True)
     sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
     # Verify that age of newdb on the segment is negative.
     dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
     with dbconn.connect(dburl, utility=True) as conn:
         age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
     self.assertTrue(age_newdb < 0)
     # Reset newdb age so as to recover from wrap around.
     self._reset_age("newdb", primary)
     # Verify that normal operations can be performed on newdb whose age was
     # reset to a correct value.
     self._basic_sanity_check("clean", {"dbname":"newdb"})
     # Verify that age of newdb on the segment is valid.
     with dbconn.connect(dburl, utility=True) as conn:
         age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
     self.assertTrue(age_newdb > 0)
     PSQL.drop_database(dbname="newdb")
Code example #4
    def test_autovacuum_signaling(self):
        """
        Raise the nextXid to oldest_frozenxid + autovacuum_freeze_max_age.
        Run a transaction.
        Ensure that no autovacuum daemon is started.
        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising master xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        # Burn a transaction to trigger any undesirable behavior that we're disabling.
        with dbconn.connect(dburl) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ps -ef | grep -v grep | grep postgres | grep autovacuum')
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
Code example #5
File: environment.py Project: davecramer/gpdb
def before_feature(context, feature):
    drop_database_if_exists(context, 'testdb')
    drop_database_if_exists(context, 'bkdb')
    drop_database_if_exists(context, 'fullbkdb')
    drop_database_if_exists(context, 'schematestdb')

    # enable postgis when running test cases for postgis
    if 'postgis' in feature.tags:
        drop_database_if_exists(context, 'opengeo')
        create_database(context, 'opengeo')
        enable_postgis_and_load_test_data(context)

    if 'postgis_14' in feature.tags:
        drop_database_if_exists(context, 'opengeo')
        create_database(context, 'opengeo')
        enable_postgis_and_load_test_data_for_postgis_1(context)

    if 'gpperfmon' in feature.tags:
        drop_database_if_exists(context, 'gpperfmon')
        pgport = os.getenv('PGPORT', 5432)
        command = "gpperfmon_install --enable --password changeme --port %s" % pgport
        run_gpcommand(context, command)
        run_gpcommand(context, "gpstop -ar")

    if 'analyzedb' in feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'incr_analyze')
        create_database(context, 'incr_analyze')
        drop_database_if_exists(context, 'incr_analyze_2')
        create_database(context, 'incr_analyze_2')
        context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'))
        context.dbname = 'incr_analyze'

        # setting up the tables that will be used
        context.execute_steps(u"""
        Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
        And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
        And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
        And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
        """)

    if 'minirepro' in feature.tags:
        start_database_if_not_started(context)
        minirepro_db = 'minireprodb'
        drop_database_if_exists(context, minirepro_db)
        create_database(context, minirepro_db)
        context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db))
        context.dbname = minirepro_db
        dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
        dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
        dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
        dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
        dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
        dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
        dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
        dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
        dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
        context.conn.commit()
Code example #6
File: gplog.py Project: 50wu/gpdb
 def _test_connection(host='localhost',port=_DEFAULT_PORT, user=_DEFAULT_USER,
                      dbname=_DEFAULT_USER):
     try:
         connect(DbURL(hostname=host,
                       port=port,
                       dbname=dbname,
                       username=user))
     except Exception, expt:
         tinctest.logger.error("Failed to connect to hostname %s, port %s, database %s, as user %s"
                               % (host, port, dbname, user))
         tinctest.logger.exception(expt)
         return False
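
Note that the helper above only returns False explicitly when the connection attempt fails (there is no explicit return on success). A hypothetical caller, with illustrative host and port values, would therefore test against that value:

# Hypothetical usage of _test_connection; host and port are illustrative.
if _test_connection(host='mdw', port=5432) is False:
    tinctest.logger.warning('master not reachable, skipping the check')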
Code example #7
 def template0_stop_limit_on_segment(self, primary):
     """
     Same as template0_stop_limit, but on segment.
     """
     logger.info("template0_stop_limit_on_segment: dbid(%d) %s:%d'" %
                 (primary.dbid, primary.hostname, primary.port))
     dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
     with dbconn.connect(dburl, utility=True) as conn:
         sql = "SHOW xid_stop_limit"
         slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
     new_limit = xid_sum(slimit_guc, -(10**6))
     # Raise nextXid so that template0 age would cross stop limit.
     self._raise_template0_age(self.STOP_LIMIT, primary)
     # newdb's age crosses stop limit and GPDB stops accepting commands.
     PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
         validateAfter=True)
     logger.info("newdb created off template0")
     # Ensure that utility connections to the segment fail with error.
     psql_args = {"PGOPTIONS":"-c 'gp_session_role=utility'",
                  "host":primary.hostname,
                  "port":primary.port}
     self._basic_sanity_check("error", psql_args)
     logger.info("Utility connection to dbid(%d) reported stop limit "
                 "error, as expected." % primary.dbid)
     try:
         # Verify that SQL commands from master fail.
         PSQL(sql_cmd="CREATE TABLE test (a int, b int)").run(
             validateAfter=True)
         self.fail("CREATE TABLE succeeded from master, when expecting "
                   "stop limit error on segment.")
     except ExecutionError:
         logger.info("CREATE TABLE failed from master, as expected.")
     # Reduce xid_stop_limit as per the standard procedure.
     self._reduce_stop_limit_guc(primary, new_limit)
     # Vacuum freezing newdb should suffice to recover.
     PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
          out_file="vacuum_newdb_wl.out").run(validateAfter=True)
     # Ensure that utility connections to the segment are successful.
     sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
     with dbconn.connect(dburl, utility=True) as conn:
         age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
     self.assertTrue(age_newdb > 0)
     # Verify SQL commands from master are successful.
     self._basic_sanity_check("clean")
     self._restore_stop_limit_guc(primary.datadir)
     # Verify SQL commands after restoring xid_stop_limit GUC.
     self._basic_sanity_check("clean")
     PSQL.drop_database(dbname="newdb")
Code example #8
File: dump.py Project: PivotalBigData/incubator-hawq
    def execute(self):
        needed_space = 0
        dburl = dbconn.DbURL(dbname=self.dump_database, port=self.segport)
        conn = None
        try:
            conn = dbconn.connect(dburl, utility=True)
            if self.include_dump_tables:
                for dump_table in self.include_dump_tables:
                    needed_space += execSQLForSingleton(conn, "SELECT pg_relation_size('%s')/1024;" % dump_table)
            else:
                needed_space = execSQLForSingleton(conn, "SELECT pg_database_size('%s')/1024;" % self.dump_database)
        finally:
            if conn is not None:
                conn.close()
        if self.compress:
            needed_space = needed_space / COMPRESSION_FACTOR
        
        # get free available space
        stat_res = os.statvfs(self.datadir);
        free_space = (stat_res.f_bavail * stat_res.f_frsize) / 1024

        if free_space == 0 or (free_space - needed_space) / free_space < self.free_space_percent / 100:
            logger.error("Disk space: [Need: %dK, Free %dK]" % (needed_space, free_space))
            raise NotEnoughDiskSpace(free_space, needed_space)
        logger.info("Disk space: [Need: %dK, Free %dK]" % (needed_space, free_space))
Code example #9
    def template0_stop_limit(self):
        """
        Raise next xid so that age(template0) grows beyond stop limit.
        Create a new database off template0, let GPDB stop accepting
        commands.  Recover GPDB using the documented procedure.
        Ensure that the new database is sane.

        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl, utility=True) as conn:
            sql = "SHOW xid_stop_limit"
            slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
        new_limit = xid_sum(slimit_guc, -(10**6))
        # Raise nextXid so that template0 age would cross stop limit.
        self._raise_template0_age(self.STOP_LIMIT, self.gparray.master)
        # newdb's age crosses stop limit and GPDB stops accepting commands.
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        self._basic_sanity_check("error")
        # Reduce xid_stop_limit as per the standard procedure.
        self._reduce_stop_limit_guc(self.gparray.master, new_limit)
        # Vacuum freezing newdb should suffice to recover.
        PSQL(sql_cmd="VACUUM FREEZE",
             dbname="newdb",
             out_file="vacuum_newdb_stop_master.out").run(validateAfter=True)
        self._basic_sanity_check("clean")
        PSQL.drop_database(dbname="newdb")
        self._restore_stop_limit_guc(self.gparray.master.datadir)
Code example #10
File: utils.py Project: pf-qiu/gpdb
def create_int_table(context, table_name, table_type='heap', dbname='testdb'):
    CREATE_TABLE_SQL = None
    NROW = 1000

    table_type = table_type.upper()
    if table_type == 'AO':
        CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE) as select generate_series(1,%d) as c1' % (
        table_name, NROW)
    elif table_type == 'CO':
        CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE, orientation=column) as select generate_series(1, %d) as c1' % (
        table_name, NROW)
    elif table_type == 'HEAP':
        CREATE_TABLE_SQL = 'create table %s as select generate_series(1, %d) as c1' % (table_name, NROW)

    if CREATE_TABLE_SQL is None:
        raise Exception('Invalid table type specified')

    SELECT_TABLE_SQL = 'select count(*) from %s' % table_name
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, CREATE_TABLE_SQL)
        conn.commit()

        result = dbconn.execSQLForSingleton(conn, SELECT_TABLE_SQL)
        if result != NROW:
            raise Exception('Integer table creation was not successful. Expected %d does not match %d' % (NROW, result))
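
A hypothetical call to the helper above; table_type is upper-cased internally, so 'heap', 'ao' and 'co' (in any letter case) are the accepted values:

# Hypothetical usage; table and database names are illustrative.
create_int_table(context, 'ao_counts', table_type='ao', dbname='testdb')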
Code example #11
File: reload.py Project: 50wu/gpdb
    def validate_mid_level_partitions(self, schema_name, table_name):
        partition_level, max_level = None, None
        with dbconn.connect(dbconn.DbURL(dbname=self.database, port=self.port)) as conn:
            parent_schema, parent_table = self.parent_partition_map[(schema_name, table_name)]
            if (parent_schema, parent_table) == (schema_name, table_name):
                return
            try:
                max_level = dbconn.execSQLForSingleton(conn, 
                                                   """SELECT max(partitionlevel)
                                                      FROM pg_partitions
                                                      WHERE tablename='%s'
                                                      AND schemaname='%s'
                                                   """ % (parent_table, parent_schema))
            except Exception as e:
                logger.debug('Unable to get the maximum partition level for table %s: (%s)' % (table_name, str(e)))

            try:
                partition_level = dbconn.execSQLForSingleton(conn,
                                                         """SELECT partitionlevel
                                                            FROM pg_partitions
                                                            WHERE partitiontablename='%s'
                                                            AND partitionschemaname='%s'
                                                         """ % (table_name, schema_name))
            except Exception as e:
                logger.debug('Unable to get the partition level for table %s: (%s)' % (table_name, str(e)))

            if partition_level != max_level:
                logger.error('Partition level of the table = %s, Max partition level = %s' % (partition_level, max_level))
                raise Exception('Mid Level partition %s.%s is not supported by gpreload. Please specify only leaf partitions or parent table name' % (schema_name, table_name))
Code example #12
File: utils.py Project: pf-qiu/gpdb
def check_count_for_specific_query(dbname, query, nrows):
    NUM_ROWS_QUERY = '%s' % query
    # We want to bubble up the exception so that if table does not exist, the test fails
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
    if result != nrows:
        raise Exception('%d rows in query: %s. Expected row count = %d' % (result, query, nrows))
Code example #13
File: test_cluster_dbconn.py Project: adam8157/gpdb
    def test_client_encoding_can_be_set(self):
        encoding = 'SQL_ASCII'

        with dbconn.connect(self.url, encoding=encoding) as conn:
            actual = dbconn.execSQLForSingleton(conn, 'SHOW client_encoding')

        self.assertEqual(actual, encoding)
Code example #14
    def _reduce_stop_limit_guc(self, segdb, new_slimit):
        """
        Reduce the xid_stop_limit GUC to the specified value.

        @param segdb: segment whose postgresql.conf (in its data directory)
        needs to be modified.

        @param new_slimit: New value of xid_stop_limit GUC, less than the
        default value of 10**9.
        """
        for seg in self.gparray.getDbList(True):
            logger.info("Stopping segment %d at %s" % (seg.dbid, seg.datadir))
            cmd = "pg_ctl -D %s stop" % seg.datadir
            Command("stop segment", cmd).run(validateAfter=True)
        logger.info("New xid_stop_limit: %s" % new_slimit)
        cmd = ('echo "xid_stop_limit=%d" >> %s/postgresql.conf' %
               (new_slimit, segdb.datadir))
        Command("revise xid_stop_limit", cmd).run(validateAfter=True)
        logger.info("Starting the cluster")
        cmd = "source $GPHOME/greenplum_path.sh && gpstart -a"
        Command("start cluster", cmd).run(validateAfter=True)
        dburl = dbconn.DbURL(hostname=segdb.hostname, port=segdb.port)
        with dbconn.connect(dburl, utility=True) as conn:
            stop_limit = int(
                dbconn.execSQLForSingleton(conn, "SHOW xid_stop_limit"))
        self.assertEqual(stop_limit, new_slimit, "Failed to set xid_stop_limit")
Code example #15
def connectToDb(hawq_master_host, hawq_master_port, database):
   'connect to database'
   url = dbconn.DbURL(hawq_master_host,
                      port=hawq_master_port,
                      dbname=database)
   return dbconn.connect(dburl=url)
Code example #16
File: icg.py Project: PengJi/gpdb-comments
    def count_wal_sender(self):
        sql = "SELECT count(*) FROM pg_stat_replication"
        with dbconn.connect(dbconn.DbURL()) as conn:
            curs = dbconn.execSQL(conn, sql)
            results = curs.fetchall()

        return results[0][0]
Code example #17
def _get_segment_version(seg):
    try:
        if seg.role == gparray.ROLE_PRIMARY:
            dburl = dbconn.DbURL(hostname=seg.hostname, port=seg.port, dbname="template1")
            conn = dbconn.connect(dburl, utility=True)
            return dbconn.execSQLForSingleton(conn, "select version()")

        if seg.role == gparray.ROLE_MIRROR:
            cmd = base.Command("Try connecting to mirror",
                               "psql -h %s -p %s template1 -c 'select 1'"
                               %(seg.hostname, seg.port))
            cmd.run(validateAfter=False)
            if cmd.results.rc == 0:
                raise RuntimeError("Connection to mirror succeeded unexpectedly")

            stderr = cmd.results.stderr.splitlines()
            for line in stderr:
                match = _version_regex.match(line)
                if match:
                    return match.group(1)

            raise RuntimeError("Unexpected error from mirror connection: %s" % cmd.results.stderr)

        logger.error("Invalid role '%s' for dbid %d", seg.role, seg.dbid)
        return None

    except Exception as ex:
        logger.error("Could not get segment version for dbid %d", seg.dbid, exc_info=ex)
        return None
Code example #18
File: verify.py Project: nellaivijay/incubator-hawq
 def execute(self):
     dburl = dbconn.DbURL()
     query = self.UPDATE_VERIFICATION_ENTRY % (self.state, self.done,
                                               self.mismatch, self.token)
     with dbconn.connect(dburl, allowSystemTableMods='dml') as conn:
         dbconn.execSQL(conn, query)
         conn.commit()
Code example #19
File: verify.py Project: nellaivijay/incubator-hawq
 def execute(self):
     dburl = dbconn.DbURL()
     query = self.INSERT_VERIFICATION_ENTRY % (
         self.token, self.type, self.content, VerificationState.RUNNING)
     with dbconn.connect(dburl, allowSystemTableMods='dml') as conn:
         dbconn.execSQL(conn, query)
         conn.commit()
Code example #20
File: utils.py Project: pf-qiu/gpdb
def create_partition(context, tablename, storage_type, dbname, compression_type=None, partition=True, rowcount=1094,
                     with_data=True, host=None, port=0, user=None):
    interval = '1 year'

    table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
    create_table_str = "Create table " + tablename + "(" + table_definition + ")"
    storage_type_dict = {'ao': 'row', 'co': 'column'}

    part_table = " Distributed Randomly Partition by list(Column2) \
                    Subpartition by range(Column3) Subpartition Template \
                    (start (date '%s') end (date '%s') every (interval '%s')) \
                    (Partition p1 values('backup') , Partition p2 values('restore')) " \
                 % (PARTITION_START_DATE, PARTITION_END_DATE, interval)

    if storage_type == "heap":
        create_table_str = create_table_str
        if partition:
            create_table_str = create_table_str + part_table

    elif storage_type == "ao" or storage_type == "co":
        create_table_str = create_table_str + " WITH(appendonly = true, orientation = %s) " % storage_type_dict[
            storage_type]
        if compression_type is not None:
            create_table_str = create_table_str[:-2] + ", compresstype = " + compression_type + ") "
        if partition:
            create_table_str = create_table_str + part_table

    create_table_str = create_table_str + ";"

    with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
        dbconn.execSQL(conn, create_table_str)
        conn.commit()

    if with_data:
        populate_partition(tablename, PARTITION_START_DATE, dbname, 0, rowcount, host, port, user)
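
A hypothetical call to the helper above. When compression_type is given, the [:-2] slice strips the trailing ') ' from the WITH clause so the compresstype option can be appended before the clause is re-closed:

# Hypothetical usage; names and row count are illustrative.
create_partition(context, 'sales_ao', 'ao', 'testdb',
                 compression_type='zlib', rowcount=500)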
Code example #21
File: utils.py Project: pf-qiu/gpdb
def drop_schema(context, schema_name, dbname):
    SQL = 'drop schema %s cascade' % schema_name
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, SQL)
        conn.commit()
    if check_schema_exists(context, schema_name, dbname):
        raise Exception('Unable to successfully drop the schema %s' % schema_name)
Code example #22
File: utils.py Project: pf-qiu/gpdb
def create_external_partition(context, tablename, dbname, port, filename):
    table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
    create_table_str = "Create table %s (%s) Distributed randomly \
                        Partition by range(Column3) ( \
                        partition p_1  start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
                        partition p_2  start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
                        partition s_3  start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
                        partition s_4  start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
                        partition s_5  start(date '2014-01-01') end(date '2015-01-01') ) \
                        ;" % (tablename, table_definition)

    master_hostname = get_master_hostname();
    create_ext_table_str = "Create readable external table %s_ret (%s) \
                            location ('gpfdist://%s:%s/%s') \
                            format 'csv' encoding 'utf-8' \
                            log errors segment reject limit 1000 \
                            ;" % (tablename, table_definition, master_hostname[0][0].strip(), port, filename)

    alter_table_str = "Alter table %s exchange partition p_2 \
                       with table %s_ret without validation \
                       ;" % (tablename, tablename)

    drop_table_str = "Drop table %s_ret;" % (tablename)

    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, create_table_str)
        dbconn.execSQL(conn, create_ext_table_str)
        dbconn.execSQL(conn, alter_table_str)
        dbconn.execSQL(conn, drop_table_str)
        conn.commit()

    populate_partition(tablename, '2010-01-01', dbname, 0, 100)
Code example #23
File: backup_utils.py Project: asimrp/gpdb
def get_all_parent_tables(dbname):
    SQL = "SELECT DISTINCT (schemaname || '.' || tablename) FROM pg_partitions"
    data = []
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        curs = dbconn.execSQL(conn, SQL)
        data = curs.fetchall()
    return set([d[0] for d in data])
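
A hypothetical caller of the helper above; it returns a set of 'schema.tablename' strings, one per distinct partitioned parent table:

# Hypothetical usage; the database name is illustrative.
parent_tables = get_all_parent_tables('testdb')
logger.info('found %d partitioned parent tables' % len(parent_tables))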
Code example #24
File: gpexpand_dml.py Project: adam8157/gpdb
    def run(self):
        conn = dbconn.connect(dbconn.DbURL(dbname=self.dbname))

        self.loop(conn)
        self.verify(conn)

        conn.commit()
Code example #25
File: reload.py Project: 50wu/gpdb
 def reload_tables(self):
     with dbconn.connect(dbconn.DbURL(dbname=self.database, port=self.port)) as conn:
         conn.commit()   #Commit the implicit transaction started by connect
         for schema_name, table_name, sort_column_list in self.table_list:
             logger.info('Starting reload for table {schema}.{table}'.format(schema=schema_name, table=table_name))
             logger.info('Table {schema}.{table} has {rows} rows and {size} size'
                     .format(schema=schema_name, table=table_name,
                      rows=self.get_row_count('%s.%s' % (schema_name, table_name)),
                      size=self.get_table_size(schema_name, table_name)))
             if not self.check_indexes(schema_name, table_name):
                 logger.info('Skipping reload for {schema}.{table}'.format(schema=schema_name, table=table_name))
                 continue
             start = time.time()
             dbconn.execSQL(conn, 'BEGIN')
             dbconn.execSQL(conn, """CREATE TEMP TABLE temp_{table} AS SELECT * FROM {schema}.{table}"""
                                  .format(schema=schema_name, table=table_name))
             temp_row_count = dbconn.execSQLForSingleton(conn, """SELECT count(*) FROM temp_{table}""".format(table=table_name))
             table_row_count = dbconn.execSQLForSingleton(conn, """SELECT count(*) from {schema}.{table}"""
                                                                 .format(table=table_name, schema=schema_name))
             if temp_row_count != table_row_count:
                 raise Exception('Row count for temp table(%s) does not match(%s)' % (temp_row_count, table_row_count))
             dbconn.execSQL(conn, 'TRUNCATE TABLE {schema}.{table}'.format(schema=schema_name, table=table_name))
             sort_order = ['%s %s' % (c[0], c[1]) for c in sort_column_list]
             parent_schema_name, parent_table_name = self.parent_partition_map[(schema_name, table_name)]
             dbconn.execSQL(conn, """INSERT INTO {parent_schema}.{parent_table} SELECT * FROM temp_{table} ORDER BY {column_list}"""
                                  .format(parent_schema=parent_schema_name, parent_table=parent_table_name, 
                                          table=table_name, column_list=','.join(sort_order)))
             conn.commit()
             end = time.time()
             logger.info('Finished reload for table {schema}.{table} in time {sec} seconds'
                         .format(schema=schema_name, table=table_name, sec=(end-start)))
Code example #26
    def test_xlogcleanup(self):
        """
        Test for verifying if xlog seg created while basebackup
        dumps out data does not get cleaned
        """

        shutil.rmtree('base', True)
        PSQL.run_sql_command('DROP table if exists foo')

        # Inject fault at post checkpoint create (basebackup)
        logger.info ('Injecting base_backup_post_create_checkpoint fault ...')
        result = self.suspend_at(
                    'base_backup_post_create_checkpoint')
        logger.info(result.stdout)
        self.assertEqual(result.rc, 0, result.stdout)

        # Now execute basebackup. It will be blocked due to the
        # injected fault.
        logger.info ('Perform basebackup with xlog & recovery.conf...')
        pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base']
                                          , stdout = subprocess.PIPE
                                          , stderr = subprocess.PIPE)

        # Give basebackup a moment to reach the fault & 
        # trigger it
        logger.info('Check if suspend fault is hit ...')
        triggered = self.wait_triggered(
                    'base_backup_post_create_checkpoint')
        self.assertTrue(triggered, 'Fault was not triggered')

        # Perform operations that causes xlog seg generation
        logger.info ('Performing xlog seg generation ...')
        count = 0
        while (count < 10):
            PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;')
            count = count + 1

        # Resume basebackup
        result = self.resume('base_backup_post_create_checkpoint')
        logger.info(result.stdout)
        self.assertEqual(result.rc, 0, result.stdout)

        # Wait until basebackup end
        logger.info('Waiting for basebackup to end ...')

        sql = "SELECT count(*) FROM pg_stat_replication"
        with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
            while (True):
                curs = dbconn.execSQL(conn, sql)
                results = curs.fetchall()

                if (int(results[0][0]) == 0):
                    break;

        # Verify that basebackup completed successfully by checking
        # that recovery.conf exists in the backup directory.
        self.assertTrue(os.path.exists(os.path.join('base','recovery.conf')))

        logger.info ('Found recovery.conf in the backup directory.')
        logger.info ('Pass')
Code example #27
File: restore.py Project: BALDELab/incubator-hawq
    def _process_createdb(self, restore_timestamp, restore_db, master_datadir, master_port): 
        conn = None
        try:
            dburl = dbconn.DbURL(port=master_port)
            conn = dbconn.connect(dburl)
            count = execSQLForSingleton(conn, "select count(*) from pg_database where datname='%s';" % restore_db)

            if count == 1:
                logger.info("Dropping database %s" % restore_db)
                try:
                    cursor=conn.cursor()
                    cursor.execute("commit")                          # hack to move drop stmt out of implied transaction
                    cursor.execute("drop database %s" % restore_db)
                    cursor.close()
                except Exception, e:
                    logger.exception("Could not create database %s" % restore_db)
                    raise ExceptionNoStackTraceNeeded('Failed to drop database %s' % restore_db)
                else:
                    logger.info('Dropped database %s' % restore_db)
        finally:
            if conn is not None:
                conn.close()

        createdb_file = os.path.join(master_datadir, DUMP_DIR, restore_timestamp[0:8], "%s%s" % (CREATEDB_PREFIX, restore_timestamp))
        logger.info('Invoking %s' % createdb_file)
        Psql('Invoking schema dump', filename=createdb_file).run(validateAfter=True)
Code example #28
File: clsRecoverSegment.py Project: pf-qiu/gpdb
 def run(self):
     logger.debug('Executing query (%s:%s) for segment (%s:%s) on database (%s)' % (
         self.qname, self.query, self.hostname, self.port, self.dbname))
     with dbconn.connect(dbconn.DbURL(hostname=self.hostname, port=self.port, dbname=self.dbname),
                         utility=True) as conn:
         res = dbconn.execSQL(conn, self.query)
         self.res = res.fetchall()
Code example #29
File: restore.py Project: BALDELab/incubator-hawq
    def execute(self): 
        existing_tables = []
        table_counts = []
        conn = None
        try:
            dburl = dbconn.DbURL(port=self.master_port, dbname=self.restore_db)
            conn = dbconn.connect(dburl)
            for restore_table in self.restore_tables:
                if '.' not in restore_table:
                    logger.warn("No schema name supplied for %s, removing from list of tables to restore" % restore_table)
                    continue

                schema, table = restore_table.split('.')
                count = execSQLForSingleton(conn, "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'" % (table, schema))
                if count == 0:
                    logger.warn("Table %s does not exist in database %s, removing from list of tables to restore" % (table, self.restore_db))
                    continue

                count = execSQLForSingleton(conn, "select count(*) from %s.%s" % (schema, table))
                if count > 0:
                    logger.warn('Table %s has %d records %s' % (restore_table, count, WARN_MARK))
                existing_tables.append(restore_table)
                table_counts.append((restore_table, count))
        finally:
            if conn is not None:
                conn.close()

        if len(existing_tables) == 0:
            raise ExceptionNoStackTraceNeeded("Have no tables to restore")
        logger.info("Have %d tables to restore, will continue" % len(existing_tables))

        return (existing_tables, table_counts)
Code example #30
    def _get_dist_policies(self):
        policies = []
        with dbconn.connect(dbconn.DbURL()) as conn:
            cursor = dbconn.execSQL(conn, 'select * from gp_distribution_policy;').fetchall()
            for row in cursor:
                policies.append(row)

        return policies
Code example #31
 def _drop_tables(self):
     with dbconn.connect(dbconn.DbURL()) as conn:
         for i in range(self.NUM_TABLES):
             dbconn.execSQL(conn, 'drop table tab%d' % i)
         conn.commit()
Code example #32
def create_database_if_not_exists(context, dbname, host=None, port=0, user=None):
    if not check_db_exists(dbname, host, port, user):
        create_database(context, dbname, host, port, user)
    context.dbname = dbname
    context.conn = dbconn.connect(dbconn.DbURL(dbname=context.dbname))
Code example #33
File: backup_mgmt_utils.py Project: roommush/gpdb
def impl(context, dbname, expected_owner):
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        query = "SELECT pg_catalog.pg_get_userbyid(d.datdba) FROM pg_catalog.pg_database d WHERE d.datname = '%s';" % dbname
        actual_owner = dbconn.execSQLForSingleton(conn, query)
    if actual_owner != expected_owner:
        raise Exception("Database %s has owner %s when it should have owner %s" % (dbname, actual_owner, expected_owner))
Code example #34
File: backup_mgmt_utils.py Project: yanchaomars/gpdb
def impl(context, table_name, lock_mode, conn, dbname):
    query = "begin; lock table %s in %s" % (table_name, lock_mode)
    conn = dbconn.connect(dbconn.DbURL(
        dbname=dbname))  # todo not truthful about using conn parameter
    dbconn.execSQL(conn, query)
    context.conn = conn
Code example #35
def create_long_lived_conn(context, dbname):
    context.long_lived_conn = dbconn.connect(dbconn.DbURL(dbname=dbname),
                                             unsetSearchPath=False)
Code example #36
File: environment.py Project: zhangshengdong/gpdb
def before_feature(context, feature):
    # we should be able to run gpexpand without having a cluster initialized
    tags_to_skip = [
        'gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors', 'gpconfig',
        'gpssh-exkeys', 'gpstop', 'gpinitsystem'
    ]
    if set(context.feature.tags).intersection(tags_to_skip):
        return

    drop_database_if_exists(context, 'testdb')
    drop_database_if_exists(context, 'bkdb')
    drop_database_if_exists(context, 'fullbkdb')
    drop_database_if_exists(context, 'schematestdb')

    if 'analyzedb' in feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'incr_analyze')
        create_database(context, 'incr_analyze')
        drop_database_if_exists(context, 'incr_analyze_2')
        create_database(context, 'incr_analyze_2')
        context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'),
                                      unsetSearchPath=False)
        context.dbname = 'incr_analyze'

        # setting up the tables that will be used
        context.execute_steps(u"""
        Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
        And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
        And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
        And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
        """)

    if 'minirepro' in feature.tags:
        start_database_if_not_started(context)
        minirepro_db = 'minireprodb'
        drop_database_if_exists(context, minirepro_db)
        create_database(context, minirepro_db)
        context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db),
                                      unsetSearchPath=False)
        context.dbname = minirepro_db
        dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
        dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
        dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
        dbconn.execSQL(
            context.conn,
            'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
        dbconn.execSQL(
            context.conn,
            'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
        dbconn.execSQL(
            context.conn,
            'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
        dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
        dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
        dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
        context.conn.commit()

    if 'gppkg' in feature.tags:
        run_command(context,
                    'bash demo/gppkg/generate_sample_gppkg.sh buildGppkg')
        run_command(
            context,
            'cp -f /tmp/sample-gppkg/sample.gppkg test/behave/mgmt_utils/steps/data/'
        )
Code example #37
 def test_no_transaction_after_connect(self):
     with dbconn.connect(self.url) as conn:
         db = pg.DB(conn)
         # this would fail if we were in a transaction; DROP DATABASE cannot
         # run inside a transaction block
         db.query("DROP DATABASE IF EXISTS some_nonexistent_database")
Code example #38
File: utils.py Project: zhuomingliang/gpdb
def create_schema(context, schema_name, dbname):
    if not check_schema_exists(context, schema_name, dbname):
        schema_sql = "create schema %s" % schema_name
        with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
            dbconn.execSQL(conn, schema_sql)
            conn.commit()
Code example #39
File: configurationImplGpdb.py Project: z17033/gpdb
    def updateSystemConfig( self, gpArray, textForConfigTable, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary) :
        """
        Update the configuration for the given segments in the underlying
        configuration store to match the current values

        Also resets any dirty bits on saved/updated objects

        @param textForConfigTable label to be used when adding to segment configuration history
        @param dbIdToForceMirrorRemoveAdd a map of dbid -> True for mirrors for which we should force updating the mirror
        @param useUtilityMode True if the operations we're doing are expected to run via utility mode
        @param allowPrimary True if caller authorizes add/remove primary operations (e.g. gpexpand)
        """

        # ensure initializeProvider() was called
        checkNotNone("masterDbUrl", self.__masterDbUrl)

        logger.debug("Validating configuration changes...")

        if not gpArray.is_array_valid():
            logger.critical("Configuration is invalid")
            raise InvalidSegmentConfiguration(gpArray)

        conn = dbconn.connect(self.__masterDbUrl, useUtilityMode, allowSystemTableMods=True)
        dbconn.execSQL(conn, "BEGIN")

        # compute what needs to be updated
        update = ComputeCatalogUpdate(gpArray, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary)
        update.validate()

        # put the mirrors in a map by content id so we can update them later
        mirror_map = {}
        for seg in update.mirror_to_add:
            mirror_map[ seg.getSegmentContentId() ] = seg

        # reset dbId of new mirror segments to -1
        # before invoking the operations which will assign them new ids
        for seg in update.mirror_to_add:
            seg.setSegmentDbId(-1)

        # remove mirror segments (e.g. for gpexpand rollback)
        for seg in update.mirror_to_remove:
            self.__updateSystemConfigRemoveMirror(conn, seg, textForConfigTable)

        # remove primary segments (e.g for gpexpand rollback)
        for seg in update.primary_to_remove:
            self.__updateSystemConfigRemovePrimary(conn, seg, textForConfigTable)

        # add new primary segments
        for seg in update.primary_to_add:
            self.__updateSystemConfigAddPrimary(conn, gpArray, seg, textForConfigTable, mirror_map)

        # add new mirror segments
        for seg in update.mirror_to_add:
            self.__updateSystemConfigAddMirror(conn, gpArray, seg, textForConfigTable)

        # remove and add mirror segments necessitated by catalog attribute update
        for seg in update.mirror_to_remove_and_add:
            self.__updateSystemConfigRemoveAddMirror(conn, gpArray, seg, textForConfigTable)

        # apply updates to existing segments
        for seg in update.segment_to_update:
            originalSeg = update.dbsegmap.get(seg.getSegmentDbId())
            self.__updateSystemConfigUpdateSegment(conn, gpArray, seg, originalSeg, textForConfigTable)

        # commit changes
        logger.debug("Committing configuration table changes")
        dbconn.execSQL(conn, "COMMIT")
        conn.close()

        gpArray.setSegmentsAsLoadedFromDb([seg.copy() for seg in gpArray.getDbList()])
Code example #40
 def cleanup():
     with dbconn.connect(dburl, utility=True) as myconn:
         dbconn.execSQLForSingleton(myconn, "select spoof_next_xid('%d'::xid) "% teardown_next_xid)
         dbconn.execSQL(myconn, "checkpoint")
     self._basic_sanity_check('clean')
     logger.info('Returned segment to xid %d' % teardown_next_xid)
Code example #41
File: utils.py Project: zhuomingliang/gpdb
def getRow(dbname, exec_sql):
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        curs = dbconn.execSQL(conn, exec_sql)
        result = curs.fetchone()
    return result
Code example #42
    def _reset_age(self, dbname, segdb=None):
        """
        Resets datfrozenxid and the relfrozenxids in pg_class of the
        specified dbname to a value close to the current xid.  This is
        a recommended way of resetting the age of dbname or of a database
        created off template0.

        @param segdb: identifies the segment on which to operate.  It is an
        instance of GpDB class.

        Note that the database dbname must have all tuples frozen (xmin=2).
        This holds true for template0, and for a database created off
        template0 as long as no modifications have been made to it.

        """
        if segdb is None:
            segdb = self.gparray.master
        dburl = dbconn.DbURL(hostname=segdb.hostname, port=segdb.port)
        dburl_dbname = dbconn.DbURL(hostname=segdb.hostname,
                                    port=segdb.port,
                                    dbname=dbname)
        with dbconn.connect(dburl,
                            utility=True,
                            allowSystemTableMods="dml") as conn:
            sql = "SELECT get_next_xid()"
            next_xid = int(dbconn.execSQLForSingleton(conn, sql))
            sql = "UPDATE pg_database SET datfrozenxid='%d'::xid WHERE datname='%s'"
            dbconn.execSQL(conn, sql % (next_xid, dbname))
            conn.commit()
        if dbname == "template0":
            self._set_allowconn_template0(True)
        with dbconn.connect(dburl_dbname,
                            utility=True,
                            allowSystemTableMods="dml") as conn:
            sql = ("UPDATE pg_class SET relfrozenxid='%d'::xid WHERE "
                   "int8in(xidout(relfrozenxid)) > 0")
            dbconn.execSQL(conn, sql % next_xid)
            conn.commit()
        PSQL(sql_cmd="VACUUM FREEZE pg_class",
             dbname=dbname,
             PGOPTIONS="-c 'gp_session_role=utility'",
             host=segdb.hostname,
             port=segdb.port,
             out_file="vacuum_%s.out" % dbname).run(validateAfter=True)
        with dbconn.connect(dburl_dbname,
                            utility=True,
                            allowSystemTableMods="dml") as conn:
            dbconn.execSQL(conn, "DELETE FROM pg_stat_last_operation")
            conn.commit()
        PSQL(sql_cmd="VACUUM FREEZE pg_stat_last_operation",
             dbname=dbname,
             PGOPTIONS="-c 'gp_session_role=utility'",
             host=segdb.hostname,
             port=segdb.port,
             out_file="vacuum_%s.out" % dbname).run(validateAfter=True)
        if dbname == "template0":
            self._set_allowconn_template0(False)
        with dbconn.connect(dburl, utility=True) as conn:
            sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='%s'"
            age_dbname = dbconn.execSQLForSingleton(conn, sql % dbname)
            age_dbname = int(age_dbname)
        logger.info("Age of %s reset to %d" % (dbname, age_dbname))
        # We are OK as long as dbname age is less than xid_warn_limit.  The
        # 10000 is just a number assumed to be less than xid_warn_limit.
        self.assertTrue(age_dbname > 0 and age_dbname < 10000,
                        "age(%s) = %d, next xid = %d" %
                        (dbname, age_dbname, next_xid))
Code example #43
    def _raise_template0_age(self, limit, segdb):
        """
        Increase age of template0 beyond the specified limit on the specified
        segment.  When a new database is created off template0, the limit will
        be exceeded.  Assumption: template0 age =~ 0 or at least not already
        crossing any of the xid limits.  Note that this function can only
        raise the age; it cannot decrease it.

        @param limit: one of WARN_LIMIT, STOP_LIMIT and WRAP_LIMIT.

        @param segdb: an instance of GpDB class representing the segment on
        which the limit will be exceeded.
        """
        dburl = dbconn.DbURL(hostname=segdb.hostname, port=segdb.port)
        databases = []
        with dbconn.connect(dburl, utility=True) as conn:
            sql = "SELECT datname FROM pg_database WHERE datallowconn='t'"
            for row in dbconn.execSQL(conn, sql):
                databases.append(row[0])
            sql = "SHOW xid_stop_limit"
            stop_limit_guc = int(dbconn.execSQLForSingleton(conn, sql))
            sql = "SHOW xid_warn_limit"
            warn_limit_guc = int(dbconn.execSQLForSingleton(conn, sql))
            sql = ("SELECT datfrozenxid, age(datfrozenxid) FROM pg_database "
                   "WHERE datname='template0'")
            row = dbconn.execSQL(conn, sql).fetchone()
            datfxid, age = int(row[0]), int(row[1])
            sql = "SELECT get_next_xid()"
            current_xid = int(dbconn.execSQLForSingleton(conn, sql))
        # Estimate of XIDs consumed by vacuum freeze operations on all databases.
        vacuum_xids = len(databases) * 500
        logger.info("Estimated xids for vacuum freeze: %d" % vacuum_xids)
        if limit == self.WARN_LIMIT:
            target_age = (2**31) - stop_limit_guc - warn_limit_guc
            target_xid = xid_sum(datfxid, target_age)
            keep_raising = lambda x: x < target_age
        elif limit == self.STOP_LIMIT:
            target_age = (2**31) - stop_limit_guc
            target_xid = xid_sum(datfxid, target_age)
            keep_raising = lambda x: x < target_age
        elif limit == self.WRAP_LIMIT:
            target_xid = xid_sum(datfxid, 2**31)
            keep_raising = lambda x: x > 0
        logger.info("Target xid = %d, limit = %d" % (target_xid, limit))
        self.assertEqual(preceding_xid(target_xid, current_xid), current_xid,
                         "Target xid (%d) precedes current xid (%d)" %
                         (target_xid, current_xid))
        while keep_raising(age):
            with dbconn.connect(dburl, utility=True) as conn:
                sql = "SELECT get_stop_limit()"
                stop_limit = int(dbconn.execSQLForSingleton(conn, sql))
                # GPDB may stop accepting connections if we spoof nextXid beyond
                # max_xid.
                max_xid = xid_sum(stop_limit, -vacuum_xids)
                new_xid = preceding_xid(target_xid, max_xid)
                logger.info("Spoofing next xid to %d, current stop limit = %d" %
                            (new_xid, stop_limit))
                sql = "SELECT spoof_next_xid('%d'::xid)"
                dbconn.execSQL(conn, sql % new_xid)
                conn.commit()
                sql = ("SELECT age(datfrozenxid) FROM pg_database "
                       "WHERE datname='template0'")
                age = int(dbconn.execSQLForSingleton(conn, sql))
            logger.info("template0 age raised to %d" % age)
            # The vacuum freeze of all databases advances stop_limit further,
            # necessary for iterating the while loop.  And template0 becomes the
            # oldest database aka the only culprit to violate the specified
            # limit.
            for datname in databases:
                logger.info('vacuum freeze %s' % datname)
                PSQL(sql_cmd='VACUUM FREEZE',
                     dbname=datname,
                     out_file='vacuum_%s.out' % datname).run(validateAfter=True)
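
As a rough worked example of the target computations above, assuming the 10**9 default for xid_stop_limit noted in code example #14 (illustrative arithmetic only):

# limit == STOP_LIMIT, assuming xid_stop_limit = 10**9:
target_age = 2**31 - 10**9            # = 1147483648
# limit == WRAP_LIMIT: the target xid sits a full 2**31 ahead of datfrozenxid,
# so age(datfrozenxid) of template0 wraps past 2**31 and turns negative.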
Code example #44
    def _create_expansion_input_file(self):
        """This code has been taken from system_management utilities
           test suite.
           creates a expansion input file"""

        with dbconn.connect(dbconn.DbURL()) as conn:
            next_dbid = dbconn.execSQLForSingletonRow(
                conn, "select max(dbid)+1 \
                                                       from pg_catalog.gp_segment_configuration"
            )[0]
            next_content = dbconn.execSQL(
                conn, "select max(content)+1 \
                                           from pg_catalog.gp_segment_configuration"
            ).fetchall()[0][0]
            next_pri_port = dbconn.execSQL(
                conn, "select max(port)+1 \
                                            from pg_catalog.gp_segment_configuration \
                                            where role='p'").fetchall()[0][0]
            self.primary_host_name = dbconn.execSQL(
                conn, "select distinct hostname \
                                                     from gp_segment_configuration \
                                                     where content >= 0 and preferred_role = 'p'"
            ).fetchall()[0][0]
            next_mir_port = dbconn.execSQL(
                conn, "select max(port)+1 \
                                            from pg_catalog.gp_segment_configuration \
                                            where role='m'").fetchall()[0][0]

            if next_mir_port == None or next_mir_port == ' ' or next_mir_port == 0:
                mirroring_on = False
            else:
                mirroring_on = True
                next_pri_replication_port = dbconn.execSQL(
                    conn, "select max(replication_port)+1 \
                                                            from pg_catalog.gp_segment_configuration \
                                                            where role='p'"
                ).fetchall()[0][0]
                next_mir_replication_port = dbconn.execSQL(
                    conn, "select max(replication_port)+1 \
                                                            from pg_catalog.gp_segment_configuration \
                                                            where role='m'"
                ).fetchall()[0][0]
                select_mirror = "select distinct hostname \
                                 from gp_segment_configuration \
                                 where content >= 0 and preferred_role = 'm' and hostname != '%s'" % self.primary_host_name
                mirror_name_row = dbconn.execSQL(conn,
                                                 select_mirror).fetchall()
                if mirror_name_row == None or len(mirror_name_row) == 0:
                    self.mirror_host_name = self.primary_host_name
                else:
                    self.mirror_host_name = mirror_name_row[0][0]

                self.primary_host_address = socket.getaddrinfo(
                    self.primary_host_name, None)[0][4][0]
                self.mirror_host_address = socket.getaddrinfo(
                    self.mirror_host_name, None)[0][4][0]

            with open(self.EXPANSION_INPUT_FILE, 'w') as outfile:
                for i in range(self.SEGMENTS):
                    pri_datadir = os.path.join(os.getcwd(),
                                               'new_pri_seg%d' % i)
                    mir_datadir = os.path.join(os.getcwd(),
                                               'new_mir_seg%d' % i)

                    temp_str = "%s:%s:%d:%s:%d:%d:%s" % (
                        self.primary_host_name, self.primary_host_address,
                        next_pri_port, pri_datadir, next_dbid, next_content,
                        'p')
                    if mirroring_on:
                        temp_str = temp_str + ":" + str(
                            next_pri_replication_port)
                    temp_str = temp_str + "\n"
                    outfile.write(temp_str)

                    if mirroring_on:  # the mirror shares the primary segment's content number
                        next_dbid += 1
                        outfile.write("%s:%s:%d:%s:%d:%d:%s:%s\n" %
                                      (self.mirror_host_name,
                                       self.mirror_host_address, next_mir_port,
                                       mir_datadir, next_dbid, next_content,
                                       'm', str(next_mir_replication_port)))
                        next_mir_port += 1
                        next_pri_replication_port += 1
                        next_mir_replication_port += 1

                    next_pri_port += 1
                    next_dbid += 1
                    next_content += 1
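For reference, with two new segments and mirroring enabled, the loop above emits gpexpand input lines of the form host:address:port:datadir:dbid:content:role[:replication_port]; the hostnames, addresses, ports, paths, dbids, and content ids below are hypothetical placeholders, not values from the source:

sdw1:192.168.1.11:40000:/data/new_pri_seg0:10:4:p:41000
sdw2:192.168.1.12:50000:/data/new_mir_seg0:11:4:m:51000
sdw1:192.168.1.11:40001:/data/new_pri_seg1:12:5:p:41001
sdw2:192.168.1.12:50001:/data/new_mir_seg1:13:5:m:51001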
Code example #45
 def trigger_fts_probe(self, gpArray):
     self.logger.info('Triggering FTS probe')
     with dbconn.connect(dbconn.DbURL()) as conn:
         # Fetch the probe result before the context manager closes the connection.
         return dbconn.execSQL(conn, "SELECT gp_request_fts_probe_scan()").fetchall()
Code example #46
 def setUp(self):
     self.dburl = dbconn.DbURL()
     self.conn = dbconn.connect(self.dburl)
Code example #47
File: backup_utils.py Project: yangaoquan/gpdb
def execute_sql(query, master_port, dbname):
    dburl = dbconn.DbURL(port=master_port, dbname=dbname)
    conn = dbconn.connect(dburl)
    # Fetch the rows and close the connection before returning.
    results = dbconn.execSQL(conn, query).fetchall()
    conn.close()
    return results
Code example #48
def connectToDb(hawq_master_host, hawq_master_port, database):
    'connect to database'
    url = dbconn.DbURL(hawq_master_host,
                       port=hawq_master_port,
                       dbname=database)
    return dbconn.connect(dburl=url)
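A minimal usage sketch for the helper above; the host, port, and database name below are hypothetical placeholders, not values from the source:

conn = connectToDb('hawq-master.example.com', 5432, 'gptest')
try:
    # run an arbitrary query through the returned connection
    version_str = dbconn.execSQL(conn, 'SELECT version()').fetchall()[0][0]
finally:
    conn.close()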
Code example #49
    def initFromCatalog(dbURL, utility=False, useAllSegmentFileSpaces=False):
        """
        Factory method, initializes a HAWQArray from provided database URL

        Please note: when useAllSegmentFileSpaces is set to True, this method adds
        *all* filespaces to the segments of the HAWQArray.  When False, it returns
        all filespaces only for the Master/Standby.  This is hacky and not the right
        way to design the interface, but it keeps the behavior of existing tools
        (upgrade, gprecoverseg, etc.) unchanged.  A usage sketch follows this example.
        """

        conn = dbconn.connect(dbURL, utility)

        # Get the version from the database:
        version_str = None
        for row in dbconn.execSQL(conn, "SELECT version()"):
            version_str = row[0]
        version = GpVersion(version_str)

        # Only for HAWQ 2.0
        if version.getVersionRelease() in ("2.0",):  # tuple membership, not substring match

            hawq_site = HawqXMLParser(GPHOME)
            master_data_directory = hawq_site.get_value_from_name(
                'hawq_master_directory')
            segment_data_directory = hawq_site.get_value_from_name(
                'hawq_segment_directory')

            # strategy_rows = dbconn.execSQL(conn, "show gp_fault_action")
            strategy_rows = []

            config_rows = dbconn.execSQL(
                conn, '''
                               SELECT sc.registration_order,
                                      sc.role,
                                      sc.status,
                                      sc.hostname,
                                      sc.address,
                                      sc.port,
                                      fs.oid,
                                      CASE
                                          WHEN sc.registration_order <= 0 THEN '%s'
                                          ELSE '%s'
                                      END AS datadir
                               FROM pg_catalog.gp_segment_configuration sc,
                                    pg_catalog.pg_filespace fs,
                                    pg_catalog.pg_filespace_entry fse
                               WHERE fse.fsefsoid = fs.oid
                               ORDER BY sc.registration_order;''' %
                (master_data_directory, segment_data_directory))

            # All of filesystem is shared storage
            filesystemRows = dbconn.execSQL(
                conn, '''
                SELECT oid, fsysname, true AS fsysshared
                FROM pg_filesystem
                ORDER BY fsysname
            ''')

            filesystemArr = [
                HAWQFilesystemObj(fsysRow[0], fsysRow[1], fsysRow[2])
                for fsysRow in filesystemRows
            ]

            filespaceRows = dbconn.execSQL(
                conn, '''
                SELECT oid, fsname, fsfsys AS fsoid
                FROM pg_filespace
                WHERE oid != %d
                ORDER BY fsname;
            ''' % (SYSTEM_FILESPACE))

            filespaceArr = [
                HAWQFilespaceObj(
                    fsRow[0], fsRow[1],
                    HAWQFilesystemObj.getFilesystemObj(filesystemArr,
                                                       fsRow[2]))
                for fsRow in filespaceRows
            ]

        else:
            raise Exception("HAWQ version is invalid: %s" % version)

        hawqdbs = []
        print "### initFromCatalog ###"
        hdb = None
        for row in config_rows:

            print row

            # Extract fields from the row
            (registration_order, role, status, hostname, address, port, fsoid,
             datadir) = row

            # In GPSQL, only the master maintains the filespace information.
            # if registration_order != MASTER_REGISTRATION_ORDER and \
            #    fsoid != SYSTEM_FILESPACE and \
            #    not useAllSegmentFileSpaces:
            #    print "### initFromCatalog ... continue ###"
            #    continue

            # The query returns all the filespaces for a segment on separate
            # rows.  If this row is the same dbid as the previous row simply
            # add this filespace to the existing list, otherwise create a
            # new segment.
            # if seg and seg.getSegmentRegistrationOrder() == registration_order:
            #     seg.addSegmentFilespace(fsoid, fslocation)
            # else:
            #     seg = HAWQDB(registration_order, role, status,
            #                 hostname, address, port, datadir)
            #    segments.append(seg)

            hdb = HAWQDB(registration_order, role, status, hostname, address,
                         port, datadir)
            print "### initFromCatalog ... hdb ###"
            print hdb
            hawqdbs.append(hdb)
            print "### initFromCatalog ... hawqdbs ###"
            print hawqdbs

        conn.close()

        # origSegments = [seg.copy() for seg in segments]

        array = HAWQArray(hawqdbs)
        array.version = version
        array.setFilespaces(filespaceArr)
        array.setFilesystem(filesystemArr)

        return array
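A minimal usage sketch for the factory method above, assuming it is exposed as a static method on HAWQArray (by analogy with GpArray.initFromCatalog); the hostname, port, and database are hypothetical:

dburl = dbconn.DbURL(hostname='hawq-master', port=5432, dbname='template1')
hawq_array = HAWQArray.initFromCatalog(dburl, utility=True)
# version is populated from "SELECT version()" inside initFromCatalog
print hawq_array.version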
Code example #50
File: verify.py Project: stefanieqiang/gpdb-1
 def run_SQLQuery(self, exec_sql, dbname='template1'):
     with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
         curs = dbconn.execSQL(conn, exec_sql)
         results = curs.fetchall()
     return results
Code example #51
def create_long_lived_conn(context, dbname):
    context.long_lived_conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
Code example #52
File: gpstart.py Project: zyclove/gpdb
def has_expected_status(content, preferred_role, expected_status):
    with closing(dbconn.connect(dbconn.DbURL(dbname="template1"), unsetSearchPath=False)) as conn:
        status = dbconn.querySingleton(conn, "SELECT status FROM gp_segment_configuration WHERE content = %s AND preferred_role = '%s'" % (content, preferred_role))
    return status == expected_status
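A short usage sketch for the step helper above; the content id, preferred role, and status value are illustrative only:

if not has_expected_status(0, 'm', 'u'):
    raise Exception("mirror for content 0 is not marked up ('u') in gp_segment_configuration")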
Code example #53
 def _get_dblist(self):
     # template0 does not accept any connections so we exclude it
     with dbconn.connect(dbconn.DbURL()) as conn:
         res = dbconn.query(conn, "SELECT datname FROM PG_DATABASE WHERE datname != 'template0'")
         # Fetch inside the with-block; the context manager already closes the
         # connection, so the extra conn.close() was redundant.
         return res.fetchall()
Code example #54
File: gpstart.py Project: zyclove/gpdb
def must_have_expected_status(content, preferred_role, expected_status):
    with closing(dbconn.connect(dbconn.DbURL(dbname="template1"), unsetSearchPath=False)) as conn:
        status = dbconn.querySingleton(conn, "SELECT status FROM gp_segment_configuration WHERE content = %s AND preferred_role = '%s'" % (content, preferred_role))
    if status != expected_status:
        raise Exception("Expected status for role %s to be %s, but it is %s" % (preferred_role, expected_status, status))
Code example #55
def drop_table_if_exists(context, table_name, dbname, host=None, port=0, user=None):
    SQL = 'drop table if exists %s' % table_name
    with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
        dbconn.execSQL(conn, SQL)
        conn.commit()
Code example #56
File: gpstart.py Project: zyclove/gpdb
def get_guc_value(guc):
    with closing(dbconn.connect(dbconn.DbURL(dbname="template1"), unsetSearchPath=False)) as conn:
        value = dbconn.querySingleton(conn, "show %s" % guc)
    return value
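A usage sketch for the GUC helper above; the GUC name is only an example, and "show" always returns the value as a string:

max_conns = int(get_guc_value('max_connections'))
if max_conns < 100:
    raise Exception("max_connections is unexpectedly low: %d" % max_conns)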
Code example #57
File: backup_mgmt_utils.py Project: roommush/gpdb
def impl(context, configString, dbname):
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        query = "select datconfig from pg_database where datname in ('%s');" % dbname
        datconfig = dbconn.execSQLForSingleton(conn, query)
    if not datconfig or configString not in datconfig:
        raise Exception("%s is not in the datconfig for database '%s':\n %s" % (configString, dbname, datconfig))
Code example #58
File: gpstart.py Project: zyclove/gpdb
def change_hostname(content, preferred_role, hostname):
    with closing(dbconn.connect(dbconn.DbURL(dbname="template1"), allowSystemTableMods=True, unsetSearchPath=False)) as conn:
        dbconn.execSQL(conn, "UPDATE gp_segment_configuration SET hostname = '{0}', address = '{0}' WHERE content = {1} AND preferred_role = '{2}'".format(hostname, content, preferred_role))
Code example #59
 def _create_tables(self):
     with dbconn.connect(dbconn.DbURL()) as conn:
         for i in range(self.NUM_TABLES):
             dbconn.execSQL(conn, 'create table tab%d(i integer)' % i)
         conn.commit()
Code example #60
class Context(object):
    filename = os.path.join(gp.get_coordinatordatadir(), 'gpexpand.status')
    dbname = os.getenv('PGDATABASE', 'postgres')
    dburl = dbconn.DbURL(dbname=dbname)
    conn = dbconn.connect(dburl)
    day = 0