Example #1
    def execute(self): 
        existing_tables = []
        table_counts = []
        conn = None
        try:
            dburl = dbconn.DbURL(port=self.master_port, dbname=self.restore_db)
            conn = dbconn.connect(dburl)
            for restore_table in self.restore_tables:
                if '.' not in restore_table:
                    logger.warn("No schema name supplied for %s, removing from list of tables to restore" % restore_table)
                    continue

                schema, table = restore_table.split('.')
                count = execSQLForSingleton(conn, "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'" % (table, schema))
                if count == 0:
                    logger.warn("Table %s does not exist in database %s, removing from list of tables to restore" % (table, self.restore_db))
                    continue

                count = execSQLForSingleton(conn, "select count(*) from %s.%s" % (schema, table))
                if count > 0:
                    logger.warn('Table %s has %d records %s' % (restore_table, count, WARN_MARK))
                existing_tables.append(restore_table)
                table_counts.append((restore_table, count))
        finally:
            if conn is not None:
                conn.close()

        if len(existing_tables) == 0:
            raise ExceptionNoStackTraceNeeded("Have no tables to restore")
        logger.info("Have %d tables to restore, will continue" % len(existing_tables))

        return (existing_tables, table_counts)
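
The pg_class/pg_namespace existence check above recurs throughout these examples. A minimal standalone sketch of that pattern, assuming the same dbconn API (DbURL, connect, execSQLForSingleton) and the gppylib import path used by these utilities:

from gppylib.db import dbconn

def table_exists(conn, schema, table):
    # True if schema.table exists; mirrors the pg_class/pg_namespace join
    # used above. Identifiers are interpolated directly, as in the original,
    # so callers must pass trusted schema and table names.
    sql = ("select count(*) from pg_class, pg_namespace "
           "where pg_class.relname = '%s' "
           "and pg_class.relnamespace = pg_namespace.oid "
           "and pg_namespace.nspname = '%s'" % (table, schema))
    return dbconn.execSQLForSingleton(conn, sql) > 0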
Example #2
    def verify_for_gpexpand(self, hostname=None, port=0):
        """
        For gpexpand, we need to make sure:
          1. data is the same after redistribution finished
          2. the table's numsegments is enlarged to the new cluster size
        """
        url = dbconn.DbURL(hostname=hostname, port=port, dbname=self.dbname)
        with dbconn.connect(url, unsetSearchPath=False) as conn:
            db = pg.DB(conn)
            data = db.query("SELECT gp_segment_id, i FROM tbl").getresult()
            tbl_numsegments = dbconn.execSQLForSingleton(conn,
                                                         "SELECT numsegments FROM gp_distribution_policy "
                                                         "WHERE localoid = 'tbl'::regclass::oid")
            num_segments = dbconn.execSQLForSingleton(conn,
                                                     "SELECT COUNT(DISTINCT(content)) - 1 FROM gp_segment_configuration")

        if tbl_numsegments != num_segments:
            raise Exception("After gpexpand the numsegments for tablespace table 'tbl' %d does not match "
                            "the number of segments in the cluster %d." % (tbl_numsegments, num_segments))

        initial_data = [i for _, i in self.initial_data]
        data_without_segid = [i for _, i in data]
        if sorted(data_without_segid) != sorted(initial_data):
            raise Exception("Tablespace data is not identically distributed after running gp_expand. "
                            "Expected pre-gpexpand data:\n%\n but found post-gpexpand data:\n%r" % (
                                sorted(self.initial_data), sorted(data)))
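
The "- 1" in the segment-count query above excludes the coordinator, which occupies content id -1 in gp_segment_configuration. An equivalent formulation, shown as a Python constant for illustration:

SEGMENT_COUNT_SQL = (
    "SELECT count(DISTINCT content) FROM gp_segment_configuration "
    "WHERE content >= 0"  # content -1 is the master/coordinator; exclude it
)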
Example #3
    def execute(self):
        needed_space = 0
        dburl = dbconn.DbURL(dbname=self.dump_database, port=self.segport)
        conn = None
        try:
            conn = dbconn.connect(dburl, utility=True)
            if self.include_dump_tables:
                for dump_table in self.include_dump_tables:
                    needed_space += execSQLForSingleton(
                        conn,
                        "SELECT pg_relation_size('%s')/1024;" % dump_table)
            else:
                needed_space = execSQLForSingleton(
                    conn,
                    "SELECT pg_database_size('%s')/1024;" % self.dump_database)
        finally:
            if conn is not None:
                conn.close()
        if self.compress:
            needed_space = needed_space / COMPRESSION_FACTOR

        # get free available space
        stat_res = os.statvfs(self.datadir)
        free_space = (stat_res.f_bavail * stat_res.f_frsize) / 1024

        if free_space == 0 or (free_space - needed_space
                               ) / free_space < self.free_space_percent / 100:
            logger.error("Disk space: [Need: %dK, Free %dK]" %
                         (needed_space, free_space))
            raise NotEnoughDiskSpace(free_space, needed_space)
        logger.info("Disk space: [Need: %dK, Free %dK]" %
                    (needed_space, free_space))
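
The free-space side of the check above is plain stdlib Python; a runnable sketch of just that piece:

import os

def free_space_kb(path):
    # f_bavail counts blocks available to unprivileged processes and
    # f_frsize is the fragment size in bytes, so the product is free bytes.
    st = os.statvfs(path)
    return (st.f_bavail * st.f_frsize) // 1024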
Example #4
    def template0_wrap_around(self):
        """
        Raise next xid so that age(template0) suffers a wrap around and
        becomes negative.  Create a new database off template0, which
        also suffers wrap around.  Reset the new db's age.  Sanity
        must succeed on the new db.

        """
        self._raise_template0_age(self.WRAP_LIMIT, self.gparray.master)
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        # Xid wrap-around should cause template0 and newdb's age to be negative.
        self.assertTrue(age_newdb < 0)
        # All xids in newdb are frozen at this point.  Therefore, we
        # can reset its age so that it is not negative.
        self._reset_age("newdb")
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        self.assertTrue(age_newdb > 0)
        # Verify that normal operations can be performed on newdb post recovery
        # from wraparound.
        self._basic_sanity_check("clean", {"dbname": "newdb"})
        logger.info("Sanity succeeded on newdb, dropping it.")
        PSQL.drop_database(dbname="newdb")
Example #5
 def reload_tables(self):
     with dbconn.connect(dbconn.DbURL(dbname=self.database, port=self.port)) as conn:
         conn.commit()   #Commit the implicit transaction started by connect
         for schema_name, table_name, sort_column_list in self.table_list:
             logger.info('Starting reload for table {schema}.{table}'.format(schema=schema_name, table=table_name))
             logger.info('Table {schema}.{table} has {rows} rows and {size} size'
                     .format(schema=schema_name, table=table_name,
                      rows=self.get_row_count('%s.%s' % (schema_name, table_name)),
                      size=self.get_table_size(schema_name, table_name)))
             if not self.check_indexes(schema_name, table_name):
                 logger.info('Skipping reload for {schema}.{table}'.format(schema=schema_name, table=table_name))
                 continue
             start = time.time()
             dbconn.execSQL(conn, 'BEGIN')
             dbconn.execSQL(conn, """CREATE TEMP TABLE temp_{table} AS SELECT * FROM {schema}.{table}"""
                                  .format(schema=schema_name, table=table_name))
             temp_row_count = dbconn.execSQLForSingleton(conn, """SELECT count(*) FROM temp_{table}""".format(table=table_name))
             table_row_count = dbconn.execSQLForSingleton(conn, """SELECT count(*) from {schema}.{table}"""
                                                                 .format(table=table_name, schema=schema_name))
             if temp_row_count != table_row_count:
                 raise Exception('Row count for temp table (%s) does not match source table count (%s)' % (temp_row_count, table_row_count))
             dbconn.execSQL(conn, 'TRUNCATE TABLE {schema}.{table}'.format(schema=schema_name, table=table_name))
             sort_order = ['%s %s' % (c[0], c[1]) for c in sort_column_list]
             parent_schema_name, parent_table_name = self.parent_partition_map[(schema_name, table_name)]
             dbconn.execSQL(conn, """INSERT INTO {parent_schema}.{parent_table} SELECT * FROM temp_{table} ORDER BY {column_list}"""
                                  .format(parent_schema=parent_schema_name, parent_table=parent_table_name, 
                                          table=table_name, column_list=','.join(sort_order)))
             conn.commit()
             end = time.time()
             logger.info('Finished reload for table {schema}.{table} in time {sec} seconds'
                         .format(schema=schema_name, table=table_name, sec=(end-start)))
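
The ORDER BY clause in the INSERT above is assembled from (column, direction) pairs. For illustration, with a hypothetical sort_column_list:

sort_column_list = [('id', 'ASC'), ('updated_at', 'DESC')]  # hypothetical input
sort_order = ['%s %s' % (c[0], c[1]) for c in sort_column_list]
print(','.join(sort_order))  # prints: id ASC,updated_at DESC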
Example #6
 def template0_wrap_around_on_segment(self, primary):
     """
     Same as template0_wrap_around, but on segment.
     """
     logger.info("template0_wrap_around_on_segment: dbid(%d) %s:%d'" %
                 (primary.dbid, primary.hostname, primary.port))
     self._raise_template0_age(self.WRAP_LIMIT, primary)
     PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
         validateAfter=True)
     sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
     # Verify that age of newdb on the segment is negative.
     dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
     with dbconn.connect(dburl, utility=True) as conn:
         age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
     self.assertTrue(age_newdb < 0)
     # Reset newdb age so as to recover from wrap around.
     self._reset_age("newdb", primary)
     # Verify that normal operations can be performed on newdb whose age was
     # reset to a correct value.
     self._basic_sanity_check("clean", {"dbname": "newdb"})
     # Verify that age of newdb on the segment is valid.
     with dbconn.connect(dburl, utility=True) as conn:
         age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
     self.assertTrue(age_newdb > 0)
     PSQL.drop_database(dbname="newdb")
Example #7
    def test_autovacuum_signaling(self):
        """
        Raise the nextXid to oldest_frozenxid + autovacuum_freeze_max_age.
        Run a transaction.
        Ensure that no autovacuum daemon is started.
        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising master xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        # Burn a transaction to trigger any undesirable behavior that we're disabling.
        with dbconn.connect(dburl) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ps -ef | grep -v grep | grep postgres | grep autovacuum')
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
Example #8
    def test_autovacuum_signaling_on_segment(self):
        """
        Same as above, but on a segment.
        """
        # connect to the master to build gparray
        primary, _ = self._get_primary_mirror_pair()
        logger.info('Isolated segment %d at %s:%d' % (primary.dbid, primary.hostname, primary.port))
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)

        with dbconn.connect(dburl, utility=True) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising segment xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)

        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        with dbconn.connect(dburl, utility=True) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))

        cmd = Command('check for autovacuum',
                      'ssh %s ps -ef | grep -v grep | grep postgres | grep autovacuum' % primary.hostname)
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")

        self._basic_sanity_check('clean')
Example #9
    def validate_mid_level_partitions(self, schema_name, table_name):
        partition_level, max_level = None, None
        with dbconn.connect(dbconn.DbURL(dbname=self.database, port=self.port)) as conn:
            parent_schema, parent_table = self.parent_partition_map[(schema_name, table_name)]
            if (parent_schema, parent_table) == (schema_name, table_name):
                return
            try:
                max_level = dbconn.execSQLForSingleton(conn, 
                                                   """SELECT max(partitionlevel)
                                                      FROM pg_partitions
                                                      WHERE tablename='%s'
                                                      AND schemaname='%s'
                                                   """ % (parent_table, parent_schema))
            except Exception as e:
                logger.debug('Unable to get the maximum partition level for table %s: (%s)' % (table_name, str(e)))

            try:
                partition_level = dbconn.execSQLForSingleton(conn,
                                                         """SELECT partitionlevel
                                                            FROM pg_partitions
                                                            WHERE partitiontablename='%s'
                                                            AND partitionschemaname='%s'
                                                         """ % (table_name, schema_name))
            except Exception as e:
                logger.debug('Unable to get the partition level for table %s: (%s)' % (table_name, str(e)))

            if partition_level != max_level:
                logger.error('Partition level of the table = %s, Max partition level = %s' % (partition_level, max_level))
                raise Exception('Mid Level partition %s.%s is not supported by gpreload. Please specify only leaf partitions or parent table name' % (schema_name, table_name))
Example #10
 def template0_stop_limit_on_segment(self, primary):
     """
     Same as template0_stop_limit, but on segment.
     """
     logger.info("template0_stop_limit_on_segment: dbid(%d) %s:%d'" %
                 (primary.dbid, primary.hostname, primary.port))
     dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
     with dbconn.connect(dburl, utility=True) as conn:
         sql = "SHOW xid_stop_limit"
         slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
     new_limit = xid_sum(slimit_guc, -(10**6))
     # Raise nextXid so that template0 age would cross stop limit.
     self._raise_template0_age(self.STOP_LIMIT, primary)
     # newdb's age crosses stop limit and GPDB stops accepting commands.
     PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
         validateAfter=True)
     logger.info("newdb created off template0")
     # The xid limits in shared memory are only updated at a VACUUM,
     # so run one now.
     PSQL(sql_cmd='VACUUM FREEZE',
          dbname='postgres',
          out_file='vacuum_postgres.out').run(validateAfter=True)
     # Ensure that utility connections to the segment fail with error.
     psql_args = {
         "PGOPTIONS": "-c 'gp_session_role=utility'",
         "host": primary.hostname,
         "port": primary.port
     }
     self._basic_sanity_check("error", psql_args)
     logger.info("Utility connection to dbid(%d) reported stop limit "
                 "error, as expected." % primary.dbid)
     try:
         # Verify that SQL commands from master fail.
         PSQL(sql_cmd="CREATE TABLE test (a int, b int)").run(
             validateAfter=True)
         self.fail("CREATE TABLE succeeded from master, when expecting "
                   "stop limit error on segment.")
     except ExecutionError:
         logger.info("CREATE TABLE failed from master, as expected.")
     # Reduce xid_stop_limit as per the standard procedure.
     self._reduce_stop_limit_guc(primary, new_limit)
     # Vacuum freezing newdb should suffice to recover.
     PSQL(sql_cmd="VACUUM FREEZE",
          dbname="newdb",
          out_file="vacuum_newdb_wl.out").run(validateAfter=True)
     # Ensure that utility connections to the segment are successful.
     sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
     with dbconn.connect(dburl, utility=True) as conn:
         age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
     self.assertTrue(age_newdb > 0)
     # Verify SQL commands from master are successful.
     self._basic_sanity_check("clean")
     self._restore_stop_limit_guc(primary.datadir)
     # Verify SQL commands after restoring xid_stop_limit GUC.
     self._basic_sanity_check("clean")
     PSQL.drop_database(dbname="newdb")
Example #11
 def template0_stop_limit_on_segment(self, primary):
     """
     Same as template0_stop_limit, but on segment.
     """
     logger.info("template0_stop_limit_on_segment: dbid(%d) %s:%d'" %
                 (primary.dbid, primary.hostname, primary.port))
     dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
     with dbconn.connect(dburl, utility=True) as conn:
         sql = "SHOW xid_stop_limit"
         slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
     new_limit = xid_sum(slimit_guc, -(10**6))
     # Raise nextXid so that template0 age would cross stop limit.
     self._raise_template0_age(self.STOP_LIMIT, primary)
     # newdb's age crosses stop limit and GPDB stops accepting commands.
     PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
         validateAfter=True)
     logger.info("newdb created off template0")
     # Ensure that utility connections to the segment fail with error.
     psql_args = {"PGOPTIONS":"-c 'gp_session_role=utility'",
                  "host":primary.hostname,
                  "port":primary.port}
     self._basic_sanity_check("error", psql_args)
     logger.info("Utility connection to dbid(%d) reported stop limit "
                 "error, as expected." % primary.dbid)
     try:
         # Verify that SQL commands from master fail.
         PSQL(sql_cmd="CREATE TABLE test (a int, b int)").run(
             validateAfter=True)
         self.fail("CREATE TABLE succeeded from master, when expecting "
                   "stop limit error on segment.")
     except ExecutionError:
         logger.info("CREATE TABLE failed from master, as expected.")
     # Reduce xid_stop_limit as per the standard procedure.
     self._reduce_stop_limit_guc(primary, new_limit)
     # Vacuum freezing newdb should suffice to recover.
     PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
          out_file="vacuum_newdb_wl.out").run(validateAfter=True)
     # Ensure that utility connections to the segment are successful.
     sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
     with dbconn.connect(dburl, utility=True) as conn:
         age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
     self.assertTrue(age_newdb > 0)
     # Verify SQL commands from master are successful.
     self._basic_sanity_check("clean")
     self._restore_stop_limit_guc(primary.datadir)
     # Verify SQL commands after restoring xid_stop_limit GUC.
     self._basic_sanity_check("clean")
     PSQL.drop_database(dbname="newdb")
Example #12
def _get_segment_version(seg):
    try:
        if seg.role == gparray.ROLE_PRIMARY:
            dburl = dbconn.DbURL(hostname=seg.hostname, port=seg.port, dbname="template1")
            conn = dbconn.connect(dburl, utility=True)
            return dbconn.execSQLForSingleton(conn, "select version()")

        if seg.role == gparray.ROLE_MIRROR:
            cmd = base.Command("Try connecting to mirror",
                               "psql -h %s -p %s template1 -c 'select 1'"
                               %(seg.hostname, seg.port))
            cmd.run(validateAfter=False)
            if cmd.results.rc == 0:
                raise RuntimeError("Connection to mirror succeeded unexpectedly")

            stderr = cmd.results.stderr.splitlines()
            for line in stderr:
                match = _version_regex.match(line)
                if match:
                    return match.group(1)

            raise RuntimeError("Unexpected error from mirror connection: %s" % cmd.results.stderr)

        logger.error("Invalid role '%s' for dbid %d", seg.role, seg.dbid)
        return None

    except Exception as ex:
        logger.error("Could not get segment version for dbid %d", seg.dbid, exc_info=ex)
        return None
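
_get_segment_version depends on a module-level _version_regex that this page does not show. The exact pattern depends on the error banner the mirror returns, so the following is a purely hypothetical reconstruction, assuming the rejection message embeds a '(Greenplum Database ...)' version string:

import re

# Hypothetical pattern; adjust to the actual banner your server emits.
_version_regex = re.compile(r'\(Greenplum Database ([^)]+)\)')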
Example #13
    def template0_stop_limit(self):
        """
        Raise next xid so that age(template0) grows beyond stop limit.
        Create a new database off template0, let GPDB stop accepting
        commands.  Recover GPDB using the documented procedure.
        Ensure that the new database is sane.

        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl, utility=True) as conn:
            sql = "SHOW xid_stop_limit"
            slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
        new_limit = xid_sum(slimit_guc, -(10**6))
        # Raise nextXid so that template0 age would cross stop limit.
        self._raise_template0_age(self.STOP_LIMIT, self.gparray.master)
        # newdb's age crosses stop limit and GPDB stops accepting commands.
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        self._basic_sanity_check("error")
        # Reduce xid_stop_limit as per the standard procedure.
        self._reduce_stop_limit_guc(self.gparray.master, new_limit)
        # Vacuum freezing newdb should suffice to recover.
        PSQL(sql_cmd="VACUUM FREEZE",
             dbname="newdb",
             out_file="vacuum_newdb_stop_master.out").run(validateAfter=True)
        self._basic_sanity_check("clean")
        PSQL.drop_database(dbname="newdb")
        self._restore_stop_limit_guc(self.gparray.master.datadir)
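
Several of these tests call an xid_sum helper that is likewise not shown on this page. Transaction IDs wrap in a 32-bit space in which ids below 3 (FirstNormalTransactionId) are reserved, so the following is a plausible sketch, offered only as an assumption about the helper's behavior:

def xid_sum(xid, delta):
    # Hypothetical reconstruction: add delta to xid modulo the 32-bit xid
    # space, clamping to the first normal xid if the result lands in the
    # reserved range 0-2, as PostgreSQL's TransactionIdAdvance does.
    FIRST_NORMAL_XID = 3
    result = (xid + delta) % (2 ** 32)
    return result if result >= FIRST_NORMAL_XID else FIRST_NORMAL_XID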
Example #14
def create_int_table(context, table_name, table_type='heap', dbname='testdb'):
    CREATE_TABLE_SQL = None
    NROW = 1000

    table_type = table_type.upper()
    if table_type == 'AO':
        CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE) as select generate_series(1,%d) as c1' % (
            table_name, NROW)
    elif table_type == 'CO':
        CREATE_TABLE_SQL = 'create table %s WITH(APPENDONLY=TRUE, orientation=column) as select generate_series(1, %d) as c1' % (
            table_name, NROW)
    elif table_type == 'HEAP':
        CREATE_TABLE_SQL = 'create table %s as select generate_series(1, %d) as c1' % (
            table_name, NROW)

    if CREATE_TABLE_SQL is None:
        raise Exception('Invalid table type specified')

    SELECT_TABLE_SQL = 'select count(*) from %s' % table_name
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, CREATE_TABLE_SQL)
        conn.commit()

        result = dbconn.execSQLForSingleton(conn, SELECT_TABLE_SQL)
        if result != NROW:
            raise Exception(
                'Integer table creation was not successful. Expected %d does not match %d'
                % (NROW, result))
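
A hypothetical call to the helper above, creating a 1000-row append-only table (the context argument is accepted but unused by the function body):

create_int_table(None, 'public.ao_sample', table_type='ao', dbname='testdb')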
Example #15
    def test_client_encoding_can_be_set(self):
        encoding = 'SQL_ASCII'

        with dbconn.connect(self.url, encoding=encoding) as conn:
            actual = dbconn.execSQLForSingleton(conn, 'SHOW client_encoding')

        self.assertEqual(actual, encoding)
Example #16
def check_count_for_specific_query(dbname, query, nrows):
    NUM_ROWS_QUERY = '%s' % query
    # We want to bubble up the exception so that if table does not exist, the test fails
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
    if result != nrows:
        raise Exception('%d rows in query: %s. Expected row count = %d' % (result, query, nrows))
Example #17
    def _process_createdb(self, restore_timestamp, restore_db, master_datadir, master_port): 
        conn = None
        try:
            dburl = dbconn.DbURL(port=master_port)
            conn = dbconn.connect(dburl)
            count = execSQLForSingleton(conn, "select count(*) from pg_database where datname='%s';" % restore_db)

            if count == 1:
                logger.info("Dropping database %s" % restore_db)
                try:
                    cursor = conn.cursor()
                    cursor.execute("commit")                          # hack to move drop stmt out of implied transaction
                    cursor.execute("drop database %s" % restore_db)
                    cursor.close()
                except Exception as e:
                    logger.exception("Could not drop database %s" % restore_db)
                    raise ExceptionNoStackTraceNeeded('Failed to drop database %s' % restore_db)
                else:
                    logger.info('Dropped database %s' % restore_db)
        finally:
            if conn is not None:
                conn.close()

        createdb_file = os.path.join(master_datadir, DUMP_DIR, restore_timestamp[0:8], "%s%s" % (CREATEDB_PREFIX, restore_timestamp))
        logger.info('Invoking %s' % createdb_file)
        Psql('Invoking schema dump', filename=createdb_file).run(validateAfter=True)
Example #18
    def _reduce_stop_limit_guc(self, segdb, new_slimit):
        """
        Reduce the xid_stop_limit GUC to the specified value.

        @param segdb: segment whose PGDATA directory contains the
        postgresql.conf that needs to be modified.

        @param new_slimit: new value of the xid_stop_limit GUC, less than
        the default value of 10**9.
        """
        for seg in self.gparray.getDbList(True):
            logger.info("Stopping segment %d at %s" % (seg.dbid, seg.datadir))
            cmd = "pg_ctl -D %s stop" % seg.datadir
            Command("stop segment", cmd).run(validateAfter=True)
        logger.info("New xid_stop_limit: %s" % new_slimit)
        cmd = ('echo "xid_stop_limit=%d" >> %s/postgresql.conf' %
               (new_slimit, segdb.datadir))
        Command("revise xid_stop_limit", cmd).run(validateAfter=True)
        logger.info("Starting the cluster")
        cmd = "source $GPHOME/greenplum_path.sh && gpstart -a"
        Command("start cluster", cmd).run(validateAfter=True)
        dburl = dbconn.DbURL(hostname=segdb.hostname, port=segdb.port)
        with dbconn.connect(dburl, utility=True) as conn:
            stop_limit = int(
                dbconn.execSQLForSingleton(conn, "SHOW xid_stop_limit"))
        self.assertEqual(stop_limit, new_slimit,
                         "Failed to set xid_stop_limit")
Example #19
def check_row_count(tablename, dbname, nrows):
    NUM_ROWS_QUERY = 'select count(*) from %s' % tablename
    # We want to bubble up the exception so that if table does not exist, the test fails
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
    if result != nrows:
        raise Exception('%d rows in table %s.%s, expected row count = %d' % (result, dbname, tablename, nrows))
Example #20
def impl(context, configString, dbname):
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        query = "select datconfig from pg_database where datname in ('%s');" % dbname
        datconfig = dbconn.execSQLForSingleton(conn, query)
    if not datconfig or configString not in datconfig:
        raise Exception("%s is not in the datconfig for database '%s':\n %s" %
                        (configString, dbname, datconfig))
Example #21
def impl(context, dbname, expected_owner):
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        query = "SELECT pg_catalog.pg_get_userbyid(d.datdba) FROM pg_catalog.pg_database d WHERE d.datname = '%s';" % dbname
        actual_owner = dbconn.execSQLForSingleton(conn, query)
    if actual_owner != expected_owner:
        raise Exception(
            "Database %s has owner %s when it should have owner %s" %
            (dbname, actual_owner, expected_owner))
Example #22
def has_expected_status(content, preferred_role, expected_status):
    with dbconn.connect(dbconn.DbURL(dbname="template1"),
                        unsetSearchPath=False) as conn:
        status = dbconn.execSQLForSingleton(
            conn,
            "SELECT status FROM gp_segment_configuration WHERE content = %s AND preferred_role = '%s'"
            % (content, preferred_role))
    return status == expected_status
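
has_expected_status returns a one-shot boolean, so test steps typically poll it. A minimal wait loop built on it, with hypothetical timeout and poll-interval defaults:

import time

def wait_for_expected_status(content, preferred_role, expected_status,
                             timeout=600, poll_interval=5):
    # Poll gp_segment_configuration via has_expected_status() until the
    # segment reaches the expected status or the timeout elapses.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if has_expected_status(content, preferred_role, expected_status):
            return True
        time.sleep(poll_interval)
    return False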
Example #23
 def get_relfilenode_oid(self, tablename):
     RELFILENODE_OID_QUERY = """ SELECT relfilenode
                                 FROM pg_class
                                 WHERE relname='%s'
                             """ % tablename
     with dbconn.connect(dbconn.DbURL()) as conn:
         relfilenode_oid = dbconn.execSQLForSingleton(conn, RELFILENODE_OID_QUERY)
     return relfilenode_oid
Example #24
def impl(context, tablename, dbname):
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        query = "select count(*) from pg_statistic where starelid='%s'::regclass;" % tablename
        num_tuples = dbconn.execSQLForSingleton(conn, query)
        if num_tuples == 0:
            raise Exception(
                "Expected partition table %s to contain root statistics" %
                tablename)
Example #25
    def test_secure_search_path_not_set(self):

        with dbconn.connect(self.url, unsetSearchPath=False) as conn:
            result = dbconn.execSQLForSingleton(
                conn,
                "SELECT setting FROM pg_settings WHERE name='search_path'")

        self.assertEqual(result, '"$user",public')
Example #26
 def get_oid(self, tablename):
     OID_QUERY = """ SELECT oid
                                 FROM pg_class
                                 WHERE relname='%s'
                             """ % tablename
     with dbconn.connect(dbconn.DbURL()) as conn:
         oid = dbconn.execSQLForSingleton(conn, OID_QUERY)
     return oid
Example #27
    def test_secure_search_path_set(self):

        with dbconn.connect(self.url) as conn:
            result = dbconn.execSQLForSingleton(
                conn,
                "SELECT setting FROM pg_settings WHERE name='search_path'")

        self.assertEqual(result, '')
Example #28
def execute_sql_singleton(dbname, sql):
    result = None
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        result = dbconn.execSQLForSingleton(conn, sql)

    if result is None:
        raise Exception("error running query: %s" % sql)

    return result
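
A hypothetical usage of the wrapper above, fetching a single scalar:

nrows = execute_sql_singleton('testdb', 'SELECT count(*) FROM my_table')

Note that a query legitimately returning NULL is reported as an error here, since the wrapper only distinguishes None.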
Example #29
 def execute(self):
     conn = None
     try:
         dburl = dbconn.DbURL(port=self.master_port, dbname=self.database)
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(conn, "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'" % (self.table, self.schema))
         return count > 0
     finally:
         if conn is not None:
             conn.close()
Example #30
def get_table_oid(context, dbname, schema, tablename):
    OID_SQL = """SELECT c.oid
                 FROM pg_class c, pg_namespace n
                 WHERE c.relnamespace = n.oid AND c.relname = '%s' AND n.nspname = '%s'""" % (tablename, schema)

    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        oid = dbconn.execSQLForSingleton(conn, OID_SQL)

    return oid
Example #31
 def get_redistribute_status(self):
     sql = 'select status from gpexpand.status order by updated desc limit 1'
     dburl = dbconn.DbURL(dbname=self.database)
     conn = dbconn.connect(dburl, encoding='UTF8')
     status = dbconn.execSQLForSingleton(conn, sql)
     if status == 'EXPANSION COMPLETE':
         rc = 0
     else:
         rc = 1
     return rc
Example #32
 def validate_table(self, schema_name, table_name):
     with dbconn.connect(dbconn.DbURL(dbname=self.database, port=self.port)) as conn:
         c = execSQLForSingleton(conn,
                                 """SELECT count(*)
                                    FROM pg_class, pg_namespace
                                    WHERE pg_namespace.nspname = '{schema}'
                                    AND pg_class.relname = '{table}'""".format(schema=schema_name, table=table_name))
         if not c:
             raise ExceptionNoStackTraceNeeded('Table {schema}.{table} does not exist'
                                               .format(schema=schema_name, table=table_name))
Example #33
def must_have_expected_status(content, preferred_role, expected_status):
    with dbconn.connect(dbconn.DbURL(dbname="template1"),
                        unsetSearchPath=False) as conn:
        status = dbconn.execSQLForSingleton(
            conn,
            "SELECT status FROM gp_segment_configuration WHERE content = %s AND preferred_role = '%s'"
            % (content, preferred_role))
    if status != expected_status:
        raise Exception("Expected status for role %s to be %s, but it is %s" %
                        (preferred_role, expected_status, status))
Example #34
 def execute(self):
     conn = None
     try:
         dburl = dbconn.DbURL(port=self.master_port)
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(conn, "select count(*) from pg_database where datname='%s';" % self.database)
         if count == 0:
             raise ExceptionNoStackTraceNeeded("Database %s does not exist." % self.database)
     finally:
         if conn is not None:
             conn.close()
Example #35
 def execute(self):
     conn = None
     try:
         dburl = dbconn.DbURL(port=self.master_port, dbname=self.database)
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(conn, "select count(*) from pg_namespace where nspname='%s';" % self.schema)
         if count == 0:
             raise ExceptionNoStackTraceNeeded("Schema %s does not exist in database %s." % (self.schema, self.database))
     finally:
         if conn is not None:
             conn.close()
Example #36
def check_row_count(context, tablename, dbname, nrows):
    NUM_ROWS_QUERY = 'select count(*) from %s' % tablename
    # We want to bubble up the exception so that if table does not exist, the test fails
    if hasattr(context, 'standby_was_activated') and context.standby_was_activated is True:
        dburl = dbconn.DbURL(dbname=dbname, port=context.standby_port, hostname=context.standby_hostname)
    else:
        dburl = dbconn.DbURL(dbname=dbname)

    with dbconn.connect(dburl) as conn:
        result = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
    if result != nrows:
        raise Exception('%d rows in table %s.%s, expected row count = %d' % (result, dbname, tablename, nrows))
Example #37
 def execute(self):
     conn = None
     try:
         dburl = dbconn.DbURL(port=self.master_port, dbname=self.database)
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(
             conn,
             "select count(*) from pg_class, pg_namespace where pg_class.relname = '%s' and pg_class.relnamespace = pg_namespace.oid and pg_namespace.nspname = '%s'"
             % (self.table, self.schema))
         return count > 0
     finally:
         if conn is not None:
             conn.close()
Example #38
 def check_indexes(self, schema_name, table_name):
     with dbconn.connect(dbconn.DbURL(dbname=self.database, port=self.port)) as conn:
         c = execSQLForSingleton(conn, """SELECT count(*)
                                          FROM pg_index
                                          WHERE indrelid = (SELECT pg_class.oid
                                                            FROM pg_class, pg_namespace
                                                            WHERE pg_class.relname='{table}' AND pg_namespace.nspname='{schema}')
                                       """.format(table=table_name, schema=schema_name))
         if c != 0:
             if self.interactive:
                 return ask_yesno(None,
                                 'Table {schema}.{table} has indexes. This might slow down table reload. Do you still want to continue ?'
                                 .format(schema=schema_name, table=table_name),
                                 'N')
     return True
Example #39
def impl(context, dbname):

    SLICE_QUERY = """
select count(*) from (select * from
(select * from 
(select * from (select a from t2 group by a) SUBA, (select a from t1 group by a) SUBB) PARTA,
(select * from (select b from t2 group by b) SUBC, (select b from t1 group by b) SUBD) PARTB) SOUPA,
(select * from 
(select * from (select a from t2 group by a) SUBA, (select a from t1 group by a) SUBB) PARTA,
(select * from (select b from t2 group by b) SUBC, (select b from t1 group by b) SUBD) PARTB) SOUPB) FINALA;
    """

    try:
        with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
            context.slice_query_result = dbconn.execSQLForSingleton(conn, SLICE_QUERY)
    except Exception as e:
        context.dispatch_exception = e
Example #40
 def execute(self):
     dburl = dbconn.DbURL(dbname=self.database, port=self.master_port)
     conn = None
     try:
         conn = dbconn.connect(dburl)
         count = execSQLForSingleton(conn, "select count(*) from pg_class, pg_namespace where pg_namespace.nspname = 'gp_toolkit' and pg_class.relnamespace = pg_namespace.oid")
     finally:
         if conn is not None:
             conn.close()
     if count > 0:
         logger.debug("gp_toolkit exists within database %s." % self.database)
         return
     logger.info("gp_toolkit not found. Installing...")
     Psql('Installing gp_toolkit', 
          filename='$GPHOME/share/postgresql/gp_toolkit.sql',
          database=self.database,
          port=self.master_port).run(validateAfter=True)
Example #41
    def hosts_gpsegconf_validation(self):
        """ 
            Validate that the new hosts were added to the gp_segment_configuration table.
            Parse the expansion map and populate the data fields into a list.
        """
        logger.info("Verifying expanded segments in gp_segment_configuration table ...")

        with open(self.expansion_map_file) as fp:
            replication_port_list = []
            for line in fp:
                fields = line.split(':')
                if len(fields) == 8:
                    replication_port_list.append(fields[7])
                    cmd = """select count(*)
                             from gp_segment_configuration
                             where hostname = '%s'
                             and address = '%s'
                             and port = %s
                             and dbid = %s
                             and content = %s
                             and role = '%s'
                             and replication_port = %s""" % (fields[0], fields[1], fields[2],  fields[4], fields[5], fields[6], fields[7])
                else:
                    cmd = """select count(*)
                             from gp_segment_configuration
                             where hostname = '%s'
                             and address = '%s'
                             and port = %s
                             and dbid = %s
                             and content = %s
                             and role = '%s'""" % (fields[0], fields[1], fields[2], fields[4], fields[5], fields[6])
        
                with dbconn.connect(dbconn.DbURL()) as conn:
                    row = dbconn.execSQLForSingleton(conn, cmd)

                if row != 1:
                    return False
            
        return True
Example #42
    def cleanup_expansion(self):
        """
            Run gpexpand to clean up the expansion catalog.
            @return: True if the cleanup of the gpexpand schema succeeded,
                     False otherwise
        """
        logger.info("Running expansion cleanup ...")
        query = "SELECT count(*) FROM information_schema.schemata where schema_name='gpexpand';"

        cmd = Command(name='run gpexpand cleanup',
                      cmdStr='echo -e \"y\n\"  | gpexpand -c -D %s' % self.test_database,
                      ctxt=REMOTE,
                      remoteHost='localhost')
        cmd.run(validateAfter=True)

        with dbconn.connect(dbconn.DbURL(dbname=self.test_database)) as conn:
            try:
                row = dbconn.execSQLForSingleton(conn, query)
            except UnexpectedRowsError:
                return False

            if row != 0:
                return False

        return True