def template0_wrap_around(self):
    """
    Raise next xid so that age(template0) suffers a wrap around and
    becomes negative.  Create a new database off template0, which also
    suffers wrap around.  Reset the new db's age; sanity must then
    succeed on the new db.
    """
    self._raise_template0_age(self.WRAP_LIMIT, self.gparray.master)
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    age_sql = ("SELECT age(datfrozenxid) FROM pg_database "
               "WHERE datname='newdb'")
    master_url = dbconn.DbURL()
    # Xid wrap-around should cause template0 and newdb's age to be
    # negative.
    with dbconn.connect(master_url, utility=True) as conn:
        newdb_age = int(dbconn.execSQLForSingleton(conn, age_sql))
    self.assertTrue(newdb_age < 0)
    # All xids in newdb are frozen at this point.  Therefore its age
    # can be reset so that it is no longer negative.
    self._reset_age("newdb")
    with dbconn.connect(master_url, utility=True) as conn:
        newdb_age = int(dbconn.execSQLForSingleton(conn, age_sql))
    self.assertTrue(newdb_age > 0)
    # Normal operations must work on newdb post recovery from
    # wraparound.
    self._basic_sanity_check("clean", {"dbname":"newdb"})
    logger.info("Sanity succeeded on newdb, dropping it.")
    PSQL.drop_database(dbname="newdb")
def template0_stop_limit(self):
    """
    Raise next xid so that age(template0) grows beyond stop limit.
    Create a new database off template0, let GPDB stop accepting
    commands.  Recover GPDB using the documented procedure.  Ensure
    that the new database is sane.
    """
    dburl = dbconn.DbURL()
    with dbconn.connect(dburl, utility=True) as conn:
        sql = "SHOW xid_stop_limit"
        slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
    # Target stop limit for the recovery procedure: one million below
    # the current GUC value.
    new_limit = xid_sum(slimit_guc, -(10**6))
    # Raise nextXid so that template0 age would cross stop limit.
    self._raise_template0_age(self.STOP_LIMIT, self.gparray.master)
    # newdb's age crosses stop limit and GPDB stops accepting commands.
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    self._basic_sanity_check("error")
    # Reduce xid_stop_limit as per the standard procedure.
    self._reduce_stop_limit_guc(self.gparray.master, new_limit)
    # Vacuum freezing newdb should suffice to recover.
    PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
         out_file="vacuum_newdb_stop_master.out").run(validateAfter=True)
    self._basic_sanity_check("clean")
    PSQL.drop_database(dbname="newdb")
    self._restore_stop_limit_guc(self.gparray.master.datadir)
def template0_warn_limit(self):
    """
    Raise next xid so that age(template0) grows beyond warn limit.
    Create a new database from template0, which will inherit the age
    of template0.  Ensure that warnings stop once vacuum freeze is run
    on the new database.
    """
    # Bump up age of template0 to cause warn limit violation.
    self._raise_template0_age(self.WARN_LIMIT, self.gparray.master)
    # Everything is healthy until a new db is created off template0.
    self._basic_sanity_check("clean")
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    # The xid limits in shared memory are only updated at a VACUUM,
    # so run one now.
    PSQL(sql_cmd='VACUUM FREEZE', dbname='postgres',
         out_file='vacuum_postgres.out').run(validateAfter=True)
    # newdb is now the oldest database, older than the warn limit.
    self._basic_sanity_check("warn")
    # Vacuum freezing newdb must stop the warnings.
    PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
         out_file="vacuum_newdb_wl_master.out").run(validateAfter=True)
    self._basic_sanity_check("clean")
    PSQL.drop_database(dbname="newdb")
def template0_wrap_around_on_segment(self, primary):
    """
    Same as template0_wrap_around, but on segment.
    """
    logger.info("template0_wrap_around_on_segment: dbid(%d) %s:%d'" %
                (primary.dbid, primary.hostname, primary.port))
    self._raise_template0_age(self.WRAP_LIMIT, primary)
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    age_sql = ("SELECT age(datfrozenxid) FROM pg_database "
               "WHERE datname='newdb'")
    seg_url = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
    # Age of newdb on the segment must have gone negative.
    with dbconn.connect(seg_url, utility=True) as conn:
        newdb_age = int(dbconn.execSQLForSingleton(conn, age_sql))
    self.assertTrue(newdb_age < 0)
    # Reset newdb age so as to recover from wrap around.
    self._reset_age("newdb", primary)
    # Normal operations must work on newdb whose age was reset to a
    # correct value.
    self._basic_sanity_check("clean", {"dbname":"newdb"})
    # Age of newdb on the segment must now be valid.
    with dbconn.connect(seg_url, utility=True) as conn:
        newdb_age = int(dbconn.execSQLForSingleton(conn, age_sql))
    self.assertTrue(newdb_age > 0)
    PSQL.drop_database(dbname="newdb")
def template0_wrap_around(self):
    """
    Raise next xid so that age(template0) suffers a wrap around and
    becomes negative.  Create a new database off template0, which also
    suffers wrap around.  Reset the new db's age.  Sanity must succeed
    on the new db.
    """
    self._raise_template0_age(self.WRAP_LIMIT, self.gparray.master)
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    query = ("SELECT age(datfrozenxid) FROM pg_database "
             "WHERE datname='newdb'")
    url = dbconn.DbURL()
    with dbconn.connect(url, utility=True) as conn:
        age = int(dbconn.execSQLForSingleton(conn, query))
    # Xid wrap-around should cause template0 and newdb's age to be
    # negative.
    self.assertTrue(age < 0)
    # All xids in newdb are frozen at this point, so we can reset its
    # age so that it is not negative.
    self._reset_age("newdb")
    with dbconn.connect(url, utility=True) as conn:
        age = int(dbconn.execSQLForSingleton(conn, query))
    self.assertTrue(age > 0)
    # Verify that normal operations can be performed on newdb post
    # recovery from wraparound.
    self._basic_sanity_check("clean", {"dbname": "newdb"})
    logger.info("Sanity succeeded on newdb, dropping it.")
    PSQL.drop_database(dbname="newdb")
def test_reset_database_exists(self):
    """Resetting an existing database recreates it with no tables."""
    db = 'testresetdb2'
    PSQL.create_database(db)
    time.sleep(4)
    PSQL.run_sql_command(sql_cmd="CREATE TABLE testresettable(i int)",
                         dbname=db)
    out = PSQL.run_sql_command(sql_cmd="SELECT * FROM testresettable",
                               dbname=db)
    self.assertIsNotNone(re.search('0 rows', out))
    PSQL.reset_database(db)
    result_file = os.path.join(
        os.path.dirname(inspect.getfile(self.__class__)),
        'test_reset_database_exists.out')
    self.assertFalse(os.path.exists(result_file))
    PSQL.run_sql_command(sql_cmd="SELECT * FROM testresettable",
                         dbname=db, out_file=result_file)
    self.assertTrue(os.path.exists(result_file))
    # stdout should be none
    with open(result_file, 'r') as f:
        out = f.read()
    # The table must be gone after the reset.
    self.assertIsNotNone(re.search('relation.*does not exist', out))
    os.remove(result_file)
    self.assertFalse(os.path.exists(result_file))
    PSQL.drop_database(db)
    time.sleep(4)
    self.assertFalse(PSQL.database_exists(db))
def template0_warn_limit_on_segment(self, primary):
    """
    Same as template0_warn_limit, but on a segment.
    """
    logger.info("template0_warn_limit_on_segment: dbid(%d) %s:%d'" %
                (primary.dbid, primary.hostname, primary.port))
    # Bump up age of template0 to cause warn limit violation.
    self._raise_template0_age(self.WARN_LIMIT, primary)
    # Everything is healthy until a new db is created off template0.
    self._basic_sanity_check("clean")
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    logger.info("newdb created off template0")
    # The xid limits in shared memory are only updated at a VACUUM,
    # so run one now.
    PSQL(sql_cmd='VACUUM FREEZE', dbname='postgres',
         out_file='vacuum_postgres.out').run(validateAfter=True)
    # newdb is now the oldest database, older than the warn limit.
    self._basic_sanity_check("warn_segment")
    # Vacuum freezing newdb must stop the warnings.
    PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
         out_file="vacuum_newdb_warn_seg.out").run(validateAfter=True)
    self._basic_sanity_check("clean")
    PSQL.drop_database(dbname="newdb")
def template0_wrap_around_on_segment(self, primary):
    """
    Same as template0_wrap_around, but on segment.
    """
    logger.info("template0_wrap_around_on_segment: dbid(%d) %s:%d'" %
                (primary.dbid, primary.hostname, primary.port))
    self._raise_template0_age(self.WRAP_LIMIT, primary)
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    query = ("SELECT age(datfrozenxid) FROM pg_database "
             "WHERE datname='newdb'")
    # Verify that age of newdb on the segment is negative.
    conn_url = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
    with dbconn.connect(conn_url, utility=True) as conn:
        age = int(dbconn.execSQLForSingleton(conn, query))
    self.assertTrue(age < 0)
    # Reset newdb age so as to recover from wrap around.
    self._reset_age("newdb", primary)
    # Verify that normal operations can be performed on newdb whose
    # age was reset to a correct value.
    self._basic_sanity_check("clean", {"dbname": "newdb"})
    # Verify that age of newdb on the segment is valid.
    with dbconn.connect(conn_url, utility=True) as conn:
        age = int(dbconn.execSQLForSingleton(conn, query))
    self.assertTrue(age > 0)
    PSQL.drop_database(dbname="newdb")
def test_reset_database_not_exists(self):
    """Resetting a database that does not exist creates it."""
    db = 'testresetdb'
    PSQL.reset_database(db)
    time.sleep(4)
    self.assertTrue(PSQL.database_exists(db))
    PSQL.drop_database(db)
    time.sleep(4)
    self.assertFalse(PSQL.database_exists(db))
def test_create_database(self):
    """Create a database, confirm it exists, then drop it."""
    db = 'testdb'
    self.assertFalse(PSQL.database_exists(db))
    PSQL.create_database(db)
    time.sleep(4)
    self.assertTrue(PSQL.database_exists(db))
    PSQL.drop_database(db)
    time.sleep(4)
    self.assertFalse(PSQL.database_exists(db))
def template0_stop_limit_on_segment(self, primary):
    """
    Same as template0_stop_limit, but on segment.
    """
    logger.info("template0_stop_limit_on_segment: dbid(%d) %s:%d'" %
                (primary.dbid, primary.hostname, primary.port))
    seg_url = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
    with dbconn.connect(seg_url, utility=True) as conn:
        slimit_guc = int(dbconn.execSQLForSingleton(
            conn, "SHOW xid_stop_limit"))
    new_limit = xid_sum(slimit_guc, -(10**6))
    # Raise nextXid so that template0 age would cross stop limit.
    self._raise_template0_age(self.STOP_LIMIT, primary)
    # newdb's age crosses stop limit and GPDB stops accepting commands.
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    logger.info("newdb created off template0")
    # The xid limits in shared memory are only updated at a VACUUM,
    # so run one now.
    PSQL(sql_cmd='VACUUM FREEZE', dbname='postgres',
         out_file='vacuum_postgres.out').run(validateAfter=True)
    # Utility connections to the segment must fail with error.
    psql_args = {"PGOPTIONS": "-c 'gp_session_role=utility'",
                 "host": primary.hostname,
                 "port": primary.port}
    self._basic_sanity_check("error", psql_args)
    logger.info("Utility connection to dbid(%d) reported stop limit "
                "error, as expected." % primary.dbid)
    try:
        # SQL commands issued through the master must fail as well.
        PSQL(sql_cmd="CREATE TABLE test (a int, b int)").run(
            validateAfter=True)
        self.fail("CREATE TABLE succeeded from master, when expecting "
                  "stop limit error on segment.")
    except ExecutionError:
        logger.info("CREATE TABLE failed from master, as expected.")
    # Reduce xid_stop_limit as per the standard procedure.
    self._reduce_stop_limit_guc(primary, new_limit)
    # Vacuum freezing newdb should suffice to recover.
    PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
         out_file="vacuum_newdb_wl.out").run(validateAfter=True)
    # Ensure that utility connections to the segment are successful.
    age_sql = ("SELECT age(datfrozenxid) FROM pg_database "
               "WHERE datname='newdb'")
    with dbconn.connect(seg_url, utility=True) as conn:
        newdb_age = int(dbconn.execSQLForSingleton(conn, age_sql))
    self.assertTrue(newdb_age > 0)
    # Verify SQL commands from master are successful.
    self._basic_sanity_check("clean")
    self._restore_stop_limit_guc(primary.datadir)
    # Verify SQL commands after restoring xid_stop_limit GUC.
    self._basic_sanity_check("clean")
    PSQL.drop_database(dbname="newdb")
def setUpClass(cls):
    """Drop leftover test databases and stale .out files from prior runs."""
    for db in ('testdb', 'testdupdb', 'testdropdb', 'testresetdb',
               'testresetdb2'):
        if PSQL.database_exists(db):
            PSQL.drop_database(db)
    here = os.path.dirname(inspect.getfile(cls))
    for name in os.listdir(here):
        if fnmatch.fnmatch(name, "*.out"):
            os.remove(os.path.join(here, name))
def test_drop_database(self):
    """Drop an existing database and verify it is gone."""
    db = "testdropdb"
    self.assertFalse(PSQL.database_exists(db))
    PSQL.create_database(db)
    time.sleep(4)
    self.assertTrue(PSQL.database_exists(db))
    # dropdb
    PSQL.drop_database(db)
    time.sleep(4)
    self.assertFalse(PSQL.database_exists(db))
def test_create_database_already_exists(self):
    """Creating a database that already exists must raise PSQLException."""
    dbname = 'testdupdb'
    self.assertFalse(PSQL.database_exists(dbname))
    PSQL.create_database(dbname)
    time.sleep(4)
    self.assertTrue(PSQL.database_exists(dbname))
    # The context-manager binding ("as cm") was unused; the bare
    # assertRaises context is sufficient.
    with self.assertRaises(PSQLException):
        PSQL.create_database(dbname)
    PSQL.drop_database(dbname)
    time.sleep(4)
    self.assertFalse(PSQL.database_exists(dbname))
def setUpClass(cls):
    """Remove test databases and *.out artifacts left by earlier runs."""
    leftovers = ['testdb', 'testdupdb', 'testdropdb', 'testresetdb',
                 'testresetdb2']
    for db in leftovers:
        if PSQL.database_exists(db):
            PSQL.drop_database(db)
    test_dir = os.path.dirname(inspect.getfile(cls))
    for entry in os.listdir(test_dir):
        if fnmatch.fnmatch(entry, "*.out"):
            os.remove(os.path.join(test_dir, entry))
def template0_stop_limit_on_segment(self, primary):
    """
    Same as template0_stop_limit, but on segment.
    """
    logger.info("template0_stop_limit_on_segment: dbid(%d) %s:%d'" %
                (primary.dbid, primary.hostname, primary.port))
    dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
    with dbconn.connect(dburl, utility=True) as conn:
        sql = "SHOW xid_stop_limit"
        slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
    new_limit = xid_sum(slimit_guc, -(10**6))
    # Raise nextXid so that template0 age would cross stop limit.
    self._raise_template0_age(self.STOP_LIMIT, primary)
    # newdb's age crosses stop limit and GPDB stops accepting commands.
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    logger.info("newdb created off template0")
    # The xid limits in shared memory are only updated at a VACUUM,
    # so run one now; without it the stop-limit violation is not seen.
    PSQL(sql_cmd='VACUUM FREEZE', dbname='postgres',
         out_file='vacuum_postgres.out').run(validateAfter=True)
    # Ensure that utility connections to the segment fail with error.
    psql_args = {"PGOPTIONS": "-c 'gp_session_role=utility'",
                 "host": primary.hostname,
                 "port": primary.port}
    self._basic_sanity_check("error", psql_args)
    logger.info("Utility connection to dbid(%d) reported stop limit "
                "error, as expected." % primary.dbid)
    try:
        # Verify that SQL commands from master fail.
        PSQL(sql_cmd="CREATE TABLE test (a int, b int)").run(
            validateAfter=True)
        self.fail("CREATE TABLE succeeded from master, when expecting "
                  "stop limit error on segment.")
    except ExecutionError:
        logger.info("CREATE TABLE failed from master, as expected.")
    # Reduce xid_stop_limit as per the standard procedure.
    self._reduce_stop_limit_guc(primary, new_limit)
    # Vacuum freezing newdb should suffice to recover.
    PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
         out_file="vacuum_newdb_wl.out").run(validateAfter=True)
    # Ensure that utility connections to the segment are successful.
    sql = ("SELECT age(datfrozenxid) FROM pg_database "
           "WHERE datname='newdb'")
    with dbconn.connect(dburl, utility=True) as conn:
        age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
    self.assertTrue(age_newdb > 0)
    # Verify SQL commands from master are successful.
    self._basic_sanity_check("clean")
    self._restore_stop_limit_guc(primary.datadir)
    # Verify SQL commands after restoring xid_stop_limit GUC.
    self._basic_sanity_check("clean")
    PSQL.drop_database(dbname="newdb")
def template0_warn_limit_on_segment(self, primary):
    """
    Same as template0_warn_limit, but on a segment.
    """
    logger.info("template0_warn_limit_on_segment: dbid(%d) %s:%d'" %
                (primary.dbid, primary.hostname, primary.port))
    # Bump up age of template0 to cause warn limit violation.
    self._raise_template0_age(self.WARN_LIMIT, primary)
    # All is well until we create a new db off template0.
    self._basic_sanity_check("clean")
    # Create database newdb off template0.
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    logger.info("newdb created off template0")
    # The xid limits in shared memory are only updated at a VACUUM,
    # so run one now; without it the warn limit is not yet in effect.
    PSQL(sql_cmd='VACUUM FREEZE', dbname='postgres',
         out_file='vacuum_postgres.out').run(validateAfter=True)
    # newdb is now the oldest database, older than warn limit.
    self._basic_sanity_check("warn_segment")
    # Ensure that vacuum freeze on newdb stops the warnings.
    PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
         out_file="vacuum_newdb_warn_seg.out").run(validateAfter=True)
    self._basic_sanity_check("clean")
    PSQL.drop_database(dbname="newdb")
def test_reset_database_exists(self):
    """
    Reset an existing database and verify its tables are gone.

    Creates a table, resets the database, then confirms a SELECT on the
    table reports "relation ... does not exist" in the output file.
    """
    dbname = 'testresetdb2'
    PSQL.create_database(dbname)
    time.sleep(4)
    PSQL.run_sql_command(sql_cmd="CREATE TABLE testresettable(i int)",
                         dbname=dbname)
    output = PSQL.run_sql_command(sql_cmd="SELECT * FROM testresettable",
                                  dbname=dbname)
    self.assertIsNotNone(re.search('0 rows', output))
    PSQL.reset_database(dbname)
    out_file = os.path.join(
        os.path.dirname(inspect.getfile(self.__class__)),
        'test_reset_database_exists.out')
    self.assertFalse(os.path.exists(out_file))
    PSQL.run_sql_command(sql_cmd="SELECT * FROM testresettable",
                         dbname=dbname, out_file=out_file)
    self.assertTrue(os.path.exists(out_file))
    # stdout should be none
    with open(out_file, 'r') as f:
        output = f.read()
    self.assertIsNotNone(re.search('relation.*does not exist', output))
    os.remove(out_file)
    self.assertFalse(os.path.exists(out_file))
    PSQL.drop_database(dbname)
    time.sleep(4)
    self.assertFalse(PSQL.database_exists(dbname))
def template0_warn_limit(self):
    """
    Raise next xid so that age(template0) grows beyond warn limit.
    Create a new database from template0, which will inherit age of
    template0.  Ensure that warnings stop when vacuum freeze is run on
    the new database.
    """
    # Bump up age of template0 to cause warn limit violation.
    self._raise_template0_age(self.WARN_LIMIT, self.gparray.master)
    # All is well until we create a new db off template0.
    self._basic_sanity_check("clean")
    # Create database newdb off template0.
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    # The xid limits in shared memory are only updated at a VACUUM,
    # so run one now; without it the warn limit is not yet in effect.
    PSQL(sql_cmd='VACUUM FREEZE', dbname='postgres',
         out_file='vacuum_postgres.out').run(validateAfter=True)
    # newdb is now the oldest database, older than warn limit.
    self._basic_sanity_check("warn")
    # Ensure that vacuum freeze on newdb stops the warnings.
    PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
         out_file="vacuum_newdb_wl_master.out").run(validateAfter=True)
    self._basic_sanity_check("clean")
    PSQL.drop_database(dbname="newdb")
def test_drop_database_does_not_exist(self):
    """Dropping a non-existent database must raise PSQLException."""
    # The context-manager binding ("as cm") was unused; the bare
    # assertRaises context is sufficient.
    with self.assertRaises(PSQLException):
        PSQL.drop_database('testnotexistdatabase')