def __init__(self, name):
    """Create a tablespace plus a database inside it, seeded with test data.

    Creates the tablespace directory on every host in the cluster, then a
    tablespace named *name*, a database placed in that tablespace, and a
    randomly-distributed table ``tbl`` whose rows are recorded in
    ``self.initial_data`` for later verification.

    name: suffix used for the tablespace name and its database name.
    """
    self.name = name
    self.path = tempfile.mkdtemp()
    self.dbname = 'tablespace_db_%s' % name
    self.table_counter = 0
    self.initial_data = None

    # The tablespace directory must exist on every host before
    # CREATE TABLESPACE can succeed on the segments.
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    for host in gparray.getHostList():
        run_cmd('ssh %s mkdir -p %s' % (pipes.quote(host), pipes.quote(self.path)))

    # try/finally guarantees the connection is released even when one of
    # the DDL statements fails (the original leaked it on error).
    conn = dbconn.connect(dbconn.DbURL(), unsetSearchPath=False)
    try:
        dbconn.execSQL(
            conn,
            "CREATE TABLESPACE %s LOCATION '%s'" % (self.name, self.path))
        dbconn.execSQL(
            conn,
            "CREATE DATABASE %s TABLESPACE %s" % (self.dbname, self.name))
    finally:
        conn.close()

    conn = dbconn.connect(dbconn.DbURL(dbname=self.dbname), unsetSearchPath=False)
    try:
        dbconn.execSQL(conn, "CREATE TABLE tbl (i int) DISTRIBUTED RANDOMLY")
        dbconn.execSQL(conn, "INSERT INTO tbl VALUES (GENERATE_SERIES(0, 25))")

        # save the distributed data for later verification
        self.initial_data = dbconn.query(
            conn, "SELECT gp_segment_id, i FROM tbl").fetchall()
    finally:
        conn.close()
def cleanup(self):
    """Drop the tablespace's database and the tablespace, then remove the
    on-disk tablespace directory from every host in the cluster."""
    # Pass unsetSearchPath=False for consistency with every other
    # dbconn.connect() call in this module.
    with dbconn.connect(dbconn.DbURL(dbname="postgres"), unsetSearchPath=False) as conn:
        db = pg.DB(conn)
        db.query("DROP DATABASE IF EXISTS %s" % self.dbname)
        db.query("DROP TABLESPACE IF EXISTS %s" % self.name)

        # Without synchronous_commit = 'remote_apply' introduced in 9.6, there
        # is no guarantee that the mirrors have removed their tablespace
        # directories by the time the DROP TABLESPACE command returns.
        # We need those directories to no longer be in use by the mirrors
        # before removing them below.
        _checkpoint_and_wait_for_replication_replay(db)

    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    for host in gparray.getHostList():
        run_cmd('ssh %s rm -rf %s' % (pipes.quote(host), pipes.quote(self.path)))
def cleanup(self):
    """Tear down this tablespace: drop its database and the tablespace
    itself, then delete the backing directory on every cluster host."""
    url = dbconn.DbURL(dbname="postgres")
    with dbconn.connect(url, unsetSearchPath=False) as connection:
        database = pg.DB(connection)
        for statement in ("DROP DATABASE IF EXISTS %s" % self.dbname,
                          "DROP TABLESPACE IF EXISTS %s" % self.name):
            database.query(statement)

        # Without synchronous_commit = 'remote_apply' introduced in 9.6, there
        # is no guarantee that the mirrors have removed their tablespace
        # directories by the time the DROP TABLESPACE command returns.
        # We need those directories to no longer be in use by the mirrors
        # before removing them below.
        _checkpoint_and_wait_for_replication_replay(database)

    hosts = GpArray.initFromCatalog(dbconn.DbURL()).getHostList()
    for host in hosts:
        run_cmd('ssh %s rm -rf %s' % (pipes.quote(host), pipes.quote(self.path)))
def __init__(self, name):
    """Set up a tablespace named *name* with a dedicated database containing
    a seed table ``tbl``; the table's rows are captured in
    ``self.initial_data`` so later checks can compare against them."""
    self.name = name
    self.path = tempfile.mkdtemp()
    self.dbname = 'tablespace_db_%s' % name
    self.table_counter = 0
    self.initial_data = None

    # Every host needs the directory in place before CREATE TABLESPACE runs.
    hosts = GpArray.initFromCatalog(dbconn.DbURL()).getHostList()
    for host in hosts:
        run_cmd('ssh %s mkdir -p %s' % (pipes.quote(host), pipes.quote(self.path)))

    with dbconn.connect(dbconn.DbURL()) as conn:
        admin_db = pg.DB(conn)
        admin_db.query("CREATE TABLESPACE %s LOCATION '%s'" % (self.name, self.path))
        admin_db.query("CREATE DATABASE %s TABLESPACE %s" % (self.dbname, self.name))

    with dbconn.connect(dbconn.DbURL(dbname=self.dbname)) as conn:
        seed_db = pg.DB(conn)
        seed_db.query("CREATE TABLE tbl (i int) DISTRIBUTED RANDOMLY")
        seed_db.query("INSERT INTO tbl VALUES (GENERATE_SERIES(0, 25))")

        # save the distributed data for later verification
        self.initial_data = seed_db.query("SELECT gp_segment_id, i FROM tbl").getresult()