def impl(context, seg_type, content):
    """Record the dbid/hostname of the segment at (content, preferred_role),
    then repoint its hostname at an unreachable host and wait for the cluster
    to notice the segment is down.

    Saved state on context (created on first use):
      old_hostnames        -- {(content, preferred_role): original hostname}
      down_segment_dbids   -- list of dbids taken down by this step
    """
    role_by_type = {"primary": 'p', "mirror": 'm'}
    if seg_type not in role_by_type:
        raise Exception(
            "Invalid segment type %s (options are primary and mirror)" % seg_type)
    preferred_role = role_by_type[seg_type]

    with dbconn.connect(dbconn.DbURL(dbname="template1"), unsetSearchPath=False) as conn:
        dbid, hostname = dbconn.execSQLForSingletonRow(
            conn,
            "SELECT dbid, hostname FROM gp_segment_configuration WHERE content = %s AND preferred_role = '%s'" % (content, preferred_role))

    # Remember the original hostname so a later step can restore it.
    context.old_hostnames = getattr(context, 'old_hostnames', {})
    context.old_hostnames[(content, preferred_role)] = hostname

    change_hostname(content, preferred_role, 'invalid_host')

    context.down_segment_dbids = getattr(context, 'down_segment_dbids', [])
    context.down_segment_dbids.append(dbid)
    wait_for_unblocked_transactions(context)
def impl(context, obj, objname, schemaname, dbname):
    """Assert that the named object exists in the given schema and database.

    obj == 'function' is resolved via a regprocedure cast; anything else is
    looked up in pg_class by relname within the schema's namespace.
    Raises Exception when the object is absent.
    """
    if obj == 'function':
        cmd_sql = "select exists(select '%s.%s'::regprocedure)" % (schemaname, objname)
    else:
        cmd_sql = "select exists(select * from pg_class where relname='%s' and relnamespace=(select oid from pg_namespace where nspname='%s'))" % (objname, schemaname)

    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        exists = dbconn.execSQLForSingletonRow(conn, cmd_sql)
        # exists[0] is the boolean from the SQL EXISTS clause.
        if exists[0] is not True:
            raise Exception("The %s '%s' does not exists in schema '%s' and database '%s' " % (obj, objname, schemaname, dbname))
def check_table_exists(context, dbname, table_name, table_type=None, host=None, port=0, user=None):
    """Return True when table_name exists in dbname (optionally matching
    table_type: 'ao', 'co', 'heap', 'external' or 'view').

    A qualified name ('schema.table') is checked within that schema; a bare
    name is looked up by relname alone. On lookup failure the exception is
    stashed on context.exception and False is returned.
    """
    # Maps pg_class.relstorage codes to the caller-facing type names.
    storage_to_type = {
        'a': 'ao',
        'c': 'co',
        'h': 'heap',
        'x': 'external',
        'v': 'view',
    }

    with dbconn.connect(
            dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
        if '.' in table_name:
            schemaname, tablename = table_name.split('.')
            SQL = """
                  SELECT c.oid, c.relkind, c.relstorage, c.reloptions
                  FROM pg_class c, pg_namespace n
                  WHERE c.relname = '%s' AND n.nspname = '%s' AND c.relnamespace = n.oid;
                  """ % (escape_string(tablename, conn=conn),
                         escape_string(schemaname, conn=conn))
        else:
            SQL = """
                  SELECT oid, relkind, relstorage, reloptions
                  FROM pg_class
                  WHERE relname = E'%s';
                  """ % (escape_string(table_name, conn=conn))

        try:
            table_row = dbconn.execSQLForSingletonRow(conn, SQL)
        except Exception as e:
            # Remember the failure for the caller; absence is reported as False.
            context.exception = e
            return False

        if table_type is None:
            return True

        relstorage = table_row[2]
        if relstorage not in storage_to_type:
            raise Exception('Unknown table type %s' % relstorage)
        return storage_to_type[relstorage] == table_type.strip()
def execute(self):
    """Look up the verification entry for self.token.

    Raises:
        TokenNotFound: when the query matches no rows.
        UnexpectedRowsError: re-raised for any other row-count mismatch.
    """
    dburl = dbconn.DbURL()
    query = self.SELECT_VERIFICATION_ENTRY % self.token
    with dbconn.connect(dburl) as conn:
        try:
            # Renamed from 'tuple' (shadowed the builtin); value is unused,
            # the call is only made to validate that exactly one row exists.
            row = dbconn.execSQLForSingletonRow(conn, query)
        except UnexpectedRowsError as e:  # 'except X, e' is invalid in Python 3
            if e.actual == 0:
                raise TokenNotFound(self.token)
            raise
def _get_host_and_port_for_table(self, tablename):
    """Return (hostname, port) of the primary segment that holds tablename.

    Assumes every row of the table lives on a single segment; the DISTINCT
    subquery raises if the table spans more than one gp_segment_id.
    """
    host_port_sql = """
    SELECT hostname, port
    FROM gp_segment_configuration
    WHERE role = 'p'
    AND content = (SELECT DISTINCT(gp_segment_id) FROM %s)
    """ % tablename

    with dbconn.connect(dbconn.DbURL(dbname=self.dbname)) as conn:
        host, port = dbconn.execSQLForSingletonRow(conn, host_port_sql)

    return host, port
def _create_test_db(self):
    """Create self.TEST_DB via createdb unless it already exists."""
    with dbconn.connect(dbconn.DbURL()) as conn:
        count_row = dbconn.execSQLForSingletonRow(
            conn,
            "select count(*) from pg_database where datname='%s'" % self.TEST_DB)

    # count_row[0] is the number of databases with that name (0 or 1).
    if count_row[0] == 0:
        Command('create a test database',
                'createdb %s' % self.TEST_DB).run(validateAfter=True)
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None):
    """
    masterDataDir: if None then we try to find it from the system environment
    readFromMasterCatalog: if True then we will connect to the master in utility
        mode and fetch some more data from there (like collation settings)
    timeout/retries: forwarded to the master DbURL when connecting.
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local(
        'local GP software version check', self.__gpHome)
    logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        try:
            (self.__lcCollate, self.__lcMonetary,
             self.__lcNumeric) = catalog.getCollationSettings(conn)

            # MPP-13807, read/show the master's database version too
            self.__pgVersion = dbconn.execSQLForSingletonRow(
                conn, "select version();")[0]
            logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        finally:
            # Close the connection even when a catalog query raises; the
            # original leaked the connection on that path.
            conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
def execute(self):
    """Check whether self.filespace exists.

    Returns False when the lookup query matches no rows; re-raises any other
    UnexpectedRowsError. NOTE(review): on success the function falls through
    and implicitly returns None (not True) — callers appear to rely on the
    existing behavior, so it is preserved here; confirm before changing.
    """
    logger.info('Checking if filespace %s exists' % self.filespace)
    dburl = dbconn.DbURL()
    query = self.SELECT_FILESPACENAME_QUERY % self.filespace
    logger.debug('Connecting to database')
    with dbconn.connect(dburl, utility=True) as conn:
        try:
            logger.debug('Executing query %s' % query)
            # Renamed from 'tuple' (shadowed the builtin); only the
            # row-count side effect matters here.
            row = dbconn.execSQLForSingletonRow(conn, query)
        except UnexpectedRowsError as e:  # 'except X, e' is invalid in Python 3
            if e.actual == 0:
                return False
            raise
def check_table_exists(context, dbname, table_name, table_type=None, host=None, port=0, user=None):
    """Return True when table_name exists in dbname, optionally requiring it
    to be of table_type ('ao', 'co', 'heap', 'external' or 'view').

    'schema.table' names are resolved within the schema; bare names are
    matched on relname alone. Lookup failures are recorded on
    context.exception and reported as False.
    """
    db_url = dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)
    with dbconn.connect(db_url) as conn:
        if '.' in table_name:
            schemaname, tablename = table_name.split('.')
            SQL = """
                  SELECT c.oid, c.relkind, c.relstorage, c.reloptions
                  FROM pg_class c, pg_namespace n
                  WHERE c.relname = '%s' AND n.nspname = '%s' AND c.relnamespace = n.oid;
                  """ % (escape_string(tablename, conn=conn),
                         escape_string(schemaname, conn=conn))
        else:
            SQL = """
                  SELECT oid, relkind, relstorage, reloptions
                  FROM pg_class
                  WHERE relname = E'%s';
                  """ % (escape_string(table_name, conn=conn))

        try:
            table_row = dbconn.execSQLForSingletonRow(conn, SQL)
        except Exception as e:
            # Record the error; a missing table is simply "does not exist".
            context.exception = e
            return False

        if table_type is None:
            return True

        # Translate pg_class.relstorage into the caller-facing type name.
        relstorage = table_row[2]
        if relstorage == 'a':
            original_table_type = 'ao'
        elif relstorage == 'c':
            original_table_type = 'co'
        elif relstorage == 'h':
            original_table_type = 'heap'
        elif relstorage == 'x':
            original_table_type = 'external'
        elif relstorage == 'v':
            original_table_type = 'view'
        else:
            raise Exception('Unknown table type %s' % relstorage)

        return original_table_type == table_type.strip()
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None, verbose=True):
    """
    masterDataDir: if None then we try to find it from the system environment
    readFromMasterCatalog: if True then we will connect to the master in utility
        mode and fetch some more data from there (like collation settings)
    timeout/retries: forwarded to the master DbURL when connecting.
    verbose: when False, suppresses the local-version info log line.
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check', self.__gpHome)

    if verbose:
        logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        try:
            (self.__lcCollate, self.__lcMonetary,
             self.__lcNumeric) = catalog.getCollationSettings(conn)

            # MPP-13807, read/show the master's database version too
            self.__pgVersion = dbconn.execSQLForSingletonRow(conn, "select version();")[0]
            logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        finally:
            # Close the connection even when a catalog query raises; the
            # original leaked the connection on that path.
            conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
def _create_expansion_input_file(self):
    """This code has been taken from system_management utilities test suite.
    creates a expansion input file

    Writes self.SEGMENTS new primary (and, when mirroring is configured,
    mirror) segment lines to self.EXPANSION_INPUT_FILE using the next free
    dbid/content/port values read from gp_segment_configuration.
    """
    with dbconn.connect(dbconn.DbURL()) as conn:
        next_dbid = dbconn.execSQLForSingletonRow(
            conn,
            "select max(dbid)+1 from pg_catalog.gp_segment_configuration")[0]
        next_content = dbconn.execSQL(
            conn,
            "select max(content)+1 from pg_catalog.gp_segment_configuration"
        ).fetchall()[0][0]
        next_pri_port = dbconn.execSQL(
            conn,
            "select max(port)+1 from pg_catalog.gp_segment_configuration where role='p'"
        ).fetchall()[0][0]
        self.primary_host_name = dbconn.execSQL(
            conn,
            "select distinct hostname from gp_segment_configuration where content >= 0 and preferred_role = 'p'"
        ).fetchall()[0][0]
        next_mir_port = dbconn.execSQL(
            conn,
            "select max(port)+1 from pg_catalog.gp_segment_configuration where role='m'"
        ).fetchall()[0][0]

        # Mirroring is considered enabled only when a usable mirror port was
        # found. (PEP 8: identity test for None instead of '== None'.)
        if next_mir_port is None or next_mir_port == ' ' or next_mir_port == 0:
            mirroring_on = False
        else:
            mirroring_on = True

        next_pri_replication_port = dbconn.execSQL(
            conn,
            "select max(replication_port)+1 from pg_catalog.gp_segment_configuration where role='p'"
        ).fetchall()[0][0]
        next_mir_replication_port = dbconn.execSQL(
            conn,
            "select max(replication_port)+1 from pg_catalog.gp_segment_configuration where role='m'"
        ).fetchall()[0][0]

        # Prefer a mirror host different from the primary host; fall back to
        # the primary host when no such host exists.
        select_mirror = "select distinct hostname \
                        from gp_segment_configuration \
                        where content >= 0 and preferred_role = 'm' and hostname != '%s'" % self.primary_host_name
        mirror_name_row = dbconn.execSQL(conn, select_mirror).fetchall()
        if not mirror_name_row:
            self.mirror_host_name = self.primary_host_name
        else:
            self.mirror_host_name = mirror_name_row[0][0]

        self.primary_host_address = socket.getaddrinfo(self.primary_host_name, None)[0][4][0]
        self.mirror_host_address = socket.getaddrinfo(self.mirror_host_name, None)[0][4][0]

    with open(self.EXPANSION_INPUT_FILE, 'w') as outfile:
        for i in range(self.SEGMENTS):
            pri_datadir = os.path.join(os.getcwd(), 'new_pri_seg%d' % i)
            mir_datadir = os.path.join(os.getcwd(), 'new_mir_seg%d' % i)
            temp_str = "%s:%s:%d:%s:%d:%d:%s" % (self.primary_host_name,
                                                 self.primary_host_address,
                                                 next_pri_port,
                                                 pri_datadir,
                                                 next_dbid,
                                                 next_content, 'p')
            if mirroring_on:
                temp_str = temp_str + ":" + str(next_pri_replication_port)
            temp_str = temp_str + "\n"
            outfile.write(temp_str)

            if mirroring_on:
                # The content number for mirror is same as the primary segment's content number
                next_dbid += 1
                outfile.write("%s:%s:%d:%s:%d:%d:%s:%s\n" % (self.mirror_host_name,
                                                             self.mirror_host_address,
                                                             next_mir_port,
                                                             mir_datadir,
                                                             next_dbid,
                                                             next_content, 'm',
                                                             str(next_mir_replication_port)))
                next_mir_port += 1
                next_pri_replication_port += 1
                next_mir_replication_port += 1
            next_pri_port += 1
            next_dbid += 1
            next_content += 1
def _create_expansion_input_file(self):
    """This code has been taken from system_management utilities test suite.
    creates a expansion input file

    Writes self.SEGMENTS new primary (and, when mirroring is configured,
    mirror) segment lines to self.EXPANSION_INPUT_FILE using the next free
    dbid/content/port values read from gp_segment_configuration.
    """
    with dbconn.connect(dbconn.DbURL()) as conn:
        next_dbid = dbconn.execSQLForSingletonRow(
            conn,
            "select max(dbid)+1 from pg_catalog.gp_segment_configuration")[0]
        next_content = dbconn.execSQL(
            conn,
            "select max(content)+1 from pg_catalog.gp_segment_configuration"
        ).fetchall()[0][0]
        next_pri_port = dbconn.execSQL(
            conn,
            "select max(port)+1 from pg_catalog.gp_segment_configuration where role='p'"
        ).fetchall()[0][0]
        self.primary_host_name = dbconn.execSQL(
            conn,
            "select distinct hostname from gp_segment_configuration where content >= 0 and preferred_role = 'p'"
        ).fetchall()[0][0]
        next_mir_port = dbconn.execSQL(
            conn,
            "select max(port)+1 from pg_catalog.gp_segment_configuration where role='m'"
        ).fetchall()[0][0]

        # Mirroring is considered enabled only when a usable mirror port was
        # found. (PEP 8: identity test for None instead of '== None'.)
        if next_mir_port is None or next_mir_port == ' ' or next_mir_port == 0:
            mirroring_on = False
        else:
            mirroring_on = True

        next_pri_replication_port = dbconn.execSQL(
            conn,
            "select max(replication_port)+1 from pg_catalog.gp_segment_configuration where role='p'"
        ).fetchall()[0][0]
        next_mir_replication_port = dbconn.execSQL(
            conn,
            "select max(replication_port)+1 from pg_catalog.gp_segment_configuration where role='m'"
        ).fetchall()[0][0]

        # Prefer a mirror host different from the primary host; fall back to
        # the primary host when no such host exists.
        select_mirror = "select distinct hostname \
                        from gp_segment_configuration \
                        where content >= 0 and preferred_role = 'm' and hostname != '%s'" % self.primary_host_name
        mirror_name_row = dbconn.execSQL(conn, select_mirror).fetchall()
        if not mirror_name_row:
            self.mirror_host_name = self.primary_host_name
        else:
            self.mirror_host_name = mirror_name_row[0][0]

        self.primary_host_address = socket.getaddrinfo(
            self.primary_host_name, None)[0][4][0]
        self.mirror_host_address = socket.getaddrinfo(
            self.mirror_host_name, None)[0][4][0]

    with open(self.EXPANSION_INPUT_FILE, 'w') as outfile:
        for i in range(self.SEGMENTS):
            pri_datadir = os.path.join(os.getcwd(), 'new_pri_seg%d' % i)
            mir_datadir = os.path.join(os.getcwd(), 'new_mir_seg%d' % i)
            temp_str = "%s:%s:%d:%s:%d:%d:%s" % (
                self.primary_host_name, self.primary_host_address,
                next_pri_port, pri_datadir, next_dbid, next_content, 'p')
            if mirroring_on:
                temp_str = temp_str + ":" + str(next_pri_replication_port)
            temp_str = temp_str + "\n"
            outfile.write(temp_str)

            if mirroring_on:
                # The content number for mirror is same as the primary segment's content number
                next_dbid += 1
                outfile.write("%s:%s:%d:%s:%d:%d:%s:%s\n" % (
                    self.mirror_host_name, self.mirror_host_address,
                    next_mir_port, mir_datadir, next_dbid, next_content, 'm',
                    str(next_mir_replication_port)))
                next_mir_port += 1
                next_pri_replication_port += 1
                next_mir_replication_port += 1
            next_pri_port += 1
            next_dbid += 1
            next_content += 1