def validate_attempt(self, pid):
    """ Checks if we can kill the process """
    command = self.examine_process(pid)

    critical_process_prefix = ['postgres', gp.get_gphome(), dereference_symlink(gp.get_gphome())]
    for prefix in critical_process_prefix:
        if command.startswith(prefix):
            raise KillError('process %s may not be killed' % pid)

    if not command.startswith('python ' + gp.get_gphome()):
        raise KillError('process %s ignored by gpkill as it is not a greenplum process' % pid)
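# A standalone sketch of the prefix check above, for illustration only; it is not
# part of gpkill. GPHOME, the local KillError stand-in, and the sample command
# strings are assumptions made here.
GPHOME = '/usr/local/greenplum-db'

class KillError(Exception):
    """Local stand-in for gpkill's KillError."""

def may_kill(command, pid):
    # Mirrors validate_attempt: refuse anything that looks like a critical
    # process, then require a python process launched from under GPHOME.
    for prefix in ('postgres', GPHOME):
        if command.startswith(prefix):
            raise KillError('process %s may not be killed' % pid)
    if not command.startswith('python ' + GPHOME):
        raise KillError('process %s ignored by gpkill as it is not a greenplum process' % pid)

# 'python /usr/local/greenplum-db/bin/some_gp_utility' passes both checks, while
# 'postgres: checkpointer process' raises KillError immediately.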
def setUp(self):
    self.subject = gpsegstart
    self.subject.logger = Mock(
        spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal', 'warning_to_file_only'])

    local_version = gp.GpVersion.local('local GP software version check', gp.get_gphome())
    self.failing_data_dir = "/doesnt/exist/datadirs/dbfast_mirror1/demoDataDir0"
    self.args_list = ["gpsegstart.py", "-V", local_version,
                      "-n", "3", "--era", "5c105ee373d42194_180105120208", "-t", "600",
                      "-p", "pickled transition data",
                      "-C", "en_US.utf-8:en_US.utf-8:en_US.utf-8",
                      "-D", "2|0|p|p|s|u|aspen|aspen|25432|25438|/doesnt/exist/datadirs/dbfast1/demoDataDir0||",
                      "-D", "5|0|m|m|s|u|aspen|aspen|25435|25441|" + self.failing_data_dir + "||",
                      "-D", "3|1|p|p|s|u|aspen|aspen|25433|25439|/doesnt/exist/datadirs/dbfast2/demoDataDir1||",
                      "-D", "6|1|m|m|s|u|aspen|aspen|25436|25442|/doesnt/exist/datadirs/dbfast_mirror2/demoDataDir1||",
                      "-D", "4|2|p|p|s|u|aspen|aspen|25434|25440|/doesnt/exist/datadirs/dbfast3/demoDataDir2||",
                      "-D", "7|2|m|m|s|u|aspen|aspen|25437|25443|/doesnt/exist/datadirs/dbfast_mirror3/demoDataDir2||",
                      ]

    self.apply_patches([
        patch('os.path.isdir'),
        patch('os.path.exists'),
        patch('os.kill'),
        patch('gpsegstart.gp.recovery_startup', return_value=None),
        patch('gpsegstart.base64.urlsafe_b64decode'),
        patch('gpsegstart.pickle.loads', return_value="random string"),
        patch('gpsegstart.base.WorkerPool'),
        patch('gpsegstart.gp.read_postmaster_pidfile', return_value=111),
        patch('gpsegstart.PgControlData.run')
    ])
    self.mock_workerpool = self.get_mock_from_apply_patch('WorkerPool')
    self.mock_pgcontroldata_run = self.get_mock_from_apply_patch('run')
def setUp(self):
    self.subject = gpsegstart
    self.subject.logger = Mock(
        spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal', 'warning_to_file_only'])

    local_version = gp.GpVersion.local('local GP software version check', gp.get_gphome())
    self.failing_data_dir = "/doesnt/exist/datadirs/dbfast_mirror1/demoDataDir0"
    self.args_list = ["gpsegstart.py", "-V", local_version,
                      "-n", "3", "--era", "5c105ee373d42194_180105120208", "-t", "600",
                      "-p", "pickled transition data",
                      "-D", "2|0|p|p|s|u|aspen|aspen|25432|/doesnt/exist/datadirs/dbfast1/demoDataDir0",
                      "-D", "5|0|m|m|s|u|aspen|aspen|25435|" + self.failing_data_dir,
                      "-D", "3|1|p|p|s|u|aspen|aspen|25433|/doesnt/exist/datadirs/dbfast2/demoDataDir1",
                      "-D", "6|1|m|m|s|u|aspen|aspen|25436|/doesnt/exist/datadirs/dbfast_mirror2/demoDataDir1",
                      "-D", "4|2|p|p|s|u|aspen|aspen|25434|/doesnt/exist/datadirs/dbfast3/demoDataDir2",
                      "-D", "7|2|m|m|s|u|aspen|aspen|25437|/doesnt/exist/datadirs/dbfast_mirror3/demoDataDir2",
                      ]

    self.apply_patches([
        patch('os.path.isdir'),
        patch('os.path.exists'),
        patch('os.kill'),
        patch('gpsegstart.gp.recovery_startup', return_value=None),
        patch('gpsegstart.base64.urlsafe_b64decode'),
        patch('gpsegstart.pickle.loads', return_value="random string"),
        patch('gpsegstart.base.WorkerPool'),
        patch('gpsegstart.gp.read_postmaster_pidfile', return_value=111),
        patch('gpsegstart.PgControlData.run')
    ])
    self.mock_workerpool = self.get_mock_from_apply_patch('WorkerPool')
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None):
    """
    masterDataDir: if None then we try to find it from the system environment

    readFromMasterCatalog: if True then we will connect to the master in utility mode
                           and fetch some more data from there (like collation settings)
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check', self.__gpHome)
    logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        (self.__lcCollate, self.__lcMonetary, self.__lcNumeric) = catalog.getCollationSettings(conn)

        # MPP-13807, read/show the master's database version too
        self.__pgVersion = dbconn.execSQLForSingletonRow(conn, "select version();")[0]
        logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
def __init__(self, coordinatorDataDir, readFromCoordinatorCatalog, timeout=None, retries=None, verbose=True):
    """
    coordinatorDataDir: if None then we try to find it from the system environment

    readFromCoordinatorCatalog: if True then we will connect to the coordinator in utility mode
                                and fetch some more data from there (like collation settings)
    """
    if coordinatorDataDir is None:
        self.__coordinatorDataDir = gp.get_coordinatordatadir()
    else:
        self.__coordinatorDataDir = coordinatorDataDir

    logger.debug("Obtaining coordinator's port from coordinator data directory")
    pgconf_dict = pgconf.readfile(self.__coordinatorDataDir + "/postgresql.conf")
    self.__coordinatorPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__coordinatorPort)
    self.__coordinatorMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__coordinatorMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check', self.__gpHome)

    if verbose:
        logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from coordinator
    if readFromCoordinatorCatalog:
        dbUrl = dbconn.DbURL(port=self.__coordinatorPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)

        # MPP-13807, read/show the coordinator's database version too
        self.__pgVersion = dbconn.queryRow(conn, "select version();")[0]
        logger.info("coordinator Greenplum Version: '%s'" % self.__pgVersion)
        conn.close()
    else:
        self.__pgVersion = None
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None, verbose=True):
    """
    masterDataDir: if None then we try to find it from the system environment

    readFromMasterCatalog: if True then we will connect to the master in utility mode
                           and fetch some more data from there (like collation settings)
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check', self.__gpHome)

    if verbose:
        logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        (self.__lcCollate, self.__lcMonetary, self.__lcNumeric) = catalog.getCollationSettings(conn)

        # MPP-13807, read/show the master's database version too
        self.__pgVersion = dbconn.execSQLForSingletonRow(conn, "select version();")[0]
        logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
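# A minimal usage sketch, assuming the __init__ variants above belong to the
# gppylib environment class (GpMasterEnvironment, later renamed
# GpCoordinatorEnvironment); the import path is an assumption made here.
from gppylib.system.environment import GpMasterEnvironment

# Passing None for the data directory makes the class resolve it from the
# environment, and readFromMasterCatalog=False skips the utility-mode
# connection so only postgresql.conf is read.
env = GpMasterEnvironment(None, readFromMasterCatalog=False)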
    os_string = ''

    if dist.lower() == 'redhat':
        os_string += 'rhel'
    elif dist.lower() == 'suse':
        os_string += 'suse'

    os_string += major_release

    return os_string

OS = get_os()
ARCH = platform.machine()

# AK: use dereference_symlink when mucking with RPM database for the same reason
# it's used in the gppylib.operations.package. For more info, see the function definition.
GPHOME = dereference_symlink(gp.get_gphome())

ARCHIVE_PATH = os.path.join(GPHOME, 'share/packages/archive')
RPM_DATABASE = os.path.join(GPHOME, 'share/packages/database')

GPPKG_EXTENSION = ".gppkg"

SCRATCH_SPACE = os.path.join(tempfile.gettempdir(), getpass.getuser())

GPDB_VERSION = '.'.join([str(n) for n in MAIN_VERSION[:2]])
MASTER_PORT = os.getenv("PGPORT")


def skipIfNoStandby():
    """
    A decorator which skips a unit test if a standby is not already present in the cluster.
    """
    standby = get_host_list()[0]

    if standby is None:
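# A hedged sketch of how a decorator like skipIfNoStandby is typically applied;
# the test class and method names are invented, and because the function body is
# truncated above, it is assumed here to return a unittest skip decorator (or a
# pass-through) depending on whether a standby host exists.
import unittest

class SimpleGppkgTestCase(unittest.TestCase):
    @skipIfNoStandby()
    def test_install_propagates_to_standby(self):
        # Only runs when the cluster reports a standby master; otherwise the
        # decorator returned by skipIfNoStandby() skips the test.
        pass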