def test00_pg_hba_conf_file(self):
    """Verify gpexpand writes the new hosts' addresses into the segment
    template's temporary pg_hba.conf.

    Injects a fault so gpexpand aborts right after tarring the segment
    template (leaving its working directory on disk), checks that the
    pg_hba.conf left behind contains entries for both the new primary and
    mirror host addresses, then rolls the expansion back and restarts.
    """
    # Make gpexpand fail just after the segment template is tarred so the
    # temporary gpexpand* directory (with its pg_hba.conf) survives.
    os.environ[self.GP_COMMAND_FAULT_POINT] = 'gpexpand tar segment template'
    cmd = Command(name='run gpexpand', cmdStr='gpexpand -i %s' % (self.EXPANSION_INPUT_FILE))
    with self.assertRaisesRegexp(ExecutionError, 'Fault Injection'):
        cmd.run(validateAfter=True)

    # Read from the pg_hba.conf file and ensure that
    # the address of the new hosts is present.
    cmd = Command(name='get the temp pg_hba.conf file',
                  cmdStr="ls %s" % os.path.join(os.path.dirname(self.MASTER_DATA_DIRECTORY),
                                                'gpexpand*',
                                                'pg_hba.conf'))
    cmd.run(validateAfter=True)
    results = cmd.get_results()
    temp_pg_hba_conf = results.stdout.strip()

    actual_values = set()
    expected_values = set([self.primary_host_address, self.mirror_host_address])
    with open(temp_pg_hba_conf) as f:
        for line in f:
            if line.strip() == '# %s' % self.primary_host_name or\
               line.strip() == '# %s' % self.mirror_host_name:
                # The host's entry follows its '# <hostname>' comment line;
                # field 3 holds the CIDR address (e.g. 10.0.0.1/32) — strip
                # the /prefix before comparing.
                # FIX: use the builtin next(f) instead of the Python-2-only
                # f.next() method, which does not exist on Python 3.
                address = next(f).strip().split()[3]
                address = address[:address.rfind('/')]
                actual_values.add(address)
    self.assertEqual(actual_values, expected_values)

    # Clean up: the aborted expansion leaves the cluster stopped, so bring
    # up the master alone, roll back the expansion, then start fully.
    GpStart(name='start the database in master only mode',
            masterOnly=True).run(validateAfter=True)
    Command(name='rollback the expansion', cmdStr='gpexpand -r').run(validateAfter=True)
    GpStart(name='start the database').run(validateAfter=True)
def start_database(self):
    """Start the Greenplum Database cluster.

    Logs an error when gpstart reports a non-zero return code, then lets
    validate() raise on the failed result.
    """
    logger.info('Starting Greenplum Database')
    start_cmd = GpStart('Start Greenplum Database')
    start_cmd.run()
    rc = start_cmd.get_results().rc
    if rc != 0:
        logger.error('Failed to start Greenplum Database.')
    start_cmd.validate()
def start_master_only(self):
    """Bring up only the master instance of Greenplum Database.

    Logs an error on a non-zero gpstart return code, then validate()
    raises for the failed command.
    """
    logger.info('Starting Greenplum Database in master only mode')
    master_cmd = GpStart('Start Greenplum Database in master only mode',
                         masterOnly=True)
    master_cmd.run()
    result = master_cmd.get_results()
    if result.rc != 0:
        logger.error(
            'Failed to start Greenplum Database in master only mode.')
    master_cmd.validate()
def start_master_only(self):
    """Start Greenplum Database with the master segment only.

    A failed gpstart is logged; validate() then raises on the bad result.
    """
    logger.info('Starting Greenplum Database in master only mode')
    gpstart = GpStart('Start Greenplum Database in master only mode', masterOnly=True)
    gpstart.run()
    failed = gpstart.get_results().rc != 0
    if failed:
        logger.error('Failed to start Greenplum Database in master only mode.')
    gpstart.validate()
def test_start_gpdb_with_high_transaction_id(self):
    """
    @description GPDB hang after high transaction id
    @created 2013-04-18 00:00:00
    @modified 2013-04-18 00:00:00
    @tags transaction MPP-17302 MPP-17323 MPP-17325 MPP-18462 MPP-18463 schedule_transaction
    @note This requires that both primary and mirror to reset xlog.

    Repro step from Hitoshi:
    gpstop -a
    pg_resetxlog -x 100000000 /data/haradh1/gpdata/d/gpseg0
    dd if=/dev/zero of=/data/haradh1/gpdata/d/gpseg0/pg_clog/0017 oflag=append conv=notrunc bs=1048576 count=1
    cp /data/haradh1/gpdata/d/gpseg0/pg_clog/0017 /data/haradh1/gpdata/d/gpseg0/pg_distributed/02FA
    gpstart -a
    """
    # @note: need a class to get GPDB configuration, need to get primary/mirror segment location
    sqlcmd = "select fselocation from gp_segment_configuration, pg_filespace_entry where dbid=fsedbid and content=0"
    with dbconn.connect(dbconn.DbURL()) as conn:
        segments = dbconn.execSQL(conn, sqlcmd)

    # @note: Issue with self.run_gpstop, hard-coded remoteHost to mdw
    # @note: upgrade model uses a series of gpstop and gpstart command, need helper classes
    stop_cmd = GpStop("gpstop")
    stop_cmd.run(validateAfter=True)

    # Force a very high next-transaction-id on every content-0 segment and
    # seed its clog/distributedlog with the prepared fixture file.
    for seg in segments:
        seg_dir = seg[0]
        reset_cmd = Command(name="reset xlog",
                            cmdStr="echo yes | pg_resetxlog -f -x 100000000 %s" % seg_dir)
        reset_cmd.run(validateAfter=True)
        xlogfile = local_path('xlog_file')
        # @todo: able to copy the xlogfile remotely
        shutil.copyfile(xlogfile, "%s/pg_clog/0017" % seg_dir)
        shutil.copyfile(xlogfile, "%s/pg_distributedlog/02FA" % seg_dir)

    # The cluster must come back up cleanly despite the inflated xid.
    start_cmd = GpStart("gpstart")
    start_cmd.run(validateAfter=True)