def CheckBinaryVersion(self):
    ''' Validate that the correct binary is installed in all segments '''
    logger.info('Checking Segment binary version')
    hosts = self.Select("select distinct hostname from gp_segment_configuration")
    masterversion = self.getversion(self.newhome, self.newenv)
    cmdStr = '%s/bin/pg_ctl --hawq-version' % self.newhome
    for uh in hosts:
        cmd = base.Command(uh, cmdStr, base.REMOTE, uh)
        self.pool.addCommand(cmd)
    self.pool.join()
    items = self.pool.getCompletedItems()
    for i in items:
        if i.results.rc:
            logger.error("error on host %s with error: %s" % (i.remoteHost, i.results.stderr))
            raise UpgradeError('Cannot verify segment GPDB binary')
        if not i.results.stdout:
            logger.error("could not find version string from host %s with command: %s" % (i.remoteHost, cmdStr))
            raise UpgradeError('Cannot verify segment GPDB binary')
        version_string = i.results.stdout.strip()
        if version_string != masterversion:
            logger.error("version string on host %s: '%s' does not match expected: '%s'"
                         % (i.remoteHost, version_string, masterversion))
            raise UpgradeError('Master/Segment binary mismatch')
def _get_segment_version(seg):
    try:
        if seg.role == gparray.ROLE_PRIMARY:
            dburl = dbconn.DbURL(hostname=seg.hostname, port=seg.port, dbname="template1")
            conn = dbconn.connect(dburl, utility=True)
            return dbconn.execSQLForSingleton(conn, "select version()")

        if seg.role == gparray.ROLE_MIRROR:
            cmd = base.Command("Try connecting to mirror",
                               "psql -h %s -p %s template1 -c 'select 1'"
                               % (seg.hostname, seg.port))
            cmd.run(validateAfter=False)
            if cmd.results.rc == 0:
                raise RuntimeError("Connection to mirror succeeded unexpectedly")
            stderr = cmd.results.stderr.splitlines()
            for line in stderr:
                match = _version_regex.match(line)
                if match:
                    return match.group(1)
            raise RuntimeError("Unexpected error from mirror connection: %s" % cmd.results.stderr)

        logger.error("Invalid role '%s' for dbid %d", seg.role, seg.dbid)
        return None
    except Exception as ex:
        logger.error("Could not get segment version for dbid %d", seg.dbid, exc_info=ex)
        return None
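# _get_segment_version() above relies on a module-level `_version_regex` that is
# not shown in this excerpt: a mirror rejects the direct psql connection, and the
# version is scraped out of the resulting stderr lines. A minimal sketch of such
# a pattern follows; the exact wording of the mirror's rejection message is an
# assumption here, so treat this as illustrative rather than authoritative.
import re

# Hypothetical: capture a version token such as "9.4.24" from a rejection line
# that mentions the server version.
_version_regex = re.compile(r'.*PostgreSQL (\S+)')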
def _get_segment_status(segment):
    cmd = base.Command('pg_isready for segment',
                       "PGOPTIONS=\"-c gp_role=utility\" pg_isready -q -h %s -p %d -d %s"
                       % (segment.hostname, segment.port, gp.PGDATABASE_FOR_COMMON_USE))
    cmd.run()

    rc = cmd.get_return_code()

    if rc == PQPING_OK:
        if segment.role == gparray.ROLE_PRIMARY:
            return 'Up'
        elif segment.role == gparray.ROLE_MIRROR:
            return 'Acting as Primary'
    elif rc == PQPING_REJECT:
        return 'Rejecting Connections'
    elif rc == PQPING_NO_RESPONSE:
        return 'Down'
    elif rc == PQPING_MIRROR_READY:
        if segment.role == gparray.ROLE_PRIMARY:
            return 'Acting as Mirror'
        elif segment.role == gparray.ROLE_MIRROR:
            return 'Up'

    return None
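# _get_segment_status() compares pg_isready's exit status against PQPING_*
# constants defined elsewhere in the module. A sketch of those definitions:
# 0/1/2 mirror pg_isready's documented exit codes, while PQPING_MIRROR_READY is
# a Greenplum-specific value (64 is assumed here) returned when a mirror answers
# the ping without accepting ordinary connections.
PQPING_OK = 0             # server is accepting connections
PQPING_REJECT = 1         # server is alive but rejecting connections
PQPING_NO_RESPONSE = 2    # no response (server down or unreachable)
PQPING_MIRROR_READY = 64  # assumed Greenplum extension: mirror is up and streaming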
def dereference_remote_symlink(self, datadir, host):
    cmdStr = """python -c 'import os; print os.path.realpath("%s")'""" % datadir
    cmd = base.Command('dereference a symlink on a remote host', cmdStr=cmdStr,
                       ctxt=base.REMOTE, remoteHost=host)
    cmd.run()
    results = cmd.get_results()
    if results.rc != 0:
        self.__logger.warning('Unable to determine if %s is a symlink. Assuming it is not a symlink' % (datadir))
        return datadir
    return results.stdout.strip()
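# Note that the remote one-liner in dereference_remote_symlink() uses the
# Python 2 print statement; if the remote `python` were Python 3 the command
# would fail and the path would be returned unresolved. A sketch of a command
# string that works under both interpreters (the data directory shown is just
# an example value):
datadir = '/data/primary/gpseg0'
cmdStr = """python -c 'import os; print(os.path.realpath("%s"))'""" % datadir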
def remove_postmaster_pid_from_remotehost(self, host, datadir):
    cmd = base.Command(name='remove the postmaster.pid file',
                       cmdStr='rm -f %s/postmaster.pid' % datadir,
                       ctxt=gp.REMOTE,
                       remoteHost=host)
    cmd.run()

    return_code = cmd.get_return_code()
    if return_code != 0:
        raise ExecutionError("Failed while trying to remove postmaster.pid.", cmd)
def __getProgressAndRemoveCmds(self, progressFile, targetSegmentDbId, targetHostname):
    progressCmd = None
    if self.__progressMode != GpMirrorListToBuild.Progress.NONE:
        progressCmd = GpMirrorListToBuild.ProgressCommand("tail the last line of the file",
                                                          "set -o pipefail; touch -a {0}; tail -1 {0} | tr '\\r' '\\n' | tail -1".format(
                                                              pipes.quote(progressFile)),
                                                          targetSegmentDbId,
                                                          progressFile,
                                                          ctxt=base.REMOTE,
                                                          remoteHost=targetHostname)

    removeCmd = base.Command("remove file",
                             "rm -f %s" % pipes.quote(progressFile),
                             ctxt=base.REMOTE,
                             remoteHost=targetHostname)

    return progressCmd, removeCmd
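# Why the shell pipeline above is shaped the way it is (explanatory sketch, not
# shipped code): pg_basebackup rewrites its progress indicator in place using
# carriage returns, so the log file can hold many updates on one physical line.
# `touch -a` guarantees the file exists so tail does not fail, the first
# `tail -1` grabs that last physical line, `tr '\r' '\n'` splits the in-place
# updates apart, and the final `tail -1` keeps only the newest one. Roughly
# equivalent pure-Python logic:
def latest_progress_line(path):
    """Return the most recent pg_basebackup progress update recorded in `path`."""
    try:
        with open(path) as f:
            lines = f.readlines()
    except IOError:
        return ''
    if not lines:
        return ''
    # Split the carriage-return-separated updates and keep the newest.
    updates = lines[-1].replace('\r', '\n').strip().splitlines()
    return updates[-1] if updates else ''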
def _get_segment_status(segment):
    cmd = base.Command('pg_isready for segment',
                       "pg_isready -q -h %s -p %d" % (segment.hostname, segment.port))
    cmd.run()

    rc = cmd.get_return_code()

    if rc == PQPING_OK:
        if segment.role == gparray.ROLE_PRIMARY:
            return 'Up'
        elif segment.role == gparray.ROLE_MIRROR:
            return 'Acting as Primary'
    elif rc == PQPING_REJECT:
        return 'Rejecting Connections'
    elif rc == PQPING_NO_RESPONSE:
        return 'Down'
    elif rc == PQPING_MIRROR_READY:
        if segment.role == gparray.ROLE_PRIMARY:
            return 'Acting as Mirror'
        elif segment.role == gparray.ROLE_MIRROR:
            return 'Up'

    return None
def __copySegmentDirectories(self, gpEnv, gpArray, directives):
    """
    directives should be composed of GpCopySegmentDirectoryDirective values
    """
    if len(directives) == 0:
        return

    srcSegments = []
    destSegments = []
    isTargetReusedLocation = []
    timeStamp = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
    for directive in directives:
        srcSegment = directive.getSrcSegment()
        destSegment = directive.getDestSegment()
        destSegment.primaryHostname = srcSegment.getSegmentHostName()
        destSegment.primarySegmentPort = srcSegment.getSegmentPort()
        destSegment.progressFile = '%s/pg_basebackup.%s.dbid%s.out' % (gplog.get_logger_dir(),
                                                                       timeStamp,
                                                                       destSegment.getSegmentDbId())
        srcSegments.append(srcSegment)
        destSegments.append(destSegment)
        isTargetReusedLocation.append(directive.isTargetReusedLocation())

    destSegmentByHost = GpArray.getSegmentsByHostName(destSegments)
    newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(destSegments, isTargetReusedLocation)

    def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
        segmentInfo = newSegmentInfo[hostName]
        checkNotNone("segmentInfo for %s" % hostName, segmentInfo)

        return gp.ConfigureNewSegment(cmdLabel,
                                      segmentInfo,
                                      gplog.get_logger_dir(),
                                      newSegments=True,
                                      verbose=gplog.logging_is_verbose(),
                                      batchSize=self.__parallelDegree,
                                      ctxt=gp.REMOTE,
                                      remoteHost=hostName,
                                      validationOnly=validationOnly,
                                      forceoverwrite=self.__forceoverwrite)
    #
    # validate directories for target segments
    #
    self.__logger.info('Validating remote directories')
    cmds = []
    for hostName in list(destSegmentByHost.keys()):
        cmds.append(createConfigureNewSegmentCommand(hostName, 'validate blank segments', True))
    for cmd in cmds:
        self.__pool.addCommand(cmd)

    if self.__quiet:
        self.__pool.join()
    else:
        base.join_and_indicate_progress(self.__pool)

    validationErrors = []
    for item in self.__pool.getCompletedItems():
        results = item.get_results()
        if not results.wasSuccessful():
            if results.rc == 1:
                # stdoutFromFailure = results.stdout.replace("\n", " ").strip()
                lines = results.stderr.split("\n")
                for line in lines:
                    if len(line.strip()) > 0:
                        validationErrors.append("Validation failure on host %s %s" % (item.remoteHost, line))
            else:
                validationErrors.append(str(item))
    self.__pool.empty_completed_items()
    if validationErrors:
        raise ExceptionNoStackTraceNeeded("\n" + ("\n".join(validationErrors)))

    # Configure a new segment
    #
    # Recover segments using gpconfigurenewsegment, which uses pg_basebackup.
    # gprecoverseg generates a log filename which is passed to
    # gpconfigurenewsegment as a confinfo parameter. gprecoverseg tails this
    # file to show recovery progress to the user, and removes the file when
    # done. A new file is generated for each run of gprecoverseg based on a
    # timestamp.
    self.__logger.info('Configuring new segments')
    cmds = []
    progressCmds = []
    removeCmds = []
    for hostName in list(destSegmentByHost.keys()):
        for segment in destSegmentByHost[hostName]:
            progressCmd, removeCmd = self.__getProgressAndRemoveCmds(segment.progressFile,
                                                                     segment.getSegmentDbId(),
                                                                     hostName)
            removeCmds.append(removeCmd)
            if progressCmd:
                progressCmds.append(progressCmd)

        cmds.append(createConfigureNewSegmentCommand(hostName, 'configure blank segments', False))

    self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds,
                                                      "unpacking basic segment directory",
                                                      suppressErrorCheck=False,
                                                      progressCmds=progressCmds)

    self.__runWaitAndCheckWorkerPoolForErrorsAndClear(removeCmds,
                                                      "removing pg_basebackup progress logfiles",
                                                      suppressErrorCheck=False)

    #
    # copy dump files from old segment to new segment
    #
    for srcSeg in srcSegments:
        for destSeg in destSegments:
            if srcSeg.content == destSeg.content:
                src_dump_dir = os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps')
                cmd = base.Command('check existence of db_dumps directory',
                                   'ls %s' % (src_dump_dir),
                                   ctxt=base.REMOTE,
                                   remoteHost=destSeg.getSegmentAddress())
                cmd.run()
                if cmd.results.rc == 0:  # Only try to copy directory if it exists
                    cmd = Scp('copy db_dumps from old segment to new segment',
                              os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps*', '*'),
                              os.path.join(destSeg.getSegmentDataDirectory(), 'db_dumps'),
                              srcSeg.getSegmentAddress(),
                              destSeg.getSegmentAddress(),
                              recursive=True)
                    cmd.run(validateAfter=True)
                break
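# For orientation, the per-segment progress file wired through
# __getProgressAndRemoveCmds() is the one built earlier in
# __copySegmentDirectories(), following the pattern
# <logdir>/pg_basebackup.<YYYYMMDD_HHMMSS>.dbid<N>.out. A quick illustration of
# the naming (the log directory and dbid shown are just example values):
import datetime
import os

logdir = '/home/gpadmin/gpAdminLogs'
timeStamp = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
print(os.path.join(logdir, 'pg_basebackup.%s.dbid%s.out' % (timeStamp, 2)))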
def _get_remove_cmd(self, cmd, target_host):
    return base.Command("remove file",
                        "rm -f {}".format(pipes.quote(cmd)),
                        ctxt=base.REMOTE,
                        remoteHost=target_host)
def __copySegmentDirectories(self, gpEnv, gpArray, directives):
    """
    directives should be composed of GpCopySegmentDirectoryDirective values
    """
    if len(directives) == 0:
        return

    srcSegments = []
    destSegments = []
    isTargetReusedLocation = []
    for directive in directives:
        srcSegment = directive.getSrcSegment()
        destSegment = directive.getDestSegment()
        destSegment.primaryHostname = srcSegment.getSegmentHostName()
        destSegment.primarySegmentPort = srcSegment.getSegmentPort()
        srcSegments.append(srcSegment)
        destSegments.append(destSegment)
        isTargetReusedLocation.append(directive.isTargetReusedLocation())

    destSegmentByHost = GpArray.getSegmentsByHostName(destSegments)
    newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(destSegments, isTargetReusedLocation)

    def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
        segmentInfo = newSegmentInfo[hostName]
        checkNotNone("segmentInfo for %s" % hostName, segmentInfo)

        return gp.ConfigureNewSegment(cmdLabel,
                                      segmentInfo,
                                      newSegments=True,
                                      verbose=gplog.logging_is_verbose(),
                                      batchSize=self.__parallelDegree,
                                      ctxt=gp.REMOTE,
                                      remoteHost=hostName,
                                      validationOnly=validationOnly,
                                      forceoverwrite=self.__forceoverwrite)
    #
    # validate directories for target segments
    #
    self.__logger.info('Validating remote directories')
    cmds = []
    for hostName in destSegmentByHost.keys():
        cmds.append(createConfigureNewSegmentCommand(hostName, 'validate blank segments', True))
    for cmd in cmds:
        self.__pool.addCommand(cmd)
    self.__pool.wait_and_printdots(len(cmds), self.__quiet)

    validationErrors = []
    for item in self.__pool.getCompletedItems():
        results = item.get_results()
        if not results.wasSuccessful():
            if results.rc == 1:
                # stdoutFromFailure = results.stdout.replace("\n", " ").strip()
                lines = results.stderr.split("\n")
                for line in lines:
                    if len(line.strip()) > 0:
                        validationErrors.append("Validation failure on host %s %s" % (item.remoteHost, line))
            else:
                validationErrors.append(str(item))
    self.__pool.empty_completed_items()
    if validationErrors:
        raise ExceptionNoStackTraceNeeded("\n" + ("\n".join(validationErrors)))

    #
    # unpack and configure new segments
    #
    self.__logger.info('Configuring new segments')
    cmds = []
    for hostName in destSegmentByHost.keys():
        cmds.append(createConfigureNewSegmentCommand(hostName, 'configure blank segments', False))
    self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "unpacking basic segment directory")

    #
    # copy dump files from old segment to new segment
    #
    for srcSeg in srcSegments:
        for destSeg in destSegments:
            if srcSeg.content == destSeg.content:
                src_dump_dir = os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps')
                cmd = base.Command('check existence of db_dumps directory',
                                   'ls %s' % (src_dump_dir),
                                   ctxt=base.REMOTE,
                                   remoteHost=destSeg.getSegmentAddress())
                cmd.run()
                if cmd.results.rc == 0:  # Only try to copy directory if it exists
                    cmd = Scp('copy db_dumps from old segment to new segment',
                              os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps*', '*'),
                              os.path.join(destSeg.getSegmentDataDirectory(), 'db_dumps'),
                              srcSeg.getSegmentAddress(),
                              destSeg.getSegmentAddress(),
                              recursive=True)
                    cmd.run(validateAfter=True)
                break