def execute(self):
    entry = ValidateVerificationEntry(token=self.token).run()
    if not entry['verdone']:
        raise WrongStateError("Only finished verification tasks may be cleaned up.")

    path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
    Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
    # RemoveTree(path).run()

    to_clean = ValidateVerification(content=entry['vercontent'],
                                    primaries_only=False).run()
    pool = WorkerPool(min(len(to_clean), self.batch_default))
    for seg in to_clean:
        host = seg.getSegmentHostName()
        path = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify',
                            "*%s*" % self.token)
        cmd = Command('cleanup', 'rm -f %s' % path, remoteHost=host)
        pool.addCommand(cmd)

    logger.info('Waiting for clean commands to complete...')
    pool.wait_and_printdots(len(to_clean))

    for cmd in pool.getCompletedItems():
        res = cmd.get_results()
        if not res.wasSuccessful():
            logger.error('Failed to send cleanup on %s' % cmd.host)
            logger.error('Error: %s' % res.stderr)
            raise CleanVerificationError()
    RemoveVerificationEntry(token=self.token).run()
    logger.info('Verification %s has been cleaned.' % self.token)
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None):
    """
    masterDataDir: if None then we try to find it from the system environment

    readFromMasterCatalog: if True then we will connect to the master in utility mode
                           and fetch some more data from there (like collation settings)
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check', self.__gpHome)
    logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        (self.__lcCollate, self.__lcMonetary, self.__lcNumeric) = catalog.getCollationSettings(conn)

        # MPP-13807, read/show the master's database version too
        self.__pgVersion = dbconn.execSQLForSingletonRow(conn, "select version();")[0]
        logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
def execute(self):
    entry = ValidateVerificationEntry(token=self.token).run()
    to_inspect = ValidateVerification(content=entry['vercontent']).run()
    state_dict = ValidateCompletion(token=self.token,
                                    to_validate=to_inspect,
                                    batch_default=self.batch_default).run()

    incomplete = state_dict[VerificationState.RUNNING]
    if len(incomplete) > 0:
        # TODO: --force to consolidate files despite ongoing
        logger.error('One or more content verifications is still in progress: %s' % incomplete)
        return entry

    GatherResults(master_datadir=get_masterdatadir(),
                  content=entry['vercontent'],
                  token=self.token,
                  batch_default=self.batch_default).run()

    state = VerificationState.SUCCEEDED
    mismatch = False
    aborted = state_dict[VerificationState.ABORTED]
    failed = state_dict[VerificationState.FAILED]
    if len(failed) > 0:
        # any FAILED trumps ABORTED
        state = VerificationState.FAILED
        mismatch = True
        logger.warn('One or more contents for verification %s were marked FAILED: %s' % (self.token, failed))
    elif len(aborted) > 0:
        state = VerificationState.ABORTED
        logger.warn('One or more contents for verification %s were marked ABORTED: %s' % (self.token, aborted))
    else:
        logger.info('Verification %s completed successfully' % self.token)

    if not entry['verdone']:
        UpdateVerificationEntry(token=self.token,
                                state=state,
                                mismatch=mismatch,
                                done=True).run()
        entry.update({'vermismatch': mismatch,
                      'verdone': True,
                      'verstate': state})
    return entry
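# Illustrative sketch (not part of the source): the completion-state precedence
# implemented by execute() above, distilled into a pure function over the same
# state_dict shape. Any RUNNING content blocks finalization; FAILED trumps
# ABORTED, which trumps SUCCEEDED.
def final_state(state_dict):
    if state_dict[VerificationState.RUNNING]:
        return None  # still in progress; no final state can be assigned yet
    if state_dict[VerificationState.FAILED]:
        return VerificationState.FAILED  # any FAILED trumps ABORTED
    if state_dict[VerificationState.ABORTED]:
        return VerificationState.ABORTED
    return VerificationState.SUCCEEDED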
def __init__(self, mainOptions):
    self.pidfilename = mainOptions.get('pidfilename', None)    # the file we're using for locking
    self.parentpidvar = mainOptions.get('parentpidvar', None)  # environment variable holding parent pid
    self.parentpid = None      # parent pid which already has the lock
    self.ppath = None          # complete path to the lock file
    self.pidlockfile = None    # PIDLockFile object
    self.pidfilepid = None     # pid of the process which has the lock
    self.locktorelease = None  # PIDLockFile object we should release when done

    if self.parentpidvar is not None and self.parentpidvar in os.environ:
        self.parentpid = int(os.environ[self.parentpidvar])

    if self.pidfilename is not None:
        self.ppath = os.path.join(gp.get_masterdatadir(), self.pidfilename)
        self.pidlockfile = PIDLockFile(self.ppath)
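# Minimal usage sketch for the lock set up above, assuming PIDLockFile is the
# one from the `lockfile` package (the source only shows construction, so the
# read_pid/acquire/release calls below are an assumption about that API).
from lockfile.pidlockfile import PIDLockFile

def acquire_lock(ppath, parentpid=None):
    lock = PIDLockFile(ppath)
    if parentpid is not None and lock.read_pid() == parentpid:
        return None              # the parent already holds the lock; nothing for us to release
    lock.acquire(timeout=0)      # raises immediately if another process holds the lock
    return lock                  # caller is responsible for lock.release() when done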
def _do_results(self, token, batch_default):
    if self.token is not None:
        entry = FinalizeVerification(token=token, batch_default=batch_default).run()
        entries = [entry]
    else:
        entries = FinalizeAllVerifications(batch_default).run()
    master_datadir = get_masterdatadir()

    for entry in entries:
        logger.info("---------------------------")
        logger.info("Token: %s" % entry["vertoken"])
        logger.info("Type: %s" % VerificationType.lookup[entry["vertype"]])
        logger.info("Content: %s" % (entry["vercontent"] if entry["vercontent"] > 0 else "ALL"))
        logger.info("Started: %s" % entry["verstarttime"])
        logger.info("State: %s" % VerificationState.lookup[entry["verstate"]])
        if entry["verdone"]:
            path = os.path.join(master_datadir, "pg_verify", entry["vertoken"])
            logger.info("Details: %s" % path)
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None, verbose=True):
    """
    masterDataDir: if None then we try to find it from the system environment

    readFromMasterCatalog: if True then we will connect to the master in utility mode
                           and fetch some more data from there (like collation settings)
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check', self.__gpHome)

    if verbose:
        logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        (self.__lcCollate, self.__lcMonetary, self.__lcNumeric) = catalog.getCollationSettings(conn)

        # MPP-13807, read/show the master's database version too
        self.__pgVersion = dbconn.execSQLForSingletonRow(conn, "select version();")[0]
        logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
class Context(object):
    filename = os.path.join(gp.get_masterdatadir(), 'gpexpand.status')
    dbname = os.getenv('PGDATABASE', 'postgres')
    dburl = dbconn.DbURL(dbname=dbname)
    conn = dbconn.connect(dburl)
    day = 0
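# Illustrative sketch (not from the source): querying through the class-level
# connection above with the same dbconn helper used elsewhere in this code.
# Note that Context opens its connection at class-definition time, so merely
# importing the module requires a reachable master.
def server_version():
    return dbconn.execSQLForSingletonRow(Context.conn, "select version();")[0]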
class GpPkgProgram: """ This is the CLI entry point to package management code. """ def __init__(self, options, args): self.master_datadir = options.masterDataDirectory # TODO: AK: Program logic should not be dictating master, standby, and segment information # In other words, the fundamental Operations should have APIs that preclude the need for this. self.master_host = None self.standby_host = None self.segment_host_list = None self.query = options.query self.build = options.build self.install = options.install self.remove = options.remove self.update = options.update self.clean = options.clean self.migrate = options.migrate # only one of the following may be provided: --install, --remove, --update, --query, --build, --clean, --migrate count = sum([1 for opt in ['install', 'remove', 'update', 'query', 'build', 'clean', 'migrate'] if getattr(self, opt)]) if count != 1: raise ExceptionNoStackTraceNeeded('Exactly one of the following must be provided: --install, --remove, -update, --query, --clean, --migrate') if self.query: # gppkg -q can be supplemented with --info, --list, --all count = sum([1 for opt in ['info', 'list', 'all'] if options.__dict__[opt]]) if count > 1: raise ExceptionNoStackTraceNeeded('For --query, at most one of the following can be provided: --info, --list, --all') # for all query options other than --all, a package path must be provided if not options.all and len(args) != 1: raise ExceptionNoStackTraceNeeded('A package must be specified for -q, -q --info, and -q --list.') if options.info: self.query = (QueryPackage.INFO, args[0]) elif options.list: self.query = (QueryPackage.LIST, args[0]) elif options.all: self.query = (QueryPackage.ALL, None) else: self.query = (None, args[0]) elif self.migrate: if len(args) != 2: raise ExceptionNoStackTraceNeeded('Invalid syntax, expecting "gppkg --migrate <from_gphome> <to_gphome>".') self.migrate = (args[0], args[1]) @staticmethod def create_parser(): parser = OptParser(option_class=OptChecker, description="Greenplum Package Manager", version='%prog version $Revision: #1 $') parser.setHelp([]) addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True) parser.remove_option('-q') parser.remove_option('-l') add_to = OptionGroup(parser, 'General Options') parser.add_option_group(add_to) addMasterDirectoryOptionForSingleClusterProgram(add_to) # TODO: AK: Eventually, these options may need to be flexible enough to accept mutiple packages # in one invocation. If so, the structure of this parser may need to change. 
        add_to.add_option('-i', '--install', help='install the given gppkg', metavar='<package>')
        add_to.add_option('-u', '--update', help='update the given gppkg', metavar='<package>')
        add_to.add_option('-r', '--remove', help='remove the given gppkg', metavar='<name>-<version>')
        add_to.add_option('-q', '--query', help='query the gppkg database or a particular gppkg', action='store_true')
        add_to.add_option('-b', '--build', help='build a gppkg', metavar='<directory>')
        add_to.add_option('-c', '--clean', help='clean the cluster of the given gppkg', action='store_true')
        add_to.add_option('--migrate', help='migrate gppkgs from a separate $GPHOME', metavar='<from_gphome> <to_gphome>', action='store_true', default=False)

        add_to = OptionGroup(parser, 'Query Options')
        parser.add_option_group(add_to)
        add_to.add_option('--info', action='store_true', help='print information about the gppkg including name, version, description')
        add_to.add_option('--list', action='store_true', help='print all the files present in the gppkg')
        add_to.add_option('--all', action='store_true', help='print all the gppkgs installed by gppkg')

        return parser

    @staticmethod
    def create_program(options, args):
        """ TODO: AK: This convention may be unnecessary. """
        return GpPkgProgram(options, args)

    def _get_gpdb_host_list(self):
        """
        TODO: AK: Get rid of this. Program logic should not be driving host list building.

        This method gets the host names of all hosts in the gpdb array.
        It sets the following variables:
            GpPkgProgram.master_host to master
            GpPkgProgram.standby_host to standby
            GpPkgProgram.segment_host_list to segment hosts
        """
        logger.debug('_get_gpdb_host_list')

        # Get host list
        GPHOME = os.getenv('GPHOME')
        if not GPHOME:
            logger.info('GPHOME is not set.')
            sys.exit(1)
        hawq_site = HawqXMLParser(GPHOME)
        hawq_site.get_all_values()
        master_port = hawq_site.hawq_dict['hawq_master_address_port']

        master_host = ""
        standby_host = None
        segment_host_list = []

        host_list = get_hawq_hostname_all(master_port)
        for host, status in host_list['master'].iteritems():
            master_host = host
        for host, status in host_list['standby'].iteritems():
            standby_host = host
        for host, status in host_list['segment'].iteritems():
            segment_host_list.append(host)

        # Deduplicate the hosts so that we
        # don't install multiple times on the same host
        segment_host_list = list(set(segment_host_list))

        # Segments might exist on the master host. Since we store the
        # master host separately in self.master_host, storing the master_host
        # in the segment_host_list is redundant.
        for host in list(segment_host_list):  # iterate over a copy; we remove from the list in place
            if host == master_host or host == standby_host:
                segment_host_list.remove(host)

        self.master_host = master_host
        self.standby_host = standby_host
        self.segment_host_list = segment_host_list

    def _get_master_port(self, datadir):
        ''' Obtain the master port from the pgconf file '''
        logger.debug('_get_master_port')
        pgconf_dict = pgconf.readfile(os.path.join(datadir, 'postgresql.conf'))
        return pgconf_dict.int('port')

    def run(self):
        if self.build:
            BuildGppkg(self.build).run()
            return

        # Check for RPM and Solaris OS
        if curr_platform == SUNOS:
            raise ExceptionNoStackTraceNeeded('gppkg is not supported on Solaris')

        try:
            cmd = Command(name='Check for rpm', cmdStr='rpm --version')
            cmd.run(validateAfter=True)
            results = cmd.get_results().stdout.strip()
            rpm_version_string = results.split(' ')[-1]

            if not rpm_version_string.startswith('4.'):
                raise ExceptionNoStackTraceNeeded('gppkg requires rpm version 4.x')
        except ExecutionError as ex:
            results = ex.cmd.get_results().stderr.strip()
            if len(results) != 0 and 'not found' in results:
                raise ExceptionNoStackTraceNeeded('gppkg requires RPM to be available in PATH')

        if self.migrate:
            MigratePackages(from_gphome=self.migrate[0], to_gphome=self.migrate[1]).run()
            return

        # MASTER_DATA_DIRECTORY and PGPORT should not need to be set for
        # --build and --migrate to function properly
        if self.master_datadir is None:
            self.master_datadir = gp.get_masterdatadir()
        self.master_port = self._get_master_port(self.master_datadir)

        # TODO: AK: Program logic should not drive host decisions.
        self._get_gpdb_host_list()

        if self.install:
            pkg = Gppkg.from_package_path(self.install)
            InstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
        elif self.query:
            query_type, package_path = self.query
            QueryPackage(query_type, package_path).run()
        elif self.remove:
            if self.remove.count('-') != 1:
                raise ExceptionNoStackTraceNeeded('Please specify the correct <name>-<version>.')
            pkg_file_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, self.remove + '-*-*' + GPPKG_EXTENSION).run()
            if len(pkg_file_list) == 0:
                raise ExceptionNoStackTraceNeeded('Package %s has not been installed.' % self.remove)
            assert len(pkg_file_list) == 1
            pkg_file = pkg_file_list[0]
            pkg = Gppkg.from_package_path(os.path.join(GPPKG_ARCHIVE_PATH, pkg_file))
            UninstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
        elif self.update:
            pkg = Gppkg.from_package_path(self.update)
            UpdatePackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
        elif self.clean:
            CleanGppkg(self.standby_host, self.segment_host_list).run()
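# Typical invocations accepted by the parser built above (the package names
# are hypothetical examples, not packages shipped with the product):
#
#   gppkg --install plperl-1.1-x86_64.gppkg
#   gppkg --remove plperl-1.1
#   gppkg -q --info plperl-1.1-x86_64.gppkg
#   gppkg -q --all
#   gppkg --migrate /usr/local/gphome_old /usr/local/gphome_new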
        try:
            cmd = Command(name='Check for rpm', cmdStr='rpm --version')
            cmd.run(validateAfter=True)
            results = cmd.get_results().stdout.strip()
            rpm_version_string = results.split(' ')[-1]

            if not rpm_version_string.startswith('4.'):
                raise ExceptionNoStackTraceNeeded('gppkg requires rpm version 4.x')
        except ExecutionError as ex:
            results = ex.cmd.get_results().stderr.strip()
            if len(results) != 0 and 'not found' in results:
                raise ExceptionNoStackTraceNeeded('gppkg requires RPM to be available in PATH')

        if self.master_datadir is None:
            self.master_datadir = gp.get_masterdatadir()
        self.master_port = self._get_master_port(self.master_datadir)

        self._get_gpdb_host_list()

        if self.migrate:
            MigratePackages(from_gphome=self.migrate[0],
                            to_gphome=self.migrate[1],
                            standby_host=self.standby_host,
                            segment_host_list=self.segment_host_list).run()
            return

        if self.install:
            pkg = Gppkg.from_package_path(self.install)
            InstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
class GpPkgProgram: """ This is the CLI entry point to package management code. """ def __init__(self, options, args): self.master_datadir = options.masterDataDirectory # TODO: AK: Program logic should not be dictating master, standby, and segment information # In other words, the fundamental Operations should have APIs that preclude the need for this. self.master_host = None self.standby_host = None self.segment_host_list = None self.query = options.query self.build = options.build self.install = options.install self.remove = options.remove self.update = options.update self.clean = options.clean self.migrate = options.migrate self.interactive = options.interactive self.filename = options.filename # only one of the following may be provided: --install, --remove, --update, --query, --build, --clean, --migrate count = sum([ 1 for opt in [ 'install', 'remove', 'update', 'query', 'build', 'clean', 'migrate' ] if getattr(self, opt) ]) if count != 1: raise ExceptionNoStackTraceNeeded( 'Exactly one of the following must be provided: --install, --remove, -update, --query, --clean, --migrate' ) if self.query: # gppkg -q can be supplemented with --info, --list, --all count = sum([ 1 for opt in ['info', 'list', 'all'] if options.__dict__[opt] ]) if count > 1: raise ExceptionNoStackTraceNeeded( 'For --query, at most one of the following can be provided: --info, --list, --all' ) # for all query options other than --all, a package path must be provided if not options.all and len(args) != 1: raise ExceptionNoStackTraceNeeded( 'A package must be specified for -q, -q --info, and -q --list.' ) if options.info: self.query = (QueryPackage.INFO, args[0]) elif options.list: self.query = (QueryPackage.LIST, args[0]) elif options.all: self.query = (QueryPackage.ALL, None) else: self.query = (None, args[0]) elif self.migrate: if len(args) != 2: raise ExceptionNoStackTraceNeeded( 'Invalid syntax, expecting "gppkg --migrate <from_gphome> <to_gphome>".' ) self.migrate = (args[0], args[1]) # gppkg should check gpexpand status check_result, msg = gp.conflict_with_gpexpand("gppkg", refuse_phase1=True, refuse_phase2=False) if not check_result: raise ExceptionNoStackTraceNeeded(msg) @staticmethod def create_parser(): parser = OptParser(option_class=OptChecker, description="Greenplum Package Manager", version='%prog version $Revision: #1 $') parser.setHelp([]) addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True) parser.remove_option('-q') parser.remove_option('-l') add_to = OptionGroup(parser, 'General Options') parser.add_option_group(add_to) addMasterDirectoryOptionForSingleClusterProgram(add_to) # TODO: AK: Eventually, these options may need to be flexible enough to accept mutiple packages # in one invocation. If so, the structure of this parser may need to change. 
        add_to.add_option('-i', '--install', help='install the given gppkg', metavar='<package>')
        add_to.add_option('-u', '--update', help='update the given gppkg', metavar='<package>')
        add_to.add_option('-r', '--remove', help='remove the given gppkg', metavar='<name>-<version>')
        add_to.add_option('-q', '--query', help='query the gppkg database or a particular gppkg', action='store_true')
        add_to.add_option('-b', '--build', help='build a gppkg', metavar='<directory>')
        add_to.add_option('-c', '--clean', help='clean the cluster of the given gppkg', action='store_true')
        add_to.add_option('--migrate', help='migrate gppkgs from a separate $GPHOME', metavar='<from_gphome> <to_gphome>', action='store_true', default=False)
        add_to.add_option('-f', '--filename', help='set specific package name', metavar='<name>')

        add_to = OptionGroup(parser, 'Query Options')
        parser.add_option_group(add_to)
        add_to.add_option('--info', action='store_true', help='print information about the gppkg including name, version, description')
        add_to.add_option('--list', action='store_true', help='print all the files present in the gppkg')
        add_to.add_option('--all', action='store_true', help='print all the gppkgs installed by gppkg')

        return parser

    @staticmethod
    def create_program(options, args):
        """ TODO: AK: This convention may be unnecessary. """
        return GpPkgProgram(options, args)

    def _get_gpdb_host_list(self):
        """
        TODO: Perhaps the host list should be produced by gparray instead of here.

        This method gets the host names of all hosts in the gpdb array.
        It sets the following variables:
            GpPkgProgram.master_host to master
            GpPkgProgram.standby_host to standby
            GpPkgProgram.segment_host_list to segment hosts
        """
        logger.debug('_get_gpdb_host_list')

        gparr = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port), utility=True)
        master_host = None
        standby_host = None
        segment_host_list = []

        segs = gparr.getDbList()
        for seg in segs:
            if seg.isSegmentMaster(current_role=True):
                master_host = seg.getSegmentHostName()
            elif seg.isSegmentStandby(current_role=True):
                standby_host = seg.getSegmentHostName()
            else:
                segment_host_list.append(seg.getSegmentHostName())

        # Deduplicate the hosts so that we
        # don't install multiple times on the same host
        segment_host_list = list(set(segment_host_list))

        # Segments might exist on the master host. Since we store the
        # master host separately in self.master_host, storing the master_host
        # in the segment_host_list is redundant.
        for host in list(segment_host_list):  # iterate over a copy; we remove from the list in place
            if host == master_host or host == standby_host:
                segment_host_list.remove(host)

        self.master_host = master_host
        self.standby_host = standby_host
        self.segment_host_list = segment_host_list

    def _get_master_port(self, datadir):
        ''' Obtain the master port from the pgconf file '''
        logger.debug('_get_master_port')
        pgconf_dict = pgconf.readfile(os.path.join(datadir, 'postgresql.conf'))
        return pgconf_dict.int('port') or os.getenv('PGPORT')

    def run(self):
        if self.build:
            if self.filename:
                BuildGppkg(self.build, self.filename).run()
            else:
                BuildGppkg(self.build, None).run()
            return

        # Check for RPM and Solaris OS
        if curr_platform == SUNOS:
            raise ExceptionNoStackTraceNeeded('gppkg is not supported on Solaris')

        try:
            if platform.linux_distribution()[0] == 'Ubuntu':
                cmd = Command(name='Check for dpkg', cmdStr='dpkg --version')
                cmd.run(validateAfter=True)
            else:
                cmd = Command(name='Check for rpm', cmdStr='rpm --version')
                cmd.run(validateAfter=True)
                results = cmd.get_results().stdout.strip()
                rpm_version_string = results.split(' ')[-1]

                if not rpm_version_string.startswith('4.'):
                    raise ExceptionNoStackTraceNeeded('gppkg requires rpm version 4.x')
        except ExecutionError as ex:
            results = ex.cmd.get_results().stderr.strip()
            if len(results) != 0 and 'not found' in results:
                raise ExceptionNoStackTraceNeeded('gppkg requires RPM to be available in PATH')

        if self.master_datadir is None:
            self.master_datadir = gp.get_masterdatadir()
        self.master_port = self._get_master_port(self.master_datadir)

        self._get_gpdb_host_list()

        if self.migrate:
            MigratePackages(from_gphome=self.migrate[0],
                            to_gphome=self.migrate[1],
                            standby_host=self.standby_host,
                            segment_host_list=self.segment_host_list).run()
            return

        if self.install:
            pkg = Gppkg.from_package_path(self.install)
            InstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
        elif self.query:
            query_type, package_path = self.query
            QueryPackage(query_type, package_path).run()
        elif self.remove:
            # Check for exact match first, then use wildcard for what will be removed.
            pkg_file_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, self.remove + GPPKG_EXTENSION).run()
            if len(pkg_file_list) == 0:
                # now try wildcard
                pkg_file_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, self.remove + '*' + GPPKG_EXTENSION).run()
                if len(pkg_file_list) == 0:
                    raise ExceptionNoStackTraceNeeded('Package %s has not been installed.' % self.remove)

                # refuse to remove at all if the match is too broad, i.e., > 1
                if len(pkg_file_list) > 1:
                    err_msg = "Remove request '%s' too broad. " \
                              "Multiple packages match remove request: ( %s )." % (self.remove, ", ".join(pkg_file_list))
                    raise ExceptionNoStackTraceNeeded(err_msg)

            pkg_file = pkg_file_list[0]
            pkg = Gppkg.from_package_path(os.path.join(GPPKG_ARCHIVE_PATH, pkg_file))
            UninstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
        elif self.update:
            logger.warning('WARNING: The process of updating a package includes removing all')
            logger.warning('previous versions of the system objects related to the package. For')
            logger.warning('example, previous versions of shared libraries are removed.')
            logger.warning('After the update process, a database function will fail when it is')
            logger.warning('called if the function references a package file that has been removed.')
            if self.interactive:
                if not ask_yesno(None, 'Do you still want to continue ?', 'N'):
                    logger.info('Skipping update of gppkg based on user input')
                    return
            pkg = Gppkg.from_package_path(self.update)
            UpdatePackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
        elif self.clean:
            CleanGppkg(self.standby_host, self.segment_host_list).run()
def run(self):
    if self.build:
        if self.filename:
            BuildGppkg(self.build, self.filename).run()
        else:
            BuildGppkg(self.build, None).run()
        return

    if platform.linux_distribution()[0] == 'Ubuntu':
        try:
            cmd = Command(name='Check for dpkg', cmdStr='dpkg --version')
            cmd.run(validateAfter=True)
            cmd = Command(name='Check for fakeroot', cmdStr='fakeroot --version')
            cmd.run(validateAfter=True)
        except Exception as ex:
            raise ExceptionNoStackTraceNeeded('fakeroot and dpkg are both required by gppkg')
    else:
        try:
            cmd = Command(name='Check for rpm', cmdStr='rpm --version')
            cmd.run(validateAfter=True)
            results = cmd.get_results().stdout.strip()
            rpm_version_string = results.split(' ')[-1]

            if not rpm_version_string.startswith('4.'):
                raise ExceptionNoStackTraceNeeded('gppkg requires rpm version 4.x')
        except ExecutionError as ex:
            results = ex.cmd.get_results().stderr.strip()
            if len(results) != 0 and 'not found' in results:
                raise ExceptionNoStackTraceNeeded('gppkg requires RPM to be available in PATH')

    if self.master_datadir is None:
        self.master_datadir = gp.get_masterdatadir()
    self.master_port = self._get_master_port(self.master_datadir)

    self._get_gpdb_host_list()

    if self.migrate:
        MigratePackages(from_gphome=self.migrate[0],
                        to_gphome=self.migrate[1],
                        standby_host=self.standby_host,
                        segment_host_list=self.segment_host_list).run()
        return

    if self.install:
        pkg = Gppkg.from_package_path(self.install)
        InstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
    elif self.query:
        query_type, package_path = self.query
        QueryPackage(query_type, package_path).run()
    elif self.remove:
        # Check for exact match first, then use wildcard for what will be removed.
        pkg_file_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, self.remove + GPPKG_EXTENSION).run()
        if len(pkg_file_list) == 0:
            # now try wildcard
            pkg_file_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, self.remove + '*' + GPPKG_EXTENSION).run()
            if len(pkg_file_list) == 0:
                raise ExceptionNoStackTraceNeeded('Package %s has not been installed.' % self.remove)

            # refuse to remove at all if the match is too broad, i.e., > 1
            if len(pkg_file_list) > 1:
                err_msg = "Remove request '%s' too broad. " \
                          "Multiple packages match remove request: ( %s )." % (self.remove, ", ".join(pkg_file_list))
                raise ExceptionNoStackTraceNeeded(err_msg)

        pkg_file = pkg_file_list[0]
        pkg = Gppkg.from_package_path(os.path.join(GPPKG_ARCHIVE_PATH, pkg_file))
        UninstallPackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
    elif self.update:
        logger.warning('WARNING: The process of updating a package includes removing all')
        logger.warning('previous versions of the system objects related to the package. For')
        logger.warning('example, previous versions of shared libraries are removed.')
        logger.warning('After the update process, a database function will fail when it is')
        logger.warning('called if the function references a package file that has been removed.')
        if self.interactive:
            if not ask_yesno(None, 'Do you still want to continue ?', 'N'):
                logger.info('Skipping update of gppkg based on user input')
                return
        pkg = Gppkg.from_package_path(self.update)
        UpdatePackage(pkg, self.master_host, self.standby_host, self.segment_host_list).run()
    elif self.clean:
        CleanGppkg(self.standby_host, self.segment_host_list).run()
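# Illustrative sketch (not part of the source): the exact-match-then-wildcard
# strategy used by the --remove branch above, distilled into a pure function
# over a list of archive file names. It assumes ListFilesByPattern performs
# glob-style matching and that GPPKG_EXTENSION is '.gppkg'; the helper and
# the sample names below are hypothetical.
import fnmatch

GPPKG_EXTENSION = '.gppkg'

def match_remove_request(request, archive_files):
    """Return the single archive matching `request`, or raise ValueError."""
    # Exact match first...
    matches = fnmatch.filter(archive_files, request + GPPKG_EXTENSION)
    if not matches:
        # ...then fall back to a wildcard match, refusing ambiguous requests.
        matches = fnmatch.filter(archive_files, request + '*' + GPPKG_EXTENSION)
        if not matches:
            raise ValueError('Package %s has not been installed.' % request)
        if len(matches) > 1:
            raise ValueError("Remove request '%s' too broad: %s" % (request, matches))
    return matches[0]

# An exact name wins even when the wildcard form would be ambiguous:
files = ['plperl-1.1-x86_64.gppkg', 'plperl-1.1.gppkg']
print(match_remove_request('plperl-1.1', files))  # -> plperl-1.1.gppkg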