def setStatus(self, status):
    """Set this segment's status, rejecting None and values outside VALID_STATUS."""
    checkNotNone("status", status)
    if status not in VALID_STATUS:
        raise Exception("Invalid status '%s'" % status)
    self.status = status
def write(self, data):
    """Write data to the underlying file, raising if this wrapper was closed."""
    if self.__isClosed:
        raise Exception("File has been closed")
    # already closed?
    checkNotNone("file", self.__underlyingFile)
    self.__underlyingFile.write(data)
def setRole(self, role):
    """Set this segment's role, rejecting None and values outside VALID_ROLES."""
    checkNotNone("role", role)
    if role not in VALID_ROLES:
        raise Exception("Invalid role '%s'" % role)
    self.role = role
def __init__(self, workerPool, quiet, gpVersion, gpHome, coordinatorDataDirectory,
             coordinator_checksum_value=None, timeout=SEGMENT_TIMEOUT_DEFAULT,
             specialMode=None, wrapper=None, wrapper_args=None,
             parallel=gp.DEFAULT_GPSTART_NUM_WORKERS, logfileDirectory=False):
    """
    Capture the environment needed to start segments.

    workerPool is required; specialMode must be None, 'upgrade', or 'maintenance'.
    """
    checkNotNone("workerPool", workerPool)
    assert specialMode in (None, 'upgrade', 'maintenance')

    self.__workerPool = workerPool
    self.__quiet = quiet
    self.__gpVersion = gpVersion
    self.__gpHome = gpHome
    self.__coordinatorDataDirectory = coordinatorDataDirectory
    self.__timeout = timeout
    self.__specialMode = specialMode
    self.__wrapper = wrapper
    self.__wrapper_args = wrapper_args
    self.__parallel = parallel
    self.coordinator_checksum_value = coordinator_checksum_value
    self.logfileDirectory = logfileDirectory
def __init__(self, workerPool, quiet, localeData, gpVersion, gpHome, masterDataDirectory,
             timeout=SEGMENT_TIMEOUT_DEFAULT, specialMode=None, wrapper=None,
             wrapper_args=None, logfileDirectory=False):
    """
    Capture the environment needed to start segments.

    workerPool is required; specialMode must be None, 'upgrade', or 'maintenance'.
    """
    checkNotNone("workerPool", workerPool)
    assert specialMode in (None, 'upgrade', 'maintenance')

    self.__workerPool = workerPool
    self.__quiet = quiet
    self.__localeData = localeData
    self.__gpVersion = gpVersion
    self.__gpHome = gpHome
    self.__masterDataDirectory = masterDataDirectory
    self.__timeout = timeout
    self.__specialMode = specialMode
    self.__wrapper = wrapper
    self.__wrapper_args = wrapper_args
    self.logfileDirectory = logfileDirectory
def validate(failed, live, failover):
    """
    Validate a (failed, live, failover) recovery triplet for mirror recovery.

    live must be a reachable, up, primary query-executor segment; failed (the
    broken mirror) and failover (the recovery target) are each optional, but at
    least one must be provided.  Raises on any inconsistency; returns nothing.
    """
    checkNotNone("liveSegment", live)

    # at least one of failed/failover is needed to know what we are recovering
    if failed is None and failover is None:
        raise Exception(
            "internal error: insufficient information to recover a mirror")

    # the recovery source must be a regular (non-coordinator) segment ...
    if not live.isSegmentQE():
        raise ExceptionNoStackTraceNeeded(
            "Segment to recover from for content %s is not a correct segment "
            "(it is a coordinator or standby coordinator)" % live.getSegmentContentId())
    # ... acting as primary ...
    if not live.isSegmentPrimary(True):
        raise ExceptionNoStackTraceNeeded(
            "Segment to recover from for content %s is not a primary" % live.getSegmentContentId())
    # ... that is currently up ...
    if not live.isSegmentUp():
        raise ExceptionNoStackTraceNeeded(
            "Primary segment is not up for content %s" % live.getSegmentContentId())
    # ... and reachable over the network
    if live.unreachable:
        raise ExceptionNoStackTraceNeeded(
            "The recovery source segment %s (content %s) is unreachable." % (live.getSegmentHostName(),
                                                                             live.getSegmentContentId()))

    # the failed mirror must pair with the live primary and not be the same segment
    if failed is not None:
        if failed.getSegmentContentId() != live.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded(
                "The primary is not of the same content as the failed mirror. Primary content %d, "
                "mirror content %d" % (live.getSegmentContentId(), failed.getSegmentContentId()))
        if failed.getSegmentDbId() == live.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded(
                "For content %d, the dbid values are the same. "
                "A segment may not be recovered from itself" % live.getSegmentDbId())

    # same pairing rules for the failover target, which must also be reachable
    if failover is not None:
        if failover.getSegmentContentId() != live.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded(
                "The primary is not of the same content as the mirror. Primary content %d, "
                "mirror content %d" % (live.getSegmentContentId(), failover.getSegmentContentId()))
        if failover.getSegmentDbId() == live.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded(
                "For content %d, the dbid values are the same. "
                "A segment may not be built from itself" % live.getSegmentDbId())
        if failover.unreachable:
            raise ExceptionNoStackTraceNeeded(
                "The recovery target segment %s (content %s) is unreachable."
                % (failover.getSegmentHostName(), failover.getSegmentContentId()))

    if failed is not None and failover is not None:
        # for now, we require the code to have produced this -- even when moving the segment to another
        # location, we preserve the directory
        assert failed.getSegmentDbId() == failover.getSegmentDbId()
def registerOsProvider(provider):
    """Install a new global osProvider, destroying any previously registered one."""
    global gProvider
    checkNotNone("New global osProvider", provider)
    previous = gProvider
    if previous is not None:
        previous.destroy()
    gProvider = provider
def registerFileSystemProvider(provider):
    """Install a new global fileSystemProvider, destroying any previous one."""
    global gProvider
    checkNotNone("New global fileSystemProvider", provider)
    previous = gProvider
    if previous is not None:
        previous.destroy()
    gProvider = provider
def __init__(self, source, dest, isTargetReusedLocation):
    """
    @param isTargetReusedLocation if True then the dest location is a cleaned-up location
    """
    checkNotNone("source", source)
    checkNotNone("dest", dest)
    self.__isTargetReusedLocation = isTargetReusedLocation
    self.__source = source
    self.__dest = dest
def __init__(self, source, dest, isTargetReusedLocation):
    """
    @param isTargetReusedLocation if True then the dest location is a cleaned-up location
    """
    checkNotNone("source", source)
    checkNotNone("dest", dest)
    self.__isTargetReusedLocation = isTargetReusedLocation
    self.__source = source
    self.__dest = dest
def initializeProvider(self, masterPort):
    """
    Initialize the provider to get information from the given master db,
    if it chooses to get its data from the database

    returns self
    """
    # verify here to match what the true one will require
    checkNotNone("masterPort", masterPort)
    return self
def initializeProvider(self, masterPort):
    """
    Initialize the provider to get information from the given master db,
    if it chooses to get its data from the database

    returns self
    """
    # verify here to match what the true one will require
    checkNotNone("masterPort", masterPort)
    return self
def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization):
    """
    Validate and capture one (failed, live, failover) mirror-build triplet.

    liveSegment is the primary to recover from; failedSegment is the broken
    mirror and failoverSegment the recovery target -- at least one of the two
    must be given.  Raises on any inconsistent pairing.
    """
    checkNotNone("liveSegment", liveSegment)
    checkNotNone("forceFullSynchronization", forceFullSynchronization)

    if failedSegment is None and failoverSegment is None:
        raise Exception("No mirror passed to GpMirrorToBuild")

    # the recovery source must be an up, primary, query-executor segment
    if not liveSegment.isSegmentQE():
        raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a correct segment "
                                          "(it is a master or standby master)" % liveSegment.getSegmentContentId())
    if not liveSegment.isSegmentPrimary(True):
        raise ExceptionNoStackTraceNeeded(
            "Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
    if not liveSegment.isSegmentUp():
        raise ExceptionNoStackTraceNeeded(
            "Primary segment is not up for content %s" % liveSegment.getSegmentContentId())

    # the failed mirror must pair with the live primary and differ from it
    if failedSegment is not None:
        if failedSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded("The primary is not of the same content as the failed mirror. Primary content %d, "
                                              "mirror content %d" % (liveSegment.getSegmentContentId(),
                                                                     failedSegment.getSegmentContentId()))
        if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
                                              "A segment may not be recovered from itself" % liveSegment.getSegmentDbId())

    # same pairing rules for the failover target
    if failoverSegment is not None:
        if failoverSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded("The primary is not of the same content as the mirror. Primary content %d, "
                                              "mirror content %d" % (liveSegment.getSegmentContentId(),
                                                                     failoverSegment.getSegmentContentId()))
        if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
                                              "A segment may not be built from itself" % liveSegment.getSegmentDbId())

    if failedSegment is not None and failoverSegment is not None:
        # for now, we require the code to have produced this -- even when moving the segment to another
        # location, we preserve the directory
        assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()

    self.__failedSegment = failedSegment
    self.__liveSegment = liveSegment
    self.__failoverSegment = failoverSegment

    """
    __forceFullSynchronization is true if full resynchronization should be FORCED -- that is, the
    existing segment will be cleared and all objects will be transferred by the file resynchronization
    process on the server
    """
    self.__forceFullSynchronization = forceFullSynchronization
def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
    """Build a remote ConfigureNewSegment command for the given host (closure helper)."""
    info = newSegmentInfo[hostName]
    checkNotNone("segmentInfo for %s" % hostName, info)
    return gp.ConfigureNewSegment(cmdLabel,
                                  info,
                                  tarFile=tarFileName,
                                  newSegments=True,
                                  verbose=gplog.logging_is_verbose(),
                                  batchSize=self.__parallelDegree,
                                  ctxt=gp.REMOTE,
                                  remoteHost=hostName,
                                  validationOnly=validationOnly)
def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
    """Build a remote ConfigureNewSegment command for the given host (closure helper)."""
    info = newSegmentInfo[hostName]
    checkNotNone("segmentInfo for %s" % hostName, info)
    return gp.ConfigureNewSegment(cmdLabel,
                                  info,
                                  newSegments=True,
                                  verbose=gplog.logging_is_verbose(),
                                  batchSize=self.__parallelDegree,
                                  ctxt=gp.REMOTE,
                                  remoteHost=hostName,
                                  validationOnly=validationOnly,
                                  forceoverwrite=self.__forceoverwrite)
def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization, logger=logger):
    """
    Validate and capture one (failed, live, failover) mirror-build triplet.

    liveSegment is the primary to recover from; failedSegment is the broken
    mirror and failoverSegment the recovery target -- at least one of the two
    must be given.  Raises on any inconsistent pairing.
    """
    checkNotNone("liveSegment", liveSegment)
    checkNotNone("forceFullSynchronization", forceFullSynchronization)

    if failedSegment is None and failoverSegment is None:
        raise Exception("No mirror passed to GpMirrorToBuild")

    # the recovery source must be an up, primary, query-executor segment
    if not liveSegment.isSegmentQE():
        raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a correct segment "
                                          "(it is a master or standby master)" % liveSegment.getSegmentContentId())
    if not liveSegment.isSegmentPrimary(True):
        raise ExceptionNoStackTraceNeeded(
            "Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
    if not liveSegment.isSegmentUp():
        raise ExceptionNoStackTraceNeeded(
            "Primary segment is not up for content %s" % liveSegment.getSegmentContentId())

    # the failed mirror must pair with the live primary and differ from it
    if failedSegment is not None:
        if failedSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded(
                "The primary is not of the same content as the failed mirror. Primary content %d, "
                "mirror content %d" % (liveSegment.getSegmentContentId(), failedSegment.getSegmentContentId()))
        if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
                                              "A segment may not be recovered from itself" % liveSegment.getSegmentDbId())

    # same pairing rules for the failover target
    if failoverSegment is not None:
        if failoverSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded(
                "The primary is not of the same content as the mirror. Primary content %d, "
                "mirror content %d" % (liveSegment.getSegmentContentId(), failoverSegment.getSegmentContentId()))
        if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
                                              "A segment may not be built from itself" % liveSegment.getSegmentDbId())

    if failedSegment is not None and failoverSegment is not None:
        # for now, we require the code to have produced this -- even when moving the segment to another
        # location, we preserve the directory
        assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()

    self.__failedSegment = failedSegment
    self.__liveSegment = liveSegment
    self.__failoverSegment = failoverSegment

    """
    __forceFullSynchronization is true if full resynchronization should be FORCED -- that is, the
    existing segment will be cleared and all objects will be transferred by the file resynchronization
    process on the server
    """
    self.__forceFullSynchronization = forceFullSynchronization
def initializeProvider(self, masterPort):
    """
    Initialize the provider to get information from the given master db,
    if it chooses to get its data from the database

    returns self
    """
    checkNotNone("masterPort", masterPort)
    self.__masterDbUrl = dbconn.DbURL(port=masterPort, dbname="template1")
    return self
def initializeProvider(self, masterPort):
    """
    Initialize the provider to get information from the given master db,
    if it chooses to get its data from the database

    returns self
    """
    checkNotNone("masterPort", masterPort)
    self.__masterDbUrl = dbconn.DbURL(port=masterPort, dbname='template1')
    return self
def __init__(self, workerPool, quiet, localeData, gpVersion, gpHome, masterDataDirectory,
             timeout=SEGMENT_TIMEOUT_DEFAULT, specialMode=None, wrapper=None, wrapper_args=None):
    """
    Capture the environment needed to start segments.

    workerPool is required; specialMode must be None, 'upgrade', or 'maintenance'.
    """
    checkNotNone("workerPool", workerPool)
    assert specialMode in (None, 'upgrade', 'maintenance')

    self.__workerPool = workerPool
    self.__quiet = quiet
    self.__localeData = localeData
    self.__gpVersion = gpVersion
    self.__gpHome = gpHome
    self.__masterDataDirectory = masterDataDirectory
    self.__timeout = timeout
    self.__specialMode = specialMode
    self.__wrapper = wrapper
    self.__wrapper_args = wrapper_args
def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization):
    """
    Capture a mirror-build request after validating the recovery triplet.
    """
    checkNotNone("forceFullSynchronization", forceFullSynchronization)

    # We need to call this validate function here because addmirrors directly calls GpMirrorToBuild.
    RecoveryTriplet.validate(failedSegment, liveSegment, failoverSegment)

    self.__failedSegment = failedSegment
    self.__liveSegment = liveSegment
    self.__failoverSegment = failoverSegment

    """
    __forceFullSynchronization is true if full resynchronization should be FORCED -- that is, the
    existing segment will be cleared and all objects will be transferred by the file resynchronization
    process on the server
    """
    self.__forceFullSynchronization = forceFullSynchronization
def sendPgElogFromMaster(self, msg, sendAlerts):
    """
    Send a message from the master database using select pg_elog ...
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    conn = None
    try:
        conn = dbconn.connect(self.__masterDbUrl, utility=True)
        sql = "SELECT GP_ELOG(" + self.__toSqlCharValue(msg) + "," + \
              ("true" if sendAlerts else "false") + ")"
        dbconn.execSQL(conn, sql)
    finally:
        if conn:
            conn.close()
def sendPgElogFromMaster(self, msg, sendAlerts):
    """
    Send a message from the master database using select pg_elog ...
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    conn = None
    try:
        conn = dbconn.connect(self.__masterDbUrl, utility=True)
        sql = "SELECT GP_ELOG(" + self.__toSqlCharValue(msg) + "," + \
              ("true" if sendAlerts else "false") + ")"
        dbconn.execSQL(conn, sql)
    finally:
        if conn:
            conn.close()
def getPort(self):
    """
    Returns the listening port for the postmaster for this segment.

    Note: With file replication the postmaster will not be active for mirrors
    so nothing will be listening on this port, instead the "replicationPort"
    is used for primary-mirror communication.
    """
    port = self.port
    return checkNotNone("port", port)
def __init__(self, workerPool, quiet, gpVersion, gpHome, masterDataDirectory,
             master_checksum_value=None, timeout=SEGMENT_TIMEOUT_DEFAULT,
             specialMode=None, wrapper=None, wrapper_args=None,
             parallel=gp.DEFAULT_GPSTART_NUM_WORKERS, logfileDirectory=False):
    """
    Capture the environment needed to start segments.

    workerPool is required; specialMode must be None, 'upgrade', or 'maintenance'.
    """
    checkNotNone("workerPool", workerPool)
    assert specialMode in (None, 'upgrade', 'maintenance')

    self.__workerPool = workerPool
    self.__quiet = quiet
    self.__gpVersion = gpVersion
    self.__gpHome = gpHome
    self.__masterDataDirectory = masterDataDirectory
    self.__timeout = timeout
    self.__specialMode = specialMode
    self.__wrapper = wrapper
    self.__wrapper_args = wrapper_args
    self.__parallel = parallel
    self.master_checksum_value = master_checksum_value
    self.logfileDirectory = logfileDirectory
def loadSystemConfig(self, useUtilityMode):
    """
    Load all segment information from the configuration source.

    Returns a new GpArray object
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    logger.info("Obtaining Segment details from master...")
    array = GpArray.initFromCatalog(self.__masterDbUrl, useUtilityMode)

    if get_local_db_mode(array.master.getSegmentDataDirectory()) != 'UTILITY':
        logger.debug("Validating configuration...")
        if not array.is_array_valid():
            raise InvalidSegmentConfiguration(array)
    return array
def __updateGpIdFile(self, gpEnv, gpArray, segments):
    """Rewrite the gp id file on each host for the given segments, in parallel."""
    segmentsPerHost = GpArray.getSegmentsByHostName(segments)
    newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(segments)

    cmds = []
    for host in segmentsPerHost.keys():
        info = newSegmentInfo[host]
        checkNotNone("segmentInfo for %s" % host, info)
        cmds.append(gp.ConfigureNewSegment("update gpid file",
                                           info,
                                           newSegments=False,
                                           verbose=gplog.logging_is_verbose(),
                                           batchSize=self.__parallelDegree,
                                           ctxt=gp.REMOTE,
                                           remoteHost=host,
                                           validationOnly=False,
                                           writeGpIdFileOnly=True))
    self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "writing updated gpid files")
def loadSystemConfig(self, useUtilityMode):
    """
    Load all segment information from the configuration source.

    Returns a new GpArray object
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    logger.info("Obtaining Segment details from master...")
    array = GpArray.initFromCatalog(self.__masterDbUrl, useUtilityMode)

    if get_local_db_mode(array.master.getSegmentDataDirectory()) != "UTILITY":
        logger.debug("Validating configuration...")
        if not array.is_array_valid():
            raise InvalidSegmentConfiguration(array)
    return array
def getDataDirectory(self):
    """
    Return the primary datadirectory location for the segment.

    Note: the datadirectory is just one of the filespace locations associated
    with the segment; calling code should be careful not to assume that this
    is the only directory location for this segment.

    Todo: evaluate callers of this function to see if they should really be
    dealing with a list of filespaces.
    """
    datadir = self.datadir
    return checkNotNone("dataDirectory", datadir)
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None):
    """
    masterDataDir: if None then we try to find it from the system environment
    readFromMasterCatalog: if True then we will connect to the master in utility mode and fetch some more
                           data from there (like collation settings)
    timeout/retries: passed through to the DbURL used for the catalog connection
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    # port and max_connections come from the master's postgresql.conf
    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local(
        'local GP software version check', self.__gpHome)
    logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        (self.__lcCollate, self.__lcMonetary, self.__lcNumeric) = catalog.getCollationSettings(conn)

        # MPP-13807, read/show the master's database version too
        self.__pgVersion = dbconn.execSQLForSingletonRow(
            conn, "select version();")[0]
        logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        # locale/version getters will fail until initialized from the catalog
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None, verbose=True):
    """
    masterDataDir: if None then we try to find it from the system environment
    readFromMasterCatalog: if True then we will connect to the master in utility mode and fetch some more
                           data from there (like collation settings)
    timeout/retries: passed through to the DbURL used for the catalog connection
    verbose: if False, suppress the local-version info log line
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    # port and max_connections come from the master's postgresql.conf
    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check',self.__gpHome)

    if verbose:
        logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        (self.__lcCollate, self.__lcMonetary, self.__lcNumeric) = catalog.getCollationSettings(conn)

        # MPP-13807, read/show the master's database version too
        self.__pgVersion = dbconn.execSQLForSingletonRow(conn, "select version();")[0]
        logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        # locale/version getters will fail until initialized from the catalog
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
def getLcMonetary(self):
    """Return lc_monetary; requires initialization with "readFromMasterCatalog"."""
    checkNotNone("lc_monetary", self.__lcMonetary)
    return self.__lcMonetary
def getRegistrationOrder(self):
    """Return the registration order, asserting it has been set."""
    order = self.registration_order
    return checkNotNone("registration_order", order)
def getLcCollate(self):
    """Return lc_collate; requires initialization with "readFromMasterCatalog"."""
    checkNotNone("lc_collate", self.__lcCollate)
    return self.__lcCollate
def updateSystemConfig(self, gpArray, textForConfigTable, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary):
    """
    Update the configuration for the given segments in the underlying
    configuration store to match the current values

    Also resets any dirty bits on saved/updated objects

    @param textForConfigTable label to be used when adding to segment configuration history
    @param dbIdToForceMirrorRemoveAdd a map of dbid -> True for mirrors for which we should force updating the mirror
    @param useUtilityMode True if the operations we're doing are expected to run via utility mode
    @param allowPrimary True if caller authorizes add/remove primary operations (e.g. gpexpand)
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    logger.debug("Validating configuration changes...")
    if not gpArray.is_array_valid():
        logger.critical("Configuration is invalid")
        raise InvalidSegmentConfiguration(gpArray)

    # all catalog changes below happen inside a single transaction
    conn = dbconn.connect(self.__masterDbUrl, useUtilityMode, allowSystemTableMods=True)
    dbconn.execSQL(conn, "BEGIN")

    # compute what needs to be updated
    update = ComputeCatalogUpdate(gpArray, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary)
    update.validate()

    # put the mirrors in a map by content id so we can update them later
    mirror_map = {}
    for seg in update.mirror_to_add:
        mirror_map[seg.getSegmentContentId()] = seg

    # remove mirror segments (e.g. for gpexpand rollback)
    for seg in update.mirror_to_remove:
        self.__updateSystemConfigRemoveMirror(conn, seg, textForConfigTable)

    # remove primary segments (e.g for gpexpand rollback)
    for seg in update.primary_to_remove:
        self.__updateSystemConfigRemovePrimary(conn, seg, textForConfigTable)

    # add new primary segments
    for seg in update.primary_to_add:
        self.__updateSystemConfigAddPrimary(conn, gpArray, seg, textForConfigTable, mirror_map)

    # add new mirror segments
    for seg in update.mirror_to_add:
        self.__updateSystemConfigAddMirror(conn, gpArray, seg, textForConfigTable)

    # remove and add mirror segments necessitated by catalog attribute update
    for seg in update.mirror_to_remove_and_add:
        self.__updateSystemConfigRemoveAddMirror(conn, gpArray, seg, textForConfigTable)

    # apply updates to existing segments
    for seg in update.segment_to_update:
        originalSeg = update.dbsegmap.get(seg.getSegmentDbId())
        self.__updateSystemConfigUpdateSegment(conn, gpArray, seg, originalSeg, textForConfigTable)

    # commit changes
    logger.debug("Committing configuration table changes")
    dbconn.execSQL(conn, "COMMIT")
    conn.close()

    # snapshot what we persisted so dirty-checking works on the next call
    gpArray.setSegmentsAsLoadedFromDb(
        [seg.copy() for seg in gpArray.getDbList()])
def getLocaleData(self):
    """
    Return the master's locale settings as a single
    "lc_collate:lc_monetary:lc_numeric" string.

    Raises if the locale values were not loaded, i.e. this object was not
    initialized with "readFromMasterCatalog".
    """
    # Check all three values, not just lc_numeric: a missing lc_collate or
    # lc_monetary would otherwise surface as an opaque TypeError from join().
    checkNotNone("lc_collate", self.__lcCollate)
    checkNotNone("lc_monetary", self.__lcMonetary)
    checkNotNone("lc_numeric", self.__lcNumeric)
    return ":".join([self.__lcCollate, self.__lcMonetary, self.__lcNumeric])
def getFaultProber():
    """Return the registered global fault prober, asserting one exists."""
    global gFaultProber
    prober = gFaultProber
    return checkNotNone("Global fault prober interface", prober)
def __init__(self, path):
    """Open a self-deleting temporary file for writing, exposed under the given path name."""
    checkNotNone("path", path)
    self.name = path
    self.__isClosed = False
    self.__underlyingFile = NamedTemporaryFile('w', delete=True)
def getOsProvider():
    """Return the registered global osProvider, asserting one exists."""
    global gProvider
    provider = gProvider
    return checkNotNone("Global osProvider", provider)
def setRegistrationOrder(self, registration_order):
    """Set the registration order after verifying it is a non-None integer."""
    checkNotNone("registration_order", registration_order)
    checkIsInt("registration_order", registration_order)
    self.registration_order = registration_order
def getLcCollate(self):
    """Return lc_collate; requires initialization with "readFromMasterCatalog"."""
    value = self.__lcCollate
    checkNotNone("lc_collate", value)
    return value
def setDataDirectory(self, dataDirectory):
    """Set the segment's data directory, rejecting None."""
    checkNotNone("dataDirectory", dataDirectory)
    self.datadir = dataDirectory
def getFileSystemProvider():
    """Return the registered global fileSystemProvider, asserting one exists."""
    global gProvider
    provider = gProvider
    return checkNotNone("Global fileSystemProvider", provider)
def getLcMonetary(self):
    """Return lc_monetary; requires initialization with "readFromMasterCatalog"."""
    value = self.__lcMonetary
    checkNotNone("lc_monetary", value)
    return value
def getLcNumeric(self):
    """Return lc_numeric; requires initialization with "readFromMasterCatalog"."""
    value = self.__lcNumeric
    checkNotNone("lc_numeric", value)
    return value
def getLcNumeric(self):
    """Return lc_numeric; requires initialization with "readFromMasterCatalog"."""
    checkNotNone("lc_numeric", self.__lcNumeric)
    return self.__lcNumeric
def getStatus(self):
    """Return the segment's status, asserting it has been set."""
    status = self.status
    return checkNotNone("status", status)
def registerFaultProber(prober):
    """Install a new global fault prober, rejecting None."""
    global gFaultProber
    checkNotNone("New global fault prober interface", prober)
    gFaultProber = prober
def getRole(self):
    """Return the segment's role, asserting it has been set."""
    role = self.role
    return checkNotNone("role", role)
def __init__(self, segment):
    """Wrap the given segment, rejecting None."""
    checkNotNone("segment", segment)
    self.__segment = segment
def updateSystemConfig(self, gpArray, textForConfigTable, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary):
    """
    Update the configuration for the given segments in the underlying
    configuration store to match the current values

    Also resets any dirty bits on saved/updated objects

    @param textForConfigTable label to be used when adding to segment configuration history
    @param dbIdToForceMirrorRemoveAdd a map of dbid -> True for mirrors for which we should force updating the mirror
    @param useUtilityMode True if the operations we're doing are expected to run via utility mode
    @param allowPrimary True if caller authorizes add/remove primary operations (e.g. gpexpand)
    """
    # ensure initializeProvider() was called
    checkNotNone("masterDbUrl", self.__masterDbUrl)

    logger.debug("Validating configuration changes...")
    if not gpArray.is_array_valid():
        logger.critical("Configuration is invalid")
        raise InvalidSegmentConfiguration(gpArray)

    # all catalog changes below happen inside a single transaction
    conn = dbconn.connect(self.__masterDbUrl, useUtilityMode, allowSystemTableMods="dml")
    dbconn.execSQL(conn, "BEGIN")

    # compute what needs to be updated
    update = ComputeCatalogUpdate(gpArray, dbIdToForceMirrorRemoveAdd, useUtilityMode, allowPrimary)
    update.validate()

    # put the mirrors in a map by content id so we can update them later
    mirror_map = {}
    for seg in update.mirror_to_add:
        mirror_map[seg.getSegmentContentId()] = seg

    # reset dbId of new primary and mirror segments to -1
    # before invoking the operations which will assign them new ids
    for seg in update.primary_to_add:
        seg.setSegmentDbId(-1)
    for seg in update.mirror_to_add:
        seg.setSegmentDbId(-1)

    # remove mirror segments (e.g. for gpexpand rollback)
    for seg in update.mirror_to_remove:
        self.__updateSystemConfigRemoveMirror(conn, seg, textForConfigTable)

    # remove primary segments (e.g for gpexpand rollback)
    for seg in update.primary_to_remove:
        self.__updateSystemConfigRemovePrimary(conn, seg, textForConfigTable)

    # add new primary segments
    for seg in update.primary_to_add:
        self.__updateSystemConfigAddPrimary(conn, gpArray, seg, textForConfigTable, mirror_map)

    # add new mirror segments
    for seg in update.mirror_to_add:
        self.__updateSystemConfigAddMirror(conn, gpArray, seg, textForConfigTable)

    # remove and add mirror segments necessitated by catalog attribute update
    for seg in update.mirror_to_remove_and_add:
        self.__updateSystemConfigRemoveAddMirror(conn, gpArray, seg, textForConfigTable)

    # apply updates to existing segments
    for seg in update.segment_to_update:
        originalSeg = update.dbsegmap.get(seg.getSegmentDbId())
        self.__updateSystemConfigUpdateSegment(conn, gpArray, seg, originalSeg, textForConfigTable)

    # apply update to fault strategy
    if gpArray.getStrategyAsLoadedFromDb() != gpArray.getFaultStrategy():
        self.__updateSystemConfigFaultStrategy(conn, gpArray)

    # commit changes
    logger.debug("Committing configuration table changes")
    dbconn.execSQL(conn, "COMMIT")
    conn.close()

    # Record what we persisted so the dirty checks above work on the next call.
    # BUGFIX: the strategy snapshot must be the scalar strategy value, not a
    # one-element list -- the earlier comparison against getFaultStrategy()
    # would otherwise never match and the strategy would be rewritten every time.
    gpArray.setStrategyAsLoadedFromDb(gpArray.getFaultStrategy())
    gpArray.setSegmentsAsLoadedFromDb([seg.copy() for seg in gpArray.getDbList()])
def setPort(self, port):
    """Set the segment's port after verifying it is a non-None integer."""
    checkNotNone("port", port)
    checkIsInt("port", port)
    self.port = port
def getLocaleData(self):
    """
    Return the master's locale settings as a single
    "lc_collate:lc_monetary:lc_numeric" string.

    Raises if the locale values were not loaded, i.e. this object was not
    initialized with "readFromMasterCatalog".
    """
    # Check all three values, not just lc_numeric: a missing lc_collate or
    # lc_monetary would otherwise surface as an opaque TypeError from join().
    checkNotNone("lc_collate", self.__lcCollate)
    checkNotNone("lc_monetary", self.__lcMonetary)
    checkNotNone("lc_numeric", self.__lcNumeric)
    return ":".join([self.__lcCollate, self.__lcMonetary, self.__lcNumeric])