def write(self, data):
    """Forward *data* to the wrapped file object.

    Raises Exception if close() has already been called on this wrapper.
    """
    if self.__isClosed:
        raise Exception("File has been closed")
    # Defensive: the handle should always be present while the file is open.
    checkNotNone("file", self.__underlyingFile)
    self.__underlyingFile.write(data)
def registerOsProvider(provider):
    """Install *provider* as the module-wide os provider.

    Any previously registered provider is destroyed before being replaced.
    """
    global gProvider
    checkNotNone("New global osProvider", provider)
    previous = gProvider
    if previous is not None:
        # tear down the provider being replaced
        previous.destroy()
    gProvider = provider
def registerFileSystemProvider(provider):
    """Install *provider* as the module-wide file system provider.

    Any previously registered provider is destroyed before being replaced.
    """
    global gProvider
    checkNotNone("New global fileSystemProvider", provider)
    previous = gProvider
    if previous is not None:
        # tear down the provider being replaced
        previous.destroy()
    gProvider = provider
def __init__(self, source, dest, isTargetReusedLocation):
    """Record a source/destination pair for a copy operation.

    @param source the origin location (must not be None)
    @param dest the target location (must not be None)
    @param isTargetReusedLocation if True then the dest location is a
           cleaned-up location
    """
    checkNotNone("source", source)
    checkNotNone("dest", dest)

    self.__source = source
    self.__dest = dest
    self.__isTargetReusedLocation = isTargetReusedLocation
def initializeProvider(self, masterPort):
    """Initialize the provider to get information from the given master db,
    if it chooses to get its data from the database.

    Returns self (fluent style).
    """
    # verify here to match what the true (non-test) provider will require
    checkNotNone("masterPort", masterPort)
    return self
def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
    """Build a gp.ConfigureNewSegment command for one host.

    Closure: reads newSegmentInfo, tarFileName and self from the enclosing
    scope.
    """
    info = newSegmentInfo[hostName]
    checkNotNone("segmentInfo for %s" % hostName, info)
    return gp.ConfigureNewSegment(cmdLabel,
                                  info,
                                  tarFile=tarFileName,
                                  newSegments=True,
                                  verbose=gplog.logging_is_verbose(),
                                  batchSize=self.__parallelDegree,
                                  ctxt=gp.REMOTE,
                                  remoteHost=hostName,
                                  validationOnly=validationOnly)
def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization):
    """Validate and record one mirror-rebuild pairing.

    @param failedSegment the segment that failed (may be None if only a
           failover target is given; at least one of failedSegment /
           failoverSegment must be passed)
    @param liveSegment the up-and-running primary to recover from
           (must not be None)
    @param failoverSegment the location to rebuild the mirror at
           (may be None to rebuild in place)
    @param forceFullSynchronization True to force full resynchronization

    Raises Exception / ExceptionNoStackTraceNeeded when the segments passed
    do not form a valid recovery pairing.
    """
    checkNotNone("liveSegment", liveSegment)
    checkNotNone("forceFullSynchronization", forceFullSynchronization)

    if failedSegment is None and failoverSegment is None:
        raise Exception("No mirror passed to GpMirrorToBuild")

    # the source of the recovery must be a QE primary that is currently up
    if not liveSegment.isSegmentQE():
        raise ExceptionNoStackTraceNeeded(
            "Segment to recover from for content %s is not a correct segment "
            "(it is a master or standby master)" % liveSegment.getSegmentContentId())
    if not liveSegment.isSegmentPrimary(True):
        raise ExceptionNoStackTraceNeeded(
            "Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
    if not liveSegment.isSegmentUp():
        raise ExceptionNoStackTraceNeeded(
            "Primary segment is not up for content %s" % liveSegment.getSegmentContentId())

    # the failed mirror must pair with the live primary by content, and must
    # not be the primary itself
    if failedSegment is not None:
        if failedSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded(
                "The primary is not of the same content as the failed mirror. Primary content %d, "
                "mirror content %d" % (liveSegment.getSegmentContentId(), failedSegment.getSegmentContentId()))
        if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded(
                "For content %d, the dbid values are the same. "
                "A segment may not be recovered from itself" % liveSegment.getSegmentDbId())

    # same pairing rules for the failover (rebuild-at) location
    if failoverSegment is not None:
        if failoverSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
            raise ExceptionNoStackTraceNeeded(
                "The primary is not of the same content as the mirror. Primary content %d, "
                "mirror content %d" % (liveSegment.getSegmentContentId(), failoverSegment.getSegmentContentId()))
        if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
            raise ExceptionNoStackTraceNeeded(
                "For content %d, the dbid values are the same. "
                "A segment may not be built from itself" % liveSegment.getSegmentDbId())

    if failedSegment is not None and failoverSegment is not None:
        # for now, we require the code to have produced this -- even when moving the segment to another
        # location, we preserve the directory
        assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()

    self.__failedSegment = failedSegment
    self.__liveSegment = liveSegment
    self.__failoverSegment = failoverSegment
    # __forceFullSynchronization is true if full resynchronization should be
    # FORCED -- that is, the existing segment will be cleared and all objects
    # will be transferred by the file resynchronization process on the server
    # (was a no-op bare string statement; converted to a real comment)
    self.__forceFullSynchronization = forceFullSynchronization
def __updateGpIdFile(self, gpEnv, gpArray, segments):
    """Rewrite the gpid file on every host holding one of *segments*,
    running the per-host commands through the worker pool."""
    segmentsPerHost = GpArray.getSegmentsByHostName(segments)
    infoByHost = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(segments)

    cmds = []
    for host in segmentsPerHost:  # iterating the dict yields its keys
        info = infoByHost[host]
        checkNotNone("segmentInfo for %s" % host, info)
        cmds.append(gp.ConfigureNewSegment("update gpid file",
                                           info,
                                           newSegments=False,
                                           verbose=gplog.logging_is_verbose(),
                                           batchSize=self.__parallelDegree,
                                           ctxt=gp.REMOTE,
                                           remoteHost=host,
                                           validationOnly=False,
                                           writeGpIdFileOnly=True))
    self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "writing updated gpid files")
def __init__(self, workerPool, quiet, localeData, gpVersion, gpHome, masterDataDirectory,
             timeout=SEGMENT_TIMEOUT_DEFAULT, specialMode=None, wrapper=None, wrapper_args=None):
    """Capture the configuration used when operating on segments.

    specialMode must be one of None, 'upgrade' or 'maintenance'.
    """
    checkNotNone("workerPool", workerPool)
    assert specialMode in [None, 'upgrade', 'maintenance']

    self.__workerPool = workerPool
    self.__quiet = quiet
    self.__localeData = localeData
    self.__gpVersion = gpVersion
    self.__gpHome = gpHome
    self.__masterDataDirectory = masterDataDirectory
    self.__timeout = timeout
    self.__specialMode = specialMode
    self.__wrapper = wrapper
    self.__wrapper_args = wrapper_args
def __init__(self, masterDataDir, readFromMasterCatalog, timeout=None, retries=None):
    """
    masterDataDir: if None then we try to find it from the system environment
    readFromMasterCatalog: if True then we will connect to the master in
        utility mode and fetch some more data from there (like collation
        settings)
    timeout/retries: passed through to the DbURL used for the catalog read
    """
    if masterDataDir is None:
        self.__masterDataDir = gp.get_masterdatadir()
    else:
        self.__masterDataDir = masterDataDir

    logger.debug("Obtaining master's port from master data directory")
    pgconf_dict = pgconf.readfile(self.__masterDataDir + "/postgresql.conf")
    self.__masterPort = pgconf_dict.int('port')
    logger.debug("Read from postgresql.conf port=%s" % self.__masterPort)
    self.__masterMaxConnections = pgconf_dict.int('max_connections')
    logger.debug("Read from postgresql.conf max_connections=%s" % self.__masterMaxConnections)

    self.__gpHome = gp.get_gphome()
    self.__gpVersion = gp.GpVersion.local('local GP software version check', self.__gpHome)
    logger.info("local Greenplum Version: '%s'" % self.__gpVersion)

    # read collation settings from master
    if readFromMasterCatalog:
        dbUrl = dbconn.DbURL(port=self.__masterPort, dbname='template1', timeout=timeout, retries=retries)
        conn = dbconn.connect(dbUrl, utility=True)
        try:
            (self.__lcCollate, self.__lcMonetary, self.__lcNumeric) = catalog.getCollationSettings(conn)

            # MPP-13807, read/show the master's database version too
            self.__pgVersion = dbconn.execSQLForSingletonRow(conn, "select version();")[0]
            logger.info("master Greenplum Version: '%s'" % self.__pgVersion)
        finally:
            # fix: previously the connection leaked if either query raised
            conn.close()

        checkNotNone("lc_collate", self.__lcCollate)
        checkNotNone("lc_monetary", self.__lcMonetary)
        checkNotNone("lc_numeric", self.__lcNumeric)
    else:
        self.__lcCollate = None
        self.__lcMonetary = None
        self.__lcNumeric = None
        self.__pgVersion = None
def getLocaleData(self):
    """Return the collation settings as a single 'collate:monetary:numeric'
    string; raises if we were not initialized with readFromMasterCatalog."""
    checkNotNone("lc_numeric", self.__lcNumeric)
    return ":".join((self.__lcCollate, self.__lcMonetary, self.__lcNumeric))
def getOsProvider():
    """Return the registered global os provider, raising if none is set.

    The 'global' statement the original carried was redundant: module globals
    are readable without it, and this function never rebinds gProvider.
    """
    return checkNotNone("Global osProvider", gProvider)
def registerFaultProber(prober):
    """Install *prober* as the process-wide fault prober (must not be None)."""
    global gFaultProber
    # checkNotNone validates and passes the value through
    gFaultProber = checkNotNone("New global fault prober interface", prober)
def getConfigurationProvider():
    """Return the registered global configuration provider, raising if none
    is set.

    The 'global' statement the original carried was redundant: module globals
    are readable without it, and this function never rebinds the name.
    """
    return checkNotNone("Global configuration provider", gConfigurationProvider)
def registerConfigurationProvider(provider):
    """Install *provider* as the global configuration provider (must not be
    None)."""
    global gConfigurationProvider
    # checkNotNone validates and passes the value through
    gConfigurationProvider = checkNotNone("New global configuration provider", provider)
def getFaultProber():
    """Return the registered global fault prober, raising if none is set.

    The 'global' statement the original carried was redundant: module globals
    are readable without it, and this function never rebinds gFaultProber.
    """
    return checkNotNone("Global fault prober interface", gFaultProber)
def getLcNumeric(self):
    """Return lc_numeric; raises if we were not initialized with
    readFromMasterCatalog."""
    checkNotNone("lc_numeric", self.__lcNumeric)
    return self.__lcNumeric
def getLcMonetary(self):
    """Return lc_monetary; raises if we were not initialized with
    readFromMasterCatalog."""
    checkNotNone("lc_monetary", self.__lcMonetary)
    return self.__lcMonetary
def getLcCollate(self):
    """Return lc_collate; raises if we were not initialized with
    readFromMasterCatalog."""
    checkNotNone("lc_collate", self.__lcCollate)
    return self.__lcCollate
def getFileSystemProvider():
    """Return the registered global file system provider, raising if none is
    set.

    The 'global' statement the original carried was redundant: module globals
    are readable without it, and this function never rebinds gProvider.
    """
    return checkNotNone("Global fileSystemProvider", gProvider)
def __init__(self, segment):
    """Wrap *segment*, which must not be None."""
    checkNotNone("segment", segment)
    self.__segment = segment
def __init__(self, path):
    """Open a writable temporary file associated with *path*.

    NOTE(review): self.name reports the requested path, but writes go to an
    auto-deleting NamedTemporaryFile — presumably the content is moved into
    place elsewhere; confirm against the rest of the class.
    """
    checkNotNone("path", path)
    self.name = path
    self.__isClosed = False
    self.__underlyingFile = NamedTemporaryFile('w', delete=True)