def doCommit(cfg, changeSetFile, targetLabel):
    client = conaryclient.ConaryClient(cfg)
    repos = client.getRepos()
    callback = CheckinCallback()
    try:
        cs = changeset.ChangeSetFromFile(changeSetFile)
    except filecontainer.BadContainer:
        log.error("invalid changeset %s", changeSetFile)
        return 1

    if cs.isLocal():
        if not targetLabel:
            log.error("committing local changesets requires a targetLabel")
            # bail out; versions.Label(None) below would raise otherwise
            return 1
        label = versions.Label(targetLabel)
        cs.setTargetShadow(repos, label)
        commitCs = cs.makeAbsolute(repos)

        (fd, changeSetFile) = tempfile.mkstemp()
        os.close(fd)
        commitCs.writeToFile(changeSetFile)

    try:
        # hopefully the file hasn't changed underneath us since we
        # did the check at the top of doCommit(). We should probably
        # add a commitChangeSet method that takes a fd.
        try:
            repos.commitChangeSetFile(changeSetFile, callback=callback)
        except errors.CommitError, e:
            print e
    finally:
        if targetLabel:
            os.unlink(changeSetFile)
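# A minimal usage sketch for doCommit (hypothetical: assumes a populated
# conarycfg configuration object; the changeset paths and label are
# illustrative, not taken from the source above).
def _example_doCommit(cfg):
    # a repository changeset commits directly, no target label needed
    doCommit(cfg, 'foo.ccs', None)
    # a local changeset must name a label to shadow onto
    doCommit(cfg, 'local.ccs', 'example.com@rpl:devel')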
def signTroves(cfg, specStrList, recurse=False, callback=None):
    troveStr = ""
    jobList = []
    trv = []
    client = conaryclient.ConaryClient(cfg)
    repos = client.getRepos()
    if callback is None:
        if cfg.quiet:
            callback = callbacks.SignatureCallback()
        else:
            callback = SignatureCallback()

    for specStr in specStrList:
        name, versionStr, flavor = parseTroveSpec(specStr)
        try:
            trvList = repos.findTrove([cfg.buildLabel],
                                      (name, versionStr, flavor), cfg.flavor)
        except errors.TroveNotFound, e:
            log.error(str(e))
            return

        for trvInfo in trvList:
            troveStr += "%s=%s[%s]\n" % (trvInfo[0], trvInfo[1].asString(),
                                         deps.formatFlavor(trvInfo[2]))
            jobList.append((trvInfo[0], (None, None),
                            (trvInfo[1], trvInfo[2]), True))
def _fetchUrl(self, url, headers):
    if isinstance(url, str):
        url = laUrl(url)

    retries = 3
    if self.cfg.proxy and not self.noproxyFilter.bypassProxy(url.host):
        retries = 7
    inFile = None
    for i in range(retries):
        try:
            # set up a handler that tracks cookies to handle
            # sites like Colabnet that want to set a session cookie
            cj = cookielib.LWPCookieJar()
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

            # add password handler if needed
            if url.user:
                url.passwd = url.passwd or ''
                opener.add_handler(
                    HTTPBasicAuthHandler(url.user, url.passwd))

            # add proxy and proxy password handler if needed
            if self.cfg.proxy and \
                    not self.noproxyFilter.bypassProxy(url.host):
                proxyPasswdMgr = urllib2.HTTPPasswordMgr()
                for v in self.cfg.proxy.values():
                    pUrl = laUrl(v[1])
                    if pUrl.user:
                        pUrl.passwd = pUrl.passwd or ''
                        proxyPasswdMgr.add_password(
                            None, pUrl.asStr(noAuth=True, quoted=True),
                            url.user, url.passwd)

                opener.add_handler(
                    urllib2.ProxyBasicAuthHandler(proxyPasswdMgr))
                opener.add_handler(
                    urllib2.ProxyHandler(self.cfg.proxy))

            if url.scheme == 'ftp':
                urlStr = url.asStr(noAuth=False, quoted=True)
            else:
                urlStr = url.asStr(noAuth=True, quoted=True)
            req = urllib2.Request(urlStr, headers=headers)

            inFile = opener.open(req)
            if not urlStr.startswith('ftp://'):
                content_type = inFile.info().get('content-type')
                if not url.explicit() and 'text/html' in content_type:
                    raise urllib2.URLError('"%s" not found' % urlStr)
            log.info('Downloading %s...', urlStr)
            break
        except urllib2.HTTPError, msg:
            if msg.code == 404:
                return None
            else:
                log.error('error downloading %s: %s', urlStr, str(msg))
                return None
        except urllib2.URLError:
            return None
def createChangeSet(self, jobId, troveSpecs=None):
    """
    Creates a changeset object with all the built troves for a job.

    @param jobId: jobId or uuid for a given job.
    @type jobId: int or uuid
    @return: conary changeset object
    @rtype: conary.repository.changeset.ReadOnlyChangeSet
    @raise: JobNotFound: If job does not exist
    """
    job = self.client.getJob(jobId)
    binTroves = []
    for trove in job.iterTroves():
        binTroves.extend(trove.iterBuiltTroves())
    if not binTroves:
        log.error('No built troves associated with this job')
        return None
    if troveSpecs:
        troveSpecs = cmdline.parseTroveSpecs(troveSpecs)
        source = trovesource.SimpleTroveSource(binTroves)
        results = source.findTroves(None, troveSpecs)
        # materialize the iterator: it is traversed twice below, once
        # for jobList and once for primaryTroveList
        binTroves = list(itertools.chain(*results.values()))
    jobList = [(x[0], (None, None), (x[1], x[2]), True) for x in binTroves]
    primaryTroveList = [x for x in binTroves if ':' not in x[0]]
    cs = self.getRepos().createChangeSet(jobList, recurse=False,
                                         primaryTroveList=primaryTroveList)
    return cs
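# Hedged caller sketch for createChangeSet: pull the built binaries of a
# finished job as an in-memory changeset. The helper instance, jobId, and
# trove names are illustrative; writeToFile mirrors its use elsewhere in
# this file.
def _example_createChangeSet(helper):
    cs = helper.createChangeSet(42, troveSpecs=['foo', 'bar:runtime'])
    if cs is not None:
        cs.writeToFile('job42.ccs')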
def createChangeSetFile(self, jobId, path, troveSpecs=None):
    """
    Creates a changeset file with all the built troves for a job.

    @param jobId: jobId or uuid for a given job.
    @type jobId: int or uuid
    @return: False if changeset not created, True if it was.
    @raise: JobNotFound: If job does not exist
    """
    job = self.client.getJob(jobId)
    binTroves = []
    for trove in job.iterTroves():
        binTroves.extend(trove.iterBuiltTroves())
    if not binTroves:
        log.error('No built troves associated with this job')
        return False
    if troveSpecs:
        troveSpecs = [cmdline.parseTroveSpec(x) for x in troveSpecs]
        source = trovesource.SimpleTroveSource(binTroves)
        results = source.findTroves(None, troveSpecs)
        binTroves = list(itertools.chain(*results.values()))
        primaryTroveList = binTroves
        recurse = True
    else:
        recurse = False
        primaryTroveList = [x for x in binTroves if ':' not in x[0]]
    jobList = [(x[0], (None, None), (x[1], x[2]), True) for x in binTroves]
    self.getRepos().createChangeSetFile(jobList, path, recurse=recurse,
                                        primaryTroveList=primaryTroveList)
    return True
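# Companion sketch: the file-writing variant returns a success flag rather
# than a changeset object (helper instance, jobId, path, and trove name are
# illustrative).
def _example_createChangeSetFile(helper):
    if not helper.createChangeSetFile(42, 'job42.ccs', troveSpecs=['foo']):
        log.error('no changeset written for job 42')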
def _parallel(targets, classMethod, *args, **kwargs):
    """
    Map a method call across multiple targets concurrently
    """
    if len(targets) == 1:
        return [classMethod(targets[0], *args, **kwargs)]
    results = Queue.Queue()
    threads = []
    for index in range(len(targets)):
        thread = threading.Thread(target=_parallel_run,
                                  args=(index, results, targets,
                                        classMethod, args, kwargs,))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    ret = [None] * len(targets)
    last_error = None
    for thread in threads:
        index, ok, result = results.get()
        if ok:
            ret[index] = result
        else:
            last_error, trace = result
            log.error("Error updating target %s:\n%s",
                      targets[index].name, trace)
    if last_error is not None:
        raise last_error
    return ret
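# Usage sketch for _parallel: apply one unbound method to every target at
# once; results come back in target order. The Target class and its update
# method are hypothetical stand-ins for whatever classMethod is passed.
def _example_parallel(targets):
    # equivalent to [t.update('HEAD') for t in targets], but threaded
    return _parallel(targets, Target.update, 'HEAD')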
def main(argv=None):
    if argv is None:
        argv = sys.argv

    sys.stdout = util.FileIgnoreEpipe(sys.stdout)
    try:
        argv = list(argv)
        debugAll = '--debug-all' in argv
        if debugAll:
            debuggerException = Exception
            argv.remove('--debug-all')
        else:
            debuggerException = errors.InternalConaryError

        cvcMain = CvcMain()
        ccfg = cvcMain.getConfigFile(argv)
        if debugAll:
            ccfg.debugExceptions = True
            ccfg.debugRecipeExceptions = True

        # reset the excepthook (using cfg values for exception settings)
        sys.excepthook = util.genExcepthook(debug=ccfg.debugExceptions,
                                            debugCtrlC=debugAll)
        return cvcMain.main(argv, debuggerException, debugAll=debugAll,
                            cfg=ccfg)
    except debuggerException:
        raise
    except (errors.ConaryError, errors.CvcError, cfg.CfgError,
            openpgpfile.PGPError), e:
        if str(e):
            log.error(str(e))
            sys.exit(2)
        else:
            raise
def _fetchUrl(self, url, headers):
    if isinstance(url, str):
        url = laUrl(url)

    retries = 3
    if self.cfg.proxy and not self.noproxyFilter.bypassProxy(url.host):
        retries = 7
    inFile = None
    for i in range(retries):
        try:
            # set up a handler that tracks cookies to handle
            # sites like Colabnet that want to set a session cookie
            cj = cookielib.LWPCookieJar()
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

            # add password handler if needed
            if url.user:
                url.passwd = url.passwd or ''
                passwdMgr = self.BasicPasswordManager()
                passwdMgr.add_password(url.user, url.passwd)
                opener.add_handler(urllib2.HTTPBasicAuthHandler(passwdMgr))

            # add proxy and proxy password handler if needed
            if self.cfg.proxy and \
                    not self.noproxyFilter.bypassProxy(url.host):
                proxyPasswdMgr = urllib2.HTTPPasswordMgr()
                for v in self.cfg.proxy.values():
                    pUrl = laUrl(v[1])
                    if pUrl.user:
                        pUrl.passwd = pUrl.passwd or ''
                        proxyPasswdMgr.add_password(
                            None, pUrl.asStr(noAuth=True, quoted=True),
                            url.user, url.passwd)

                opener.add_handler(
                    urllib2.ProxyBasicAuthHandler(proxyPasswdMgr))
                opener.add_handler(urllib2.ProxyHandler(self.cfg.proxy))

            if url.scheme == 'ftp':
                urlStr = url.asStr(noAuth=False, quoted=True)
            else:
                urlStr = url.asStr(noAuth=True, quoted=True)
            req = urllib2.Request(urlStr, headers=headers)

            inFile = opener.open(req)
            if not urlStr.startswith('ftp://'):
                content_type = inFile.info()['content-type']
                if not url.explicit() and 'text/html' in content_type:
                    raise urllib2.URLError('"%s" not found' % urlStr)
            log.info('Downloading %s...', urlStr)
            break
        except urllib2.HTTPError, msg:
            if msg.code == 404:
                return None
            else:
                log.error('error downloading %s: %s', urlStr, str(msg))
                return None
        except urllib2.URLError:
            return None
def loadRecipe(repos, name, version, flavor, trv, defaultFlavor=None,
               loadInstalledSource=None, installLabelPath=None,
               buildLabel=None, groupRecipeSource=None, cfg=None):
    name = name.split(':')[0]
    try:
        if defaultFlavor is not None:
            fullFlavor = deps.overrideFlavor(defaultFlavor, flavor)
        else:
            fullFlavor = flavor
        # set up necessary flavors and track used flags before
        # calling loadRecipe, since even loading the class
        # may check some flags that may never be checked inside
        # the recipe
        recipeObj, loader = getRecipeObj(repos, name, version, fullFlavor,
                                         trv,
                                         loadInstalledSource=loadInstalledSource,
                                         installLabelPath=installLabelPath,
                                         buildLabel=buildLabel, cfg=cfg)
        relevantFlavor = use.usedFlagsToFlavor(recipeObj.name)
        relevantFlavor = flavorutil.removeInstructionSetFlavor(relevantFlavor)
        # always add in the entire arch flavor. We need to ensure the
        # relevant flavor is unique per architecture, also, arch flavors
        # can affect the macros used.
        if defaultFlavor is not None:
            relevantFlavor.union(flavor)
        relevantFlavor.union(flavorutil.getArchFlags(fullFlavor))
        relevantFlags = flavorutil.getFlavorUseFlags(relevantFlavor)
        flags = flavorutil.getFlavorUseFlags(fullFlavor)
        use.track(False)

        for flagSet in ('Use',):
            # allow local flags not to be used -- they are set to their
            # default
            if flagSet not in relevantFlags:
                continue
            for flag in relevantFlags[flagSet]:
                if flag not in flags[flagSet]:
                    raise RuntimeError(
                        "Recipe %s uses Flavor %s but value not known"
                        % (name, flag))
        if 'Arch' in relevantFlags:
            for majarch in relevantFlags['Arch'].keys():
                for subarch in relevantFlags['Arch'][majarch]:
                    if not use.Arch[majarch][subarch]:
                        # negative values for subarches are assumed
                        continue
                    if subarch not in flags['Arch'][majarch]:
                        log.error("arch %s.%s used but not specified"
                                  % (majarch, subarch))
                        raise RuntimeError(
                            "arch %s.%s used but not specified"
                            % (majarch, subarch))
        use.resetUsed()
    except:
        log.error('Error Loading Recipe (%s, %s, %s):\n%s'
                  % (name, version, fullFlavor,
                     ''.join(traceback.format_exc())))
        raise
    return loader, recipeObj, relevantFlavor
def _buildFailed(failureFd, errMsg, traceBack=''):
    log.error(errMsg)
    frz = '\002'.join(str(x) for x in freeze('FailureReason',
                                             BuildFailed(errMsg, traceBack)))
    if failureFd is not None:
        os.write(failureFd, frz)
        os.close(failureFd)
    os._exit(1)
def doLocalCommit(db, changeSetFile):
    cs = changeset.ChangeSetFromFile(changeSetFile)
    if not cs.isLocal():
        log.error("repository changesets must be applied with update instead")
    else:
        db.commitChangeSet(cs, database.UpdateJob(db),
                           rollbackPhase=db.ROLLBACK_PHASE_LOCAL,
                           updateDatabase=False)
def computeTroveList(client, applyList):
    # As dumb as this may sound, the same trove may be present multiple times
    # in applyList, so remove duplicates
    toFind = set()
    for (n, (oldVer, oldFla), (newVer, newFla), isAbs) in applyList:
        if n[0] in ('-', '+'):
            n = n[1:]

        found = False
        if oldVer or (oldFla is not None):
            toFind.add((n, oldVer, oldFla))
            found = True

        if newVer or (newFla is not None):
            toFind.add((n, newVer, newFla))
            found = True

        if not found:
            toFind.add((n, None, None))

    repos = client.getRepos()
    results = repos.findTroves(client.cfg.installLabelPath, toFind,
                               client.cfg.flavor)

    for troveSpec, trovesFound in results.iteritems():
        if len(trovesFound) > 1:
            log.error("trove %s has multiple matches on "
                      "installLabelPath", troveSpec[0])

    primaryCsList = []
    for (n, (oldVer, oldFla), (newVer, newFla), isAbs) in applyList:
        if n[0] == '-':
            updateByDefault = False
        else:
            updateByDefault = True

        if n[0] in ('-', '+'):
            n = n[1:]

        found = False
        if oldVer or (oldFla is not None):
            oldVer, oldFla = results[n, oldVer, oldFla][0][1:]
            found = True

        if newVer or (newFla is not None):
            newVer, newFla = results[n, newVer, newFla][0][1:]
            found = True

        if not found:
            if updateByDefault:
                newVer, newFla = results[n, None, None][0][1:]
            else:
                oldVer, oldFla = results[n, None, None][0][1:]

        primaryCsList.append((n, (oldVer, oldFla), (newVer, newFla), isAbs))

    return primaryCsList
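# Shape of the applyList input as consumed above (hedged sketch; None for a
# version or flavor means "resolve via findTroves"). A leading '-' on the
# name requests an erase, '+' an update; the final flag is isAbs.
_exampleApplyList = [
    ('+foo', (None, None), (None, None), True),   # install/update foo
    ('-bar', (None, None), (None, None), False),  # erase bar
]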
def __init__(self, regex, macros, setmode=None, unsetmode=None,
             name=None, rootdir=None):
    """
    Provide information to match against.

    @param regex: regular expression(s) to match against pathnames
    @type regex: string, list of strings, or compiled regular expression;
    strings or lists of strings will have macros interpolated.
    @param macros: current recipe macros
    @param setmode: bitmask containing bits that must be set for a match
    @type setmode: integer
    @param unsetmode: bitmask containing bits that must be unset for a match
    @type unsetmode: integer
    @param name: name of package or component
    @type name: string

    The setmode and unsetmode masks should be constructed from
    C{stat.S_IFDIR}, C{stat.S_IFCHR}, C{stat.S_IFBLK}, C{stat.S_IFREG},
    C{stat.S_IFIFO}, C{stat.S_IFLNK}, and C{stat.S_IFSOCK}.
    Note that these are not simple bitfields. To specify
    ``no symlinks'' in unsetmask you need to provide
    C{stat.S_IFLNK^stat.S_IFREG}.
    To specify only character devices in setmask, you need
    C{stat.S_IFCHR^stat.S_IFBLK}.
    Here are the binary bitmasks for the flags::

        S_IFDIR  = 0100000000000000
        S_IFCHR  = 0010000000000000
        S_IFBLK  = 0110000000000000
        S_IFREG  = 1000000000000000
        S_IFIFO  = 0001000000000000
        S_IFLNK  = 1010000000000000
        S_IFSOCK = 1100000000000000
    """
    if name:
        self.name = name
    if rootdir is None:
        self.rootdir = macros['destdir']
    else:
        self.rootdir = rootdir
    self.setmode = setmode
    self.unsetmode = unsetmode
    tmplist = []
    if callable(regex):
        regex = regex()
    if type(regex) is str:
        try:
            self.regexp = self._anchor(regex % macros)
        except ValueError, msg:
            log.error('invalid macro substitution in "%s", missing "s"?'
                      % regex)
            raise
        self.re = re.compile(self.regexp)
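# Construction sketch matching the docstring above. The class name
# PathFilter is an illustrative stand-in (the snippet shows only __init__);
# macros would come from the enclosing recipe, and the unsetmode value is
# the docstring's own "no symlinks" example.
def _example_filter(macros):
    import stat
    # match shared libraries under %(libdir)s, but never symlinks
    return PathFilter(r'%(libdir)s/.*\.so\..*', macros,
                      unsetmode=stat.S_IFLNK ^ stat.S_IFREG)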
def checkConfig(cfg):
    if not cfg.host:
        log.error("ERROR: cfg.host is not defined")
        raise RuntimeError("cfg.host is not defined")
    # make sure that each label belongs to the host we're mirroring
    for label in cfg.labels:
        if label.getHost() != cfg.host:
            log.error("ERROR: label %s is not on host %s", label, cfg.host)
            raise RuntimeError("label %s is not on host %s"
                               % (label, cfg.host))
def __init__(self, db, cfg, item, changeSetPath=None):
    _FindLocalChanges.__init__(self, db, cfg, display=DISPLAY_NONE,
                               allMachineChanges=True)
    cs = self.run([item])
    if not [x for x in cs.iterNewTroveList()]:
        log.error("there have been no local changes")
    else:
        cs.writeToFile(changeSetPath)
def _getChangeSet(path):
    try:
        cs = changeset.ChangeSetFromFile(path)
    except BadContainer, msg:
        # ensure that it is obvious that a file is being referenced
        if path[0] not in './':
            path = './' + path
        log.error("'%s' is not a valid conary changeset: %s" % (path, msg))
        # XXX sys.exit is gross
        import sys
        sys.exit(1)
    return cs
def _sanityCheckForSSL(self):
    """Check SSL settings, create SSL certificate if missing.
    Returns 0 if everything is OK, or an exit code otherwise"""
    if not self.requiresSsl():
        return 0

    if not self.sslCertPath:
        log.error("sslCertPath needs to be set - cannot start server")
        return 1
    try:
        util.mkdirChain(os.path.dirname(self.sslCertPath))
    except OSError, err:
        log.error("Could not access sslCert dir %s: %s",
                  os.path.dirname(self.sslCertPath), err)
        return 1
def removeRollbacks(db, rollbackSpec):
    rollbackStack = db.getRollbackStack()
    rollbackList = rollbackStack.getList()

    if rollbackSpec.startswith('r.'):
        try:
            i = rollbackList.index(rollbackSpec)
        except ValueError:
            log.error("rollback '%s' not present" % rollbackSpec)
            return 1

        rollbacks = rollbackList[:i + 1]
    else:
        try:
            rollbackCount = int(rollbackSpec)
        except ValueError:
            log.error("integer rollback count expected instead of '%s'"
                      % rollbackSpec)
            return 1

        if rollbackCount < 1:
            log.error("rollback count must be positive")
            return 1
        elif rollbackCount > len(rollbackList):
            log.error("rollback count higher than the number of rollbacks "
                      "available")
            return 1

        rollbacks = rollbackList[:rollbackCount]

    for rb in rollbacks:
        rollbackStack.remove(rb)

    return 0
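# Usage sketch for removeRollbacks, mirroring the two spec forms above
# (the db handle and rollback name are illustrative).
def _example_removeRollbacks(db):
    # by name: removes everything up to and including r.120
    removeRollbacks(db, 'r.120')
    # by count: removes the first three entries of the rollback list
    removeRollbacks(db, '3')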
def __call__(self):
    """
    Translate a shell PATTERN to a regular expression, substituting macros.

    There is no way to quote meta-characters.
    """
    # macros must be substituted first, so that they can be properly
    # escaped
    try:
        pat = self.pattern % self.macros
    except ValueError, msg:
        log.error('invalid macro substitution in "%s", missing "s"?'
                  % self.pattern)
        raise
def runCommand(self, cfg, argSet, args, profile=False, callback=None,
               repos=None):
    args = args[1:]
    binary = argSet.pop('binary', False)
    text = argSet.pop('text', False)

    if binary and text:
        log.error("files cannot be both binary and text")
        return 1

    if argSet:
        return self.usage()
    if len(args) < 2:
        return self.usage()

    checkin.setFileFlags(repos, args[1:], text=text, binary=binary)
def fix(self):
    from conary.repository.netrepos import accessmap
    db = self.getDB()
    ri = accessmap.RoleInstances(db)
    for (permissionId, roleId, role) in self._status:
        log.info("fixing permission cache for %s...", role)
        ri.updatePermissionId(permissionId, roleId)
    log.info("checking again to verify changes...")
    self._status = set()
    if not self.check():
        log.error("FAILED to fix the permissions cache. "
                  "Unhandled error - contact rPath")
        db.rollback()
        return False
    self.commit()
    return True
class CheckSchema(Checker):
    """ checks for schema version """
    def _postinit(self):
        self._alwaysfix = self._fix

    def check(self):
        db = self.getDB()
        dbVersion = db.getVersion()
        if dbVersion.major == schema.VERSION.major:
            log.info("schema is compatible with this codebase")
            return True
        log.error("codebase requires schema %s, repository has %s",
                  schema.VERSION, dbVersion)
        return False

    def fix(self):
        db = self.getDB()
        dbVersion = db.getVersion()
        try:
            log.info("performing a schema migration...")
            newVersion = schema.loadSchema(db, doMigrate=True)
        except sqlerrors.SchemaVersionError, e:
            log.error(e.msg)
            return False
        if newVersion < dbVersion:
            log.error("schema migration failed from %s to %s"
                      % (dbVersion, schema.VERSION))
            return False
        if newVersion == dbVersion:
            # did a big whoop noop
            log.info("schema check complete")
        else:
            log.info("schema migration from %s to %s completed"
                     % (dbVersion, newVersion))
        self.commit()
        return True
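# Hedged driver sketch for a Checker subclass such as CheckSchema: check()
# reports compatibility, fix() migrates and commits (construction of the
# checker is assumed to happen elsewhere).
def _example_checkSchema(checker):
    if not checker.check():
        if not checker.fix():
            sys.exit(1)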
def __init__(self, *args, **keywords):
    assert(self.__class__ is not Action)
    # keywords will be in the class object, not the instance
    if not hasattr(self.__class__, 'keywords'):
        self.keywords = {}
    self._applyDefaults()
    self.addArgs(*args, **keywords)
    # verify that there are not broken format strings
    d = _AnyDict()
    for arg in args:
        if type(arg) is str and '%' in arg:
            try:
                arg % d
            except ValueError, msg:
                log.error('invalid macro substitution in "%s", '
                          'missing "s"?' % arg)
                raise
def check(self):
    if not self.contentsDir:
        log.error("contentsDir needs to be specified")
        return False
    if not self.tmpDir:
        log.error("tmpDir needs to be specified")
        return False
    if not os.path.isdir(self.tmpDir):
        log.error("%s needs to be a directory", self.tmpDir)
        return False
    if not os.access(self.tmpDir, os.R_OK | os.W_OK | os.X_OK):
        log.error("%s needs to allow full read/write access", self.tmpDir)
        return False
    if os.path.realpath(self.tmpDir) != self.tmpDir:
        log.error("tmpDir cannot include symbolic links")
        return False
    return True
def logAndEmail(req, cfg, header, msg):
    timeStamp = time.ctime(time.time())

    if not cfg.bugsFromEmail or not cfg.bugsToEmail:
        return

    log.error('sending mail to %s' % cfg.bugsToEmail)
    # send email
    body = header + '\n'
    body += 'Time of occurrence: %s\n' % timeStamp
    body += 'Conary repository server: %s\n\n' % req.hostname
    body += msg + '\n'
    body += '\nConnection Information:\n'
    body += formatRequestInfo(req)

    sendMail(cfg.bugsFromEmail, cfg.bugsEmailName, cfg.bugsToEmail,
             cfg.bugsEmailSubject, body)
def runCommand(self, client, cfg, argSet, args):
    command, args = self.requireParameters(args, allowExtra=True)
    if args:
        jobId = _getJobIdOrUUId(args[0])
        troveSpecs = args[1:]
        try:
            jobId = int(jobId)
        except ValueError:
            self.usage()
            log.error("bad jobId '%s'", jobId)
            return 1
    else:
        jobId = None
        troveSpecs = []

    displayTroves = argSet.pop('troves', False)
    displayDetails = argSet.pop('info', False)
    showFullVersions = argSet.pop('full-versions', False)
    showFullFlavors = argSet.pop('flavors', False)
    showLabels = argSet.pop('labels', False)
    showTracebacks = argSet.pop('tracebacks', False)
    showLogs = argSet.pop('logs', False)
    showConfig = argSet.pop('show-config', False)
    if argSet.pop('all', False):
        limit = None
    else:
        limit = 20
    activeOnly = argSet.pop('active', False)
    watchJob = argSet.pop('watch', False)

    query.displayJobInfo(client, jobId, troveSpecs,
                         displayTroves=displayTroves,
                         displayDetails=displayDetails,
                         showLogs=showLogs,
                         showBuildLogs=showLogs,
                         showFullVersions=showFullVersions,
                         showFullFlavors=showFullFlavors,
                         showLabels=showLabels,
                         showTracebacks=showTracebacks,
                         showConfig=showConfig,
                         jobLimit=limit,
                         activeOnly=activeOnly)
    if watchJob:
        client.watch(jobId, showBuildLogs=True, showTroveLogs=True)
def logErrorAndEmail(req, cfg, exception, e, bt):
    timeStamp = time.ctime(time.time())
    header = 'Unhandled exception from conary repository: %s\n%s: %s\n' % (
        req.hostname, exception.__name__, e)

    # Nicely format the exception
    tbFd, tbPath = tempfile.mkstemp('.txt', 'repos-error-')
    out = os.fdopen(tbFd, 'w+')
    formatTrace(exception, e, bt, stream=out, withLocals=False)
    out.write("\nFull stack:\n")
    formatTrace(exception, e, bt, stream=out, withLocals=True)
    out.seek(0)
    msg = out.read()
    logAndEmail(req, cfg, header, msg)

    # log error
    lines = traceback.format_exception(*sys.exc_info())
    lines.insert(0, "Unhandled exception from conary repository:\n")
    lines.append('Extended traceback at %s\n' % (tbPath,))
    log.error(''.join(lines))
def sanityCheckForStart(self):
    currUser = pwd.getpwuid(os.getuid()).pw_name
    cfgPaths = ['logDir', 'lockDir', 'serverDir']

    socketPath = self.getSocketPath()
    if socketPath:
        if not os.access(os.path.dirname(socketPath), os.W_OK):
            log.error('cannot write to socketPath directory at %s'
                      ' - cannot start server'
                      % os.path.dirname(socketPath))
            sys.exit(1)

    ret = self._sanityCheckForSSL()
    if ret:
        sys.exit(ret)

    cfgPaths = ['buildDir', 'logDir', 'lockDir', 'serverDir']
    for path in cfgPaths:
        if not os.path.exists(self[path]):
            log.error('%s does not exist, expected at %s - cannot start'
                      ' server' % (path, self[path]))
            sys.exit(1)
        if not os.access(self[path], os.W_OK):
            log.error('user "%s" cannot write to %s at %s - cannot start'
                      ' server' % (currUser, path, self[path]))
            sys.exit(1)
    if self.useResolverCache:
        util.mkdirChain(self.getResolverCachePath())
def _main(argv, MainClass):
    """
    Python hook for starting rbuild from the command line.
    @param argv: standard argument vector
    """
    if argv is None:
        argv = sys.argv
    #pylint: disable-msg=E0701
    # pylint complains about except clauses here because we sometimes
    # redefine debuggerException
    debuggerException = Exception
    try:
        argv = list(argv)
        debugAll = '--debug-all' in argv
        if debugAll:
            log.setupLogging(consoleLevel=logging.DEBUG)
            argv.remove('--debug-all')
        else:
            debuggerException = errors.JButlerInternalError
        sys.excepthook = errors.genExcepthook(debug=debugAll,
                                              debugCtrlC=debugAll)
        rc = MainClass().main(argv, debuggerException=debuggerException)
        if rc is None:
            return 0
        return rc
    except debuggerException:
        raise
    except IOError as e:
        # allow broken pipe to exit
        if e.errno != errno.EPIPE:
            log.error(e)
            return 1
    except KeyboardInterrupt:
        return 1
    except Exception as e:
        log.error(e)
        return 1
    return 0
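# Typical entry-point wiring for _main. JButlerMain is an illustrative name
# for whatever Main subclass the program defines; the hook itself handles
# --debug-all, broken pipes, and Ctrl-C.
if __name__ == '__main__':
    sys.exit(_main(sys.argv, JButlerMain))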
def error(self, msg, *args, **kwargs):
    """Error handling callback

    @param msg: A message to display
    @type msg: str
    @keyword exc_text: Traceback text that should be printed verbatim
    @type exc_text: str
    """
    exc_text = kwargs.pop('exc_text', None)
    # Append the traceback to the message
    if exc_text:
        msg += "\n%s"
        args += (exc_text,)
    return log.error(msg, *args, **kwargs)
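# Caller sketch for the error() callback: exc_text rides along as a keyword
# and is appended verbatim after the formatted message (the callback
# instance and message values are illustrative).
def _example_error(cb):
    try:
        1 / 0
    except ZeroDivisionError:
        import traceback
        cb.error("job %s failed", 42, exc_text=traceback.format_exc())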
def commitJobs(self, jobIds, message=None, commitOutdatedSources=False,
               commitWithFailures=True, waitForJob=False,
               sourceOnly=False, updateRecipes=True, excludeSpecs=None,
               writeToFile=None):
    """
    Commits a set of jobs.

    Committing in rMake is slightly different from committing in conary.
    rMake uses the conary "clone" command to move the binary stored in
    its internal repository out into the repository the source component
    came from.

    @param jobIds: jobId or uuid for each job to commit.
    @type jobIds: int or uuid, or list of either
    @param message: Message to use for source commits.
    @type message: str
    @param commitOutdatedSources: if True, allow commit of sources
    even if someone else has changed the source component outside
    of rMake before you.
    @param commitWithFailures: if True, allow commit of this job
    even if parts of the job have failed.
    @param waitForJob: if True, wait for the job to finish if necessary
    before committing.
    @param sourceOnly: if True, only commit the source component.
    @param writeToFile: if set to a path, the changeset is written to
    that path instead of committed to the repository (Advanced)
    @return: False if job failed to commit, True if it succeeded.
    @raise: JobNotFound: If job does not exist
    """
    if not isinstance(jobIds, (list, tuple)):
        jobIds = [jobIds]
    jobs = self.client.getJobs(jobIds, withConfigs=True)
    finalJobs = []
    for job in jobs:
        jobId = job.jobId
        if job.isCommitting():
            raise errors.RmakeError("Job %s is already committing"
                                    % job.jobId)
        if not job.isFinished() and waitForJob:
            print "Waiting for job %s to complete before committing" % jobId
            try:
                self.waitForJob(jobId)
            except Exception, err:
                print "Wait interrupted, not committing"
                print "You can restart commit by running 'rmake commit %s'" \
                    % jobId
                raise
            job = self.client.getJob(jobId)
        if not job.isFinished():
            log.error('Job %s is not yet finished' % jobId)
            return False
        if job.isFailed() and not commitWithFailures:
            log.error('Job %s has failures, not committing' % jobId)
            return False
        if not list(job.iterBuiltTroves()):
            log.error('Job %s has no built troves to commit' % jobId)
            return True
        finalJobs.append(job)
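# Usage sketch for commitJobs (hypothetical helper instance; the jobIds and
# message are illustrative).
def _example_commitJobs(helper):
    ok = helper.commitJobs([101, 102], message='rebuild for CVE fix',
                           waitForJob=True)
    if not ok:
        log.error('commit failed')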