def shouldRun(self, repo):
    logger.info("%s.%s", self.forestName, repo.name)
    local = self.prep(repo.path)
    okay = True
    if os.path.exists(os.path.join(local, ".git")):
        if self.ask:
            okay = cmdline.askYn("Dir exists; continue with pull? [y/N]",
                                 default=False)
    else:
        if self.ask:
            okay = cmdline.askYn("continue with clone? [y/N]", default=False)
    return okay
def _finishClone(client, cfg, cs, callback, info=False, test=False,
                 ignoreConflicts=False, targetFile=None):
    repos = client.repos
    if cfg.interactive or info:
        print 'The following clones will be created:'
        displayCloneJob(cs)

    if info:
        return

    if cfg.interactive:
        print
        okay = cmdline.askYn('continue with clone? [y/N]', default=False)
        if not okay:
            return

    if targetFile:
        cs.writeToFile(targetFile)
    elif not test:
        repos.commitChangeSet(cs, callback=callback)
    return cs
def shouldRun(self, repo):
    logger.info("%s.%s", self.forestName, repo.name)
    okay = True
    if self.ask:
        okay = cmdline.askYn("continue? [y/N]", default=False)
    return okay
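# All of the call sites collected here follow the same confirmation idiom:
# describe the pending operation, then gate the destructive step on
# cmdline.askYn() when running interactively, with the bracketed hint in the
# prompt ([y/N] or [Y/n]) matching the default= argument. A minimal sketch of
# that idiom; runChecked and its arguments are illustrative only, and the
# import assumes cmdline here is conary.lib.cmdline, the module these callers
# appear to use.
from conary.lib import cmdline

def runChecked(cfg, prompt, action):
    # Ask only in interactive mode; a bare Enter takes the default ("no" here).
    if cfg.interactive:
        if not cmdline.askYn('%s [y/N]' % prompt, default=False):
            return False
    action()
    return True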
def _test(input, result, default=None):
    oldStdin, oldStdout = sys.stdin, sys.stdout
    try:
        outBuffer = StringIO()
        inBuffer = StringIO()
        sys.stdout = outBuffer
        sys.stdin = inBuffer
        inBuffer.write(input)
        inBuffer.seek(0)
        assert(cmdline.askYn('foo', default=default) == result)
    finally:
        sys.stdin, sys.stdout = oldStdin, oldStdout
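# The helper above swaps sys.stdin/sys.stdout for StringIO buffers so askYn
# can be exercised without a terminal. Hypothetical invocations, assuming
# askYn accepts single-letter y/n answers and returns the supplied default
# when the input line is empty:
_test('y\n', True)
_test('n\n', False)
_test('\n', True, default=True)     # bare Enter falls back to the default
_test('\n', False, default=False)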
def auditPolicy(self):
    new = True
    numPkgs = [x for x in self._logFiles if x.auditPolicy()]
    print("Found %s of %s packages with policy errors"
          % (len(numPkgs), len(self._logFiles)))
    prompt = 80 * '=' + '\n' + 'Repeat Log Data? (y/N): '
    for logObj in self._logFiles:
        while new or askYn(prompt):
            new = False
            records = logObj.auditPolicy()
            if not records:
                break
            print 'Package: ', logObj
            for rec in records:
                print rec
        new = True
def _updateTroves(cfg, applyList, **kwargs):
    # Take out the apply-related keyword arguments
    applyDefaults = dict(
        replaceFiles = False,
        replaceManagedFiles = False,
        replaceUnmanagedFiles = False,
        replaceModifiedFiles = False,
        replaceModifiedConfigFiles = False,
        tagScript = None,
        justDatabase = False,
        skipCapsuleOps = False,
        info = False,
        keepJournal = False,
        noRestart = False,
        noScripts = False,
    )
    applyKwargs = {}
    for k in applyDefaults:
        if k in kwargs:
            applyKwargs[k] = kwargs.pop(k)
    callback = kwargs.pop('callback')
    loadTroveCache = kwargs.pop('loadTroveCache', False)
    applyKwargs['test'] = kwargs.get('test', False)
    applyKwargs['localRollbacks'] = cfg.localRollbacks
    applyKwargs['autoPinList'] = cfg.pinTroves

    model = kwargs.pop('systemModel', None)
    modelFile = kwargs.pop('systemModelFile', None)
    modelGraph = kwargs.pop('modelGraph', None)
    modelTrace = kwargs.pop('modelTrace', None)

    noRestart = applyKwargs.get('noRestart', False)

    client = conaryclient.ConaryClient(cfg, modelFile=modelFile)
    client.setUpdateCallback(callback)
    if kwargs.pop('disconnected', False):
        client.disconnectRepos()
    migrate = kwargs.get('migrate', False)
    # even though we no longer differentiate forceMigrate, we still
    # remove it from kwargs to avoid confusing prepareUpdateJob
    kwargs.pop('forceMigrate', False)
    restartInfo = kwargs.get('restartInfo', None)

    # Initialize the critical update set
    applyCriticalOnly = kwargs.get('applyCriticalOnly', False)
    if kwargs.get('criticalUpdateInfo') is not None:
        kwargs['criticalUpdateInfo'].criticalOnly = applyCriticalOnly
    else:
        kwargs['criticalUpdateInfo'] = CriticalUpdateInfo(applyCriticalOnly)

    info = applyKwargs.pop('info', False)

    # Rename depCheck to resolveDeps
    depCheck = kwargs.pop('depCheck', True)
    kwargs['resolveDeps'] = depCheck

    if not info:
        client.checkWriteableRoot()

    # Unfortunately there's no easy way to make 'test' or 'info' mode work
    # with capsule sync, doubly so because it influences the decisions made
    # later on about what troves to update. So this will always really
    # apply, but the good news is that it never modifies the system outside
    # of the Conary DB.
    client.syncCapsuleDatabase(callback, makePins=True)

    updJob = client.newUpdateJob()

    try:
        if model:
            changeSetList = kwargs.get('fromChangesets', [])
            criticalUpdates = kwargs.get('criticalUpdateInfo', None)

            tc = modelupdate.CMLTroveCache(client.getDatabase(),
                                           client.getRepos(),
                                           callback = callback,
                                           changeSetList = changeSetList)
            tcPath = cfg.root + cfg.dbPath + '/modelcache'
            if loadTroveCache:
                if os.path.exists(tcPath):
                    log.info("loading %s", tcPath)
                    callback.loadingModelCache()
                    tc.load(tcPath)
            ts = client.cmlGraph(model, changeSetList = changeSetList)
            if modelGraph is not None:
                ts.g.generateDotFile(modelGraph)
            suggMap = client._updateFromTroveSetGraph(updJob, ts, tc,
                                        fromChangesets = changeSetList,
                                        criticalUpdateInfo = criticalUpdates,
                                        callback = callback)
            if modelTrace is not None:
                ts.g.trace([ parseTroveSpec(x) for x in modelTrace ])

            finalModel = copy.deepcopy(model)
            if model.suggestSimplifications(tc, ts.g):
                log.info("possible system model simplifications found")
                ts2 = client.cmlGraph(model, changeSetList = changeSetList)
                updJob2 = client.newUpdateJob()
                try:
                    suggMap2 = client._updateFromTroveSetGraph(updJob2, ts2,
                                        tc,
                                        fromChangesets = changeSetList,
                                        criticalUpdateInfo = criticalUpdates)
                except errors.TroveNotFound:
                    log.info("bad model generated; bailing")
                else:
                    if (suggMap == suggMap2 and
                        updJob.getJobs() == updJob2.getJobs()):
                        log.info("simplified model verified; using it instead")
                        ts = ts2
                        finalModel = model
                        updJob = updJob2
                        suggMap = suggMap2
                    else:
                        log.info("simplified model changed result; ignoring")

            model = finalModel
            modelFile.model = finalModel

            if tc.cacheModified():
                log.info("saving %s", tcPath)
                callback.savingModelCache()
                tc.save(tcPath)
                callback.done()
        else:
            suggMap = client.prepareUpdateJob(updJob, applyList, **kwargs)
    except:
        callback.done()
        client.close()
        raise

    if info:
        callback.done()
        displayUpdateInfo(updJob, cfg, noRestart=noRestart)
        if restartInfo and not model:
            callback.done()
            newJobs = set(itertools.chain(*updJob.getJobs()))
            oldJobs = set(updJob.getItemList())
            addedJobs = newJobs - oldJobs
            removedJobs = oldJobs - newJobs
            if addedJobs or removedJobs:
                print
                print 'NOTE: after critical updates were applied, the contents of the update were recalculated:'
                print
                displayChangedJobs(addedJobs, removedJobs, cfg)
        updJob.close()
        client.close()
        return

    if model:
        missingLocalTroves = model.getMissingLocalTroves(tc, ts)
        if missingLocalTroves:
            print 'Update would leave references to missing local troves:'
            for troveTup in missingLocalTroves:
                if not isinstance(troveTup, trovetup.TroveTuple):
                    troveTup = trovetup.TroveTuple(troveTup)
                print "\t" + str(troveTup)
            client.close()
            return

    if suggMap:
        callback.done()
        dcfg = display.DisplayConfig()
        dcfg.setTroveDisplay(fullFlavors = cfg.fullFlavors,
                             fullVersions = cfg.fullVersions,
                             showLabels = cfg.showLabels)
        formatter = display.TroveTupFormatter(dcfg)

        print "Including extra troves to resolve dependencies:"
        print "   ",

        items = sorted(set(formatter.formatNVF(*x)
                           for x in itertools.chain(*suggMap.itervalues())))
        print " ".join(items)

    askInteractive = cfg.interactive

    if restartInfo:
        callback.done()
        newJobs = set(itertools.chain(*updJob.getJobs()))
        oldJobs = set(updJob.getItemList())
        addedJobs = newJobs - oldJobs
        removedJobs = oldJobs - newJobs
        if not model and addedJobs or removedJobs:
            print 'NOTE: after critical updates were applied, the contents of the update were recalculated:'
            displayChangedJobs(addedJobs, removedJobs, cfg)
        else:
            askInteractive = False

    if not updJob.jobs:
        # Nothing to do
        print 'Update would not modify system'
        if model and not kwargs.get('test'):
            # Make sure 'conary sync' clears model.next even if nothing needs
            # to be done.
            modelFile.closeSnapshot()
        updJob.close()
        client.close()
        return
    elif askInteractive:
        print 'The following updates will be performed:'
        displayUpdateInfo(updJob, cfg, noRestart=noRestart)

    if migrate and cfg.interactive:
        print ('Migrate erases all troves not referenced in the groups'
               ' specified.')

    if askInteractive:
        if migrate:
            style = 'migrate'
        else:
            style = 'update'
        okay = cmdline.askYn('continue with %s? [Y/n]' % style, default=True)
        if not okay:
            updJob.close()
            client.close()
            return

    if not noRestart and updJob.getCriticalJobs():
        print "Performing critical system updates, will then restart update."

    try:
        restartDir = client.applyUpdateJob(updJob, **applyKwargs)
    finally:
        updJob.close()
        client.close()

    if restartDir:
        params = sys.argv

        # Write command line to disk
        import xmlrpclib
        cmdlinefile = open(os.path.join(restartDir, 'cmdline'), "w")
        cmdlinefile.write(xmlrpclib.dumps((params, ), methodresponse = True))
        cmdlinefile.close()

        # CNY-980: we should have the whole script of changes to perform in
        # the restart directory (in the job list); if in migrate mode, re-exec
        # as regular update
        if migrate and 'migrate' in params:
            params[params.index('migrate')] = 'update'

        params.extend(['--restart-info=%s' % restartDir])
        client.close()
        raise errors.ReexecRequired(
                'Critical update completed, rerunning command...', params,
                restartDir)
    else:
        if (not kwargs.get('test', False)) and model:
            modelFile.closeSnapshot()
def branch(repos, cfg, newLabel, troveSpecs, makeShadow=False,
           sourceOnly=False, binaryOnly=False, allowEmptyShadow=False,
           info=False, forceBinary=False, ignoreConflicts=False,
           targetFile=None):
    branchType = _getBranchType(binaryOnly, sourceOnly)

    client = conaryclient.ConaryClient(cfg)

    troveSpecs = [ updatecmd.parseTroveSpec(x) for x in troveSpecs ]

    componentSpecs = [ x[0] for x in troveSpecs
                       if (':' in x[0] and x[0].split(':')[1] != 'source') ]
    if componentSpecs:
        raise errors.ParseError('Cannot branch or shadow individual components: %s'
                                % ', '.join(componentSpecs))

    result = repos.findTroves(cfg.buildLabel, troveSpecs, cfg.buildFlavor)
    troveList = [ x for x in itertools.chain(*result.itervalues()) ]

    sigKey = selectSignatureKey(cfg, newLabel)

    if makeShadow:
        dups, cs = client.createShadowChangeSet(newLabel, troveList,
                                                allowEmptyShadow=allowEmptyShadow,
                                                branchType=branchType,
                                                sigKeyId=sigKey)
    else:
        dups, cs = client.createBranchChangeSet(newLabel, troveList,
                                                branchType=branchType,
                                                sigKeyId=sigKey)

    for (name, branch) in dups:
        log.warning("%s already has branch %s", name, branch.asString())

    if not cs:
        return

    if makeShadow:
        branchOps = 'shadows'
    else:
        branchOps = 'branches'

    hasBinary = False
    for trvCs in cs.iterNewTroveList():
        if not trvCs.getName().endswith(':source'):
            hasBinary = True
            break

    if cfg.interactive or info:
        print 'The following %s will be created:' % branchOps
        displayBranchJob(cs, shadow=makeShadow)

    if cfg.interactive:
        print
        if hasBinary and branchType & client.BRANCH_BINARY:
            print 'WARNING: You have chosen to create binary %s. ' \
                  'This is not recommended\nwith this version of cvc.' \
                  % branchOps
            print
        okay = cmdline.askYn('Continue with %s? [y/N]' % branchOps.lower(),
                             default=False)
        if not okay:
            return
    elif (not forceBinary) and hasBinary and branchType & client.BRANCH_BINARY:
        print 'Creating binary %s is only allowed in interactive mode. ' \
              'Rerun cvc\nwith --interactive.' % branchOps
        return 1

    if not info:
        if targetFile:
            cs.writeToFile(targetFile)
        else:
            client.repos.commitChangeSet(cs)
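# A hedged example of driving branch() above to preview a shadow without
# committing it: info=True prints the pending shadow job and skips the commit.
# The label and trove spec are hypothetical, and repos/cfg are assumed to be
# an open repository client and a loaded Conary configuration.
branch(repos, cfg, 'example.org@ns:devel-shadow', ['foo:source'],
       makeShadow=True, info=True)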
                                  (name, versionStr, flavor), cfg.flavor)
    except errors.TroveNotFound, e:
        log.error(str(e))
        return
    for trvInfo in trvList:
        troveStr += "%s=%s[%s]\n" % (trvInfo[0], trvInfo[1].asString(),
                                     deps.formatFlavor(trvInfo[2]))
        jobList.append((trvInfo[0], (None, None), (trvInfo[1], trvInfo[2]),
                        True))

    if cfg.interactive:
        print troveStr
        print "Total: %d troves" % len(jobList)
        answer = cmdline.askYn('Are you sure you want to digitally sign these troves [y/N]?',
                               default=False)
        if not answer:
            return

    # We use a changeset here instead of getTroves because changeset knows
    # how to do efficient recursion.
    cs = repos.createChangeSet(jobList, withFiles = True,
                               withFileContents = False,
                               recurse = recurse)
    totalNum = len([ x for x in cs.iterNewTroveList() ])
    misfires = []

    for i, trvCs in enumerate(cs.iterNewTroveList()):
        trv = trove.Trove(trvCs)
        callback.signTrove(i + 1, totalNum)
def applyRollback(client, rollbackSpec, returnOnError = False, **kwargs):
    """
    Apply a rollback.

    See L{conary.conaryclient.ConaryClient.applyRollback} for a description
    of the arguments for this function.
    """
    client.checkWriteableRoot()
    # Record the transaction counter, to make sure the state of the database
    # didn't change while we were computing the rollback list.
    transactionCounter = client.db.getTransactionCounter()

    log.syslog.command()

    showInfoOnly = kwargs.pop('showInfoOnly', False)

    defaults = dict(replaceFiles = False,
                    transactionCounter = transactionCounter,
                    lazyCache = client.lzCache)
    defaults.update(kwargs)

    rollbackStack = client.db.getRollbackStack()
    rollbackList = rollbackStack.getList()

    if rollbackSpec.startswith('r.'):
        try:
            i = rollbackList.index(rollbackSpec)
        except ValueError:
            log.error("rollback '%s' not present" % rollbackSpec)
            if returnOnError:
                return 1
            raise database.RollbackDoesNotExist(rollbackSpec)

        rollbacks = rollbackList[i:]
        rollbacks.reverse()
    else:
        try:
            rollbackCount = int(rollbackSpec)
        except ValueError:
            log.error("integer rollback count expected instead of '%s'" %
                      rollbackSpec)
            if returnOnError:
                return 1
            raise database.RollbackDoesNotExist(rollbackSpec)

        if rollbackCount < 1:
            log.error("rollback count must be positive")
            if returnOnError:
                return 1
            raise database.RollbackDoesNotExist(rollbackSpec)
        elif rollbackCount > len(rollbackList):
            log.error("rollback count higher than the number of rollbacks "
                      "available")
            if returnOnError:
                return 1
            raise database.RollbackDoesNotExist(rollbackSpec)

        rollbacks = rollbackList[-rollbackCount:]
        rollbacks.reverse()

    capsuleChangeSet = changeset.ReadOnlyChangeSet()
    for path in defaults.pop('capsuleChangesets', []):
        if os.path.isdir(path):
            pathList = [ os.path.join(path, x) for x in os.listdir(path) ]
        else:
            pathList = [ path ]
        for p in pathList:
            if not os.path.isfile(p):
                continue
            try:
                cs = changeset.ChangeSetFromFile(p)
            except filecontainer.BadContainer:
                continue
            capsuleChangeSet.merge(cs)
    defaults['capsuleChangeSet'] = capsuleChangeSet

    #-- Show only information and return
    if showInfoOnly or client.cfg.interactive:
        rollbackList = [ rollbackStack.getRollback(x) for x in rollbacks
                         if rollbackStack.hasRollback(x) ]
        formatRollbacksAsUpdate(client.cfg, rollbackList)

    if showInfoOnly:
        return 0

    #-- Interactive input (default behaviour)
    if client.cfg.interactive:
        okay = cmdline.askYn('continue with rollback? [y/N]', default=False)
        if not okay:
            return 1

    try:
        client.db.applyRollbackList(client.getRepos(), rollbacks, **defaults)
    except database.RollbackError, e:
        log.error("%s", e)
        if returnOnError:
            return 1
        raise
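# A hedged example of calling applyRollback() above, showing both spellings of
# rollbackSpec that the code accepts: an integer count of most-recent rollbacks,
# or a literal rollback name beginning with 'r.'. The name and count are
# hypothetical, and client is assumed to be an already-constructed ConaryClient.
applyRollback(client, '3', showInfoOnly=True)      # display the last 3 rollbacks, apply nothing
applyRollback(client, 'r.42', returnOnError=True)  # apply every rollback from 'r.42' onward, newest first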