def main(args):
    """
    Command-line driver: build a template for a trove/kernel pair and poll
    until the TemplateGenerator reports completion or failure.

    args: [troveSpec, kernelSpec] or [troveSpec, kernelSpec, workDir]
    (workDir defaults to the current directory).
    """
    import time
    from conary import conarycfg
    from conary.conaryclient.cmdline import parseTroveSpec
    setupLogging(consoleLevel=logging.DEBUG, consoleFormat='file')
    if len(args) == 2:
        troveSpec, kernelSpec, workDir = args[0], args[1], '.'
    elif len(args) == 3:
        troveSpec, kernelSpec, workDir = args
    else:
        sys.exit("Usage: %s <troveSpec> <kernelSpec> [<workDir>]" % sys.argv[0])
    cfg = conarycfg.ConaryConfiguration(False)
    cfg.configLine('includeConfigFile http://localhost/conaryrc')
    cli = ConaryClient(cfg)
    repos = cli.getRepos()
    # findTrove returns every match; sorted()[-1] keeps the newest tuple.
    troveTup = sorted(repos.findTrove(None, parseTroveSpec(troveSpec)))[-1]
    kernelTup = sorted(repos.findTrove(None, parseTroveSpec(kernelSpec)))[-1]
    generator = TemplateGenerator(troveTup, kernelTup, cfg, workDir)
    # First call kicks off generation; subsequent calls poll its status.
    generator.getTemplate(start=True)
    while True:
        status, path = generator.getTemplate(start=False)
        if status == generator.Status.NOT_FOUND:
            print 'Failed!'
            break
        elif status == generator.Status.DONE:
            print 'Done:', path
            break
        time.sleep(1)
    generator.wait()
def createChangeSetFile(self, jobId, path, troveSpecs=None):
    """
    Creates a changeset file with all the built troves for a job.
    @param jobId: jobId or uuid for a given job.
    @type jobId: int or uuid
    @param path: filesystem path the changeset file is written to.
    @type path: str
    @param troveSpecs: optional trove spec strings restricting the
        changeset to a subset of the job's built troves.
    @type troveSpecs: list of str
    @return: False if changeset not created, True if it was.
    @raise: JobNotFound: If job does not exist
    """
    job = self.client.getJob(jobId)
    # Collect every binary trove built by any trove in the job.
    binTroves = []
    for trove in job.iterTroves():
        binTroves.extend(trove.iterBuiltTroves())
    if not binTroves:
        log.error('No built troves associated with this job')
        return False
    if troveSpecs:
        # Narrow the built troves down to those matching the given specs.
        troveSpecs = [ cmdline.parseTroveSpec(x) for x in troveSpecs ]
        source = trovesource.SimpleTroveSource(binTroves)
        results = source.findTroves(None, troveSpecs)
        binTroves = list(itertools.chain(*results.values()))
        primaryTroveList = binTroves
        recurse = True
    else:
        recurse = False
        # Without specs, mark only packages (no ':' component names) primary.
        primaryTroveList = [ x for x in binTroves if ':' not in x[0]]
    # Absolute changeset jobs: (name, (oldVer, oldFla)=(None, None),
    # (newVer, newFla), absolute=True).
    jobList = [(x[0], (None, None), (x[1], x[2]), True)
               for x in binTroves ]
    self.getRepos().createChangeSetFile(jobList, path, recurse=recurse,
                                        primaryTroveList=primaryTroveList)
    return True
def iterAllLoadedSpecs(self):
    """Yield (parsedTroveSpec, troveTup) for every loaded spec, walking
    depth-first through nested load dictionaries."""
    pending = [self.getLoadedSpecs()]
    while pending:
        current = pending.pop()
        for specString, (troveTup, childSpecs) in current.iteritems():
            yield cmdline.parseTroveSpec(specString), troveTup
            # Queue the nested dict so its specs are yielded too.
            pending.append(childSpecs)
def shortTroveSpec(spec):
    """Render *spec* as "name=revision (arch)", keeping only the trailing
    revision of its version."""
    name, versionString, flavor = parseTroveSpec(spec)
    try:
        version = versions.VersionFromString(versionString)
    except conary.errors.ParseError:
        # we got a frozen version string
        version = versions.ThawVersion(versionString)
    return "%s=%s (%s)" % (name, str(version.trailingRevision()),
                           getArchFromFlavor(flavor))
def getProductVersionPlatformVersion(self, hostname, version):
    """
    Return the platform version a product version is based on, or an
    EmptyPlatformVersion when the product has no platform source trove,
    the platform is not defined locally, or no platform trove is listed
    in the platform search paths.
    """
    self.auth.requireProductReadAccess(hostname)
    pd = self.productMgr.getProductVersionDefinition(hostname, version)
    # NOTE(review): platformName is computed but unused here — confirm
    # whether it can be dropped.
    platformName = pd.getPlatformName()
    sourceTrove = pd.getPlatformSourceTrove()
    if not sourceTrove:
        return models.EmptyPlatformVersion()
    n,v,f = cmdline.parseTroveSpec(sourceTrove)
    v = versions.VersionFromString(v)
    # convert trove name from unicode
    platformLabel = str(v.trailingLabel())
    localPlatform = self.platformMgr.getPlatformByLabel(platformLabel)
    if localPlatform:
        # Only search-path entries flagged as platform troves count.
        platformTroves = [pt for pt in pd.getPlatformSearchPaths() \
                          if pt.isPlatformTrove]
        if not platformTroves:
            return models.EmptyPlatformVersion()
        platformTrove = platformTroves[0]
        name = str(platformTrove.troveName)
        revision = str(platformTrove.version)
        return self.platformMgr.getPlatformVersion(
            localPlatform.platformId, "%s=%s" % (name, revision))
    else:
        return models.EmptyPlatformVersion()
def _findTroves(self, specList, labelPath=None, defaultFlavor=None,
                allowMissing=False):
    """
    Look up trove specs in the repository.

    @param specList: entries may be "name[=version][[flavor]]" strings or
        already-parsed (name, versionStr, flavor) tuples
    @param labelPath: label (string or object) or list thereof to search
    @param defaultFlavor: flavor applied to specs lacking their own
    @param allowMissing: if True, unmatched specs are simply absent from
        the result instead of raising
    @return: dict mapping each ORIGINAL specList entry to its list of
        matching (name, version, flavor) tuples
    """
    newSpecList = []
    specMap = {}
    # Parse string specs, remembering which parsed spec came from which
    # input value so results can be re-keyed by the caller's originals.
    for spec in specList:
        if not isinstance(spec, tuple):
            newSpec = cmdline.parseTroveSpec(spec)
        else:
            newSpec = spec
        newSpecList.append(newSpec)
        specMap[newSpec] = spec
    repos = self._getRepositoryClient()
    if isinstance(labelPath, (tuple, list)):
        labelPath = [self._getLabel(x) for x in labelPath]
    elif labelPath:
        labelPath = self._getLabel(labelPath)
    defaultFlavor = self._getFlavor(defaultFlavor, keepNone=True)
    results = repos.findTroves(labelPath, newSpecList,
                               defaultFlavor=defaultFlavor,
                               allowMissing=allowMissing)
    # Re-key the results by the caller's original spec values.
    return dict((specMap[x[0]], x[1]) for x in results.items())
def getProductVersionPlatform(self, hostname, version):
    """
    Describe the platform a product version is based on.

    @return: models.ProductPlatform; trove name/version/label fields are
        empty strings when the version has no platform source trove, and
        platformId/enabled stay None unless the platform is also defined
        locally.
    """
    self.auth.requireProductReadAccess(hostname)
    pd = self.productMgr.getProductVersionDefinition(hostname, version)
    platformName = pd.getPlatformName()
    sourceTrove = pd.getPlatformSourceTrove()
    if not sourceTrove:
        # No platform: return a placeholder carrying only names.
        return models.ProductPlatform(platformTroveName='',
                                      platformVersion='',
                                      label='',
                                      platformName=platformName,
                                      hostname=hostname,
                                      productVersion=version)
    n,v,f = cmdline.parseTroveSpec(sourceTrove)
    v = versions.VersionFromString(v)
    # convert trove name from unicode
    platformLabel = str(v.trailingLabel())
    localPlatform = self.platformMgr.getPlatformByLabel(platformLabel)
    platformId = None
    platformEnabled = None
    if localPlatform:
        platformId = localPlatform.platformId
        platformEnabled = bool(localPlatform.enabled)
    return models.ProductPlatform(platformTroveName=str(n),
                                  platformVersion=str(v.trailingRevision()),
                                  label=platformLabel,
                                  platformName=platformName,
                                  hostname=hostname,
                                  productVersion=version,
                                  enabled=platformEnabled,
                                  platformId = platformId)
def setProductInfo(trvSpec, info):
    """
    Store *info* (JSON-serialized) under the 'product_info' key of the
    key/value metadata of the newest trove matching *trvSpec*.

    Returns 1 when no trove matches; otherwise writes the updated
    troveInfo back to the repository.
    """
    trvSpec = cmdline.parseTroveSpec(trvSpec)
    cfg = conarycfg.ConaryConfiguration(True)
    client = conaryclient.ConaryClient(cfg)
    repos = client.getRepos()
    nvfs = repos.findTrove(None, trvSpec)
    if not len(nvfs):
        print >>sys.stderr, 'did not find any troves matching %s' % trvSpec
        return 1
    nvf = nvfs[0]
    trv = repos.getTrove(*nvf)
    md = trv.troveInfo.metadata
    # Metadata layer 1 holds the key/value store; create one if missing.
    keyValue = md.get(1).get('keyValue')
    if not keyValue:
        mi = trove.MetadataItem()
        md.addItem(mi)
        keyValue = mi.keyValue
    keyValue['product_info'] = json.dumps(info)
    repos.setTroveInfo([(nvf, trv.troveInfo), ])
def createSearchPathFromStrings(searchPath): """ Creates a list of items that can be passed into createSearchSource. Valid items in the searchPath include: 1. troveSpec (foo=:devel) or list of trovespecs 2. string for label (conary.rpath.com@rpl:devel) 3. label objects or list of label objects. """ from conary.conaryclient import cmdline from conary import conarycfg labelList = [] finalPath = [] if not isinstance(searchPath, (list, tuple)): searchPath = [searchPath] for item in searchPath: if isinstance(item, conarycfg.CfgLabelList): item = tuple(item) elif isinstance(item, versions.Label): labelList.append(item) continue elif isinstance(item, (list, tuple)): # recurse item = list(itertools.chain(*createSearchPathFromStrings(item))) elif isinstance(item, str): if '=' in item: # only troveSpecs have = in them item = (cmdline.parseTroveSpec(item), ) elif '@' in item: try: item = versions.Label(item) except baseerrors.ParseError, err: raise baseerrors.ParseError( 'Error parsing label "%s": %s' % (item, err)) labelList.append(item) continue else: item = (cmdline.parseTroveSpec(item), ) else: raise baseerrors.ParseError('Unknown searchPath item "%s"' % item) # labels don't get here, so we know that this is not part of a # labelPath if labelList: finalPath.append(tuple(labelList)) labelList = [] finalPath.append(item)
def _check(tups, troveSpecs):
    """Assert that searching *troveSpecs* against a database-style trove
    source built from *tups* returns exactly the tuples in *tups*."""
    db = trovesource.SimpleTroveSource(tups)
    db.searchAsDatabase()
    parsedSpecs = [cmdline.parseTroveSpec(spec) for spec in troveSpecs]
    found = db.findTroves(None, parsedSpecs)
    flattened = itertools.chain(*found.itervalues())
    assert(set(flattened) == set(tups))
def createSearchPathFromStrings(searchPath): """ Creates a list of items that can be passed into createSearchSource. Valid items in the searchPath include: 1. troveSpec (foo=:devel) or list of trovespecs 2. string for label (conary.rpath.com@rpl:devel) 3. label objects or list of label objects. """ from conary.conaryclient import cmdline from conary import conarycfg labelList = [] finalPath = [] if not isinstance(searchPath, (list, tuple)): searchPath = [searchPath] for item in searchPath: if isinstance(item, conarycfg.CfgLabelList): item = tuple(item) elif isinstance(item, versions.Label): labelList.append(item) continue elif isinstance(item, (list, tuple)): # recurse item = list(itertools.chain(*createSearchPathFromStrings(item))) elif isinstance(item, str): if '=' in item: # only troveSpecs have = in them item = ( cmdline.parseTroveSpec(item), ) elif '@' in item: try: item = versions.Label(item) except baseerrors.ParseError, err: raise baseerrors.ParseError( 'Error parsing label "%s": %s' % (item, err)) labelList.append(item) continue else: item = (cmdline.parseTroveSpec(item),) else: raise baseerrors.ParseError('Unknown searchPath item "%s"' % item) # labels don't get here, so we know that this is not part of a # labelPath if labelList: finalPath.append(tuple(labelList)) labelList = [] finalPath.append(item)
def _check(tups, troveSpecs):
    """Verify that a database-style SimpleTroveSource over *tups* finds
    exactly *tups* when queried with *troveSpecs*."""
    src = trovesource.SimpleTroveSource(tups)
    src.searchAsDatabase()
    queries = [cmdline.parseTroveSpec(s) for s in troveSpecs]
    matches = src.findTroves(None, queries)
    assert (set(itertools.chain(*matches.itervalues())) == set(tups))
def _filterListByMatchSpecs(reposName, matchSpecs, troveList):
    """
    Filter troveList down to the tuples selected by matchSpecs.

    Each matchSpec is a trove spec whose name part may be an fnmatch-style
    glob; a leading '-' on the name turns the spec into a removal rule.
    If no additive spec is present, everything not explicitly removed is
    kept.  Returns the surviving trove tuples, components included.
    """
    matchSpecs = [ cmdline.parseTroveSpec(x, allowEmptyName=True)
                   for x in matchSpecs ]
    hasAddSpec = False
    # Normalize missing flavors to the empty flavor so tuples compare/hash
    # consistently below.
    newTroveList = []
    for troveTup in troveList:
        if troveTup[2] is None:
            flavor = deps.parseFlavor('')
        else:
            flavor = troveTup[2]
        newTroveList.append((troveTup[0], troveTup[1], flavor))
    troveList = newTroveList
    # Group components under their package: key (package, version, flavor).
    troveMap = {}
    for troveTup in troveList:
        key = (troveTup[0].split(':')[0], troveTup[1], troveTup[2])
        troveMap.setdefault(key, []).append(troveTup)
    # Map concrete (name, versionSpec, flavorSpec) -> removeSpec flag.
    finalMatchSpecs = {}
    for matchSpec in matchSpecs:
        name = matchSpec[0]
        if name and name[0] == '-':
            removeSpec = True
            name = name[1:]
        else:
            hasAddSpec = True
            removeSpec = False
        if not name:
            filterFn = lambda x: True
        else:
            # The lambda closes over 'name' but is applied immediately
            # below, before 'name' is rebound on the next iteration.
            filterFn = lambda x: fnmatch.fnmatchcase(x[0], name)
        # add all packages that match glob (could be empty in which case
        # all packages are added.
        finalMatchSpecs.update(dict.fromkeys(
            [(x[0], matchSpec[1], matchSpec[2])
             for x in troveMap if filterFn(x)], removeSpec))
    troveSource = trovesource.SimpleTroveSource(troveMap)
    troveSource = recipeutil.RemoveHostSource(troveSource, reposName)
    results = troveSource.findTroves(None, finalMatchSpecs, None,
                                     allowMissing=True)
    toRemove = []
    toAdd = set()
    for matchSpec, resultList in results.iteritems():
        if not finalMatchSpecs[matchSpec]:
            # falsy value: an additive spec (no leading '-')
            toAdd.update(resultList)
        else:
            toRemove.extend(resultList)
    if not hasAddSpec:
        # Only removal rules given: start from everything.
        toAdd = set(troveMap)
    toAdd.difference_update(toRemove)
    return list(itertools.chain(*(troveMap[x] for x in toAdd)))
def CloneTrove(cfg, targetBranch, troveSpecList, updateBuildInfo=True,
               info=False, cloneSources=False, message=None, test=False,
               fullRecurse=False, ignoreConflicts=False,
               exactFlavors=False):
    """
    Clone the troves named by troveSpecList onto targetBranch.

    Raises errors.ParseError when targetBranch is a full version rather
    than a branch, or when a non-source component is named.  Returns the
    result of _finishClone, or None when no clone changeset was produced.
    """
    client = ConaryClient(cfg)
    repos = client.getRepos()
    targetBranch = versions.VersionFromString(targetBranch)
    if not isinstance(targetBranch, versions.Branch):
        raise errors.ParseError(
            'Cannot specify full version "%s" to clone to - must specify target branch' % targetBranch)
    troveSpecs = [cmdline.parseTroveSpec(x) for x in troveSpecList]
    # Only whole packages and :source components may be cloned directly.
    componentSpecs = [ x[0] for x in troveSpecs
                       if ':' in x[0] and x[0].split(':')[1] != 'source' ]
    if componentSpecs:
        raise errors.ParseError('Cannot clone components: %s' %
                                ', '.join(componentSpecs))
    trovesToClone = repos.findTroves(cfg.installLabelPath,
                                     troveSpecs, cfg.flavor,
                                     exactFlavors=exactFlavors)
    trovesToClone = list(set(itertools.chain(*trovesToClone.itervalues())))
    if not client.cfg.quiet:
        callback = client_callbacks.CloneCallback(client.cfg, message)
    else:
        callback = callbacks.CloneCallback()
    okay, cs = client.createCloneChangeSet(
        targetBranch, trovesToClone,
        updateBuildInfo=updateBuildInfo,
        infoOnly=info, callback=callback,
        fullRecurse=fullRecurse,
        cloneSources=cloneSources)
    if not okay:
        return
    return _finishClone(client, cfg, cs, callback, info=info,
                        test=test, ignoreConflicts=ignoreConflicts)
def _thawData(class_, data):
    """
    Parse the frozen wire format back into ((name, version, flavor),
    flavorDep) pairs.  Records are separated by NUL ('\\000'); within a
    record the trove spec and frozen flavor are separated by SOH ('\\001').
    """
    parsedData = []
    lines = data.split('\000')
    for line in lines:
        troveSpec, flavor = line.split('\001')
        troveSpec = cmdline.parseTroveSpec(troveSpec)
        # Promote the version string into a Version object.
        troveTup = troveSpec[0], versions.VersionFromString(troveSpec[1]), troveSpec[2]
        dep = ThawFlavor(flavor)
        parsedData.append((troveTup, dep))
    return parsedData
def testParseTroveSpec(self):
    """parseTroveSpec should parse name=version and strip surrounding
    whitespace from the spec string."""
    cases = [
        (("foo=/bar@baz:1/2-3-4", False, False),
         ("foo", "/bar@baz:1/2-3-4", None)),
        ((" foo=/bar@baz:1/2-3-4\n\n\n", False, False),
         ("foo", "/bar@baz:1/2-3-4", None)),
    ]
    for (specStr, emptyOk, frozenFlavor), expected in cases:
        actual = cmdline.parseTroveSpec(specStr,
                                        allowEmptyName = emptyOk,
                                        withFrozenFlavor = frozenFlavor)
        self.assertEqual(actual, expected)
def testParseTroveSpec(self):
    """parseTroveSpec should parse name=version and ignore leading and
    trailing whitespace."""
    expected = ("foo", "/bar@baz:1/2-3-4", None)
    for specStr in ("foo=/bar@baz:1/2-3-4",
                    " foo=/bar@baz:1/2-3-4\n\n\n"):
        result = cmdline.parseTroveSpec(specStr, allowEmptyName=False,
                                        withFrozenFlavor=False)
        self.assertEqual(result, expected)
def createImageJob(self, productName, imageList): raise NotImplementedError # XXX allTroveSpecs = {} finalImageList = [] for image in imageList: image = list(image) if len(image) < 4: image.append('') # Make it easy to append more parameters extensibly later image = image[0:4] finalImageList.append(image) for troveSpec, imageType, imageOptions, buildName in finalImageList: if isinstance(troveSpec, str): troveSpec = cmdline.parseTroveSpec(troveSpec) allTroveSpecs.setdefault(troveSpec, []).append((imageType, buildName, imageOptions)) cfg = self.buildConfig cfg.initializeFlavors() repos = self.getRepos() results = repos.findTroves(cfg.buildLabel, allTroveSpecs, cfg.buildFlavor) def getContextName(buildName): return buildName.replace(' ', '_') contextCache = set() i = 1 job = buildjob.BuildJob() for troveSpec, troveTupList in results.iteritems(): for imageType, buildName, imageOptions in allTroveSpecs[troveSpec]: for name, version, flavor in troveTupList: context = getContextName(buildName) while not context or context in contextCache: if buildName: context = '%s_(%d)' %(context, i) else: context = 'Image_%d' %i i += 1 contextCache.add(context) imageTrove = imagetrove.ImageTrove(None, name, version, flavor, context=context) imageTrove.setImageType(imageType) imageTrove.setImageOptions(imageOptions) imageTrove.setProductName(productName) imageTrove.setBuildName(buildName) job.setTroveConfig(imageTrove, cfg) job.addTrove(name, version, flavor, context, imageTrove) job.setMainConfig(cfg) return job
def testMultipleContexts(self):
    """Building the same source in two contexts (default and [nossl])
    should produce one job with two trove entries."""
    config = """
[nossl]
buildFlavor !ssl
"""
    repos = self.openRepository()
    trv = self.addComponent('testcase:source', '1.0-1', '',
                            [('testcase.recipe',
                              basicRecipe + '\tif Use.ssl:pass')])
    self.openRmakeRepository()
    self.writeFile(self.workDir + '/config', config)
    self.buildCfg.read(self.workDir + '/config')
    uri = 'unix://%s/socket' % self.rootDir
    self.buildCfg.strictMode = True
    srv = server.rMakeServer(#None,
                             uri, self.rmakeCfg, None, quiet=True)
    rmakeClient = client.rMakeClient(uri)
    # Fork: parent drives the build through the client; child serves.
    pid = os.fork()
    if pid:
        srv._close()
        try:
            helper = self.getRmakeHelper(rmakeClient.uri)
            troveSpec = '%s=%s[%s]' % trv.getNameVersionFlavor()
            troveSpec2 = '%s=%s[%s]{nossl}' % trv.getNameVersionFlavor()
            jobId = helper.buildTroves([troveSpec, troveSpec2])
            buildCfg = rmakeClient.getJobConfig(jobId)
            self.assertEquals(buildCfg.buildTroveSpecs,
                              [cmdline.parseTroveSpec(troveSpec)])
            helper.waitForJob(jobId)
            job = helper.getJob(jobId)
            # make sure a trove can actually be found
            if job.isFailed():
                raise RuntimeError('Job Failed: %s' % job.getFailureReason())
            trvs = job.findTrovesWithContext(
                None, [('testcase:source', None, None, None)])
            assert(len(trvs) == 1)
            # One matching spec resolving to two context-specific troves.
            self.assertEquals(len(trvs.values()[0]), 2)
        finally:
            # Always tear the server child down, even on assertion failure.
            os.kill(pid, signal.SIGTERM)
            self.waitThenKill(pid)
    else:
        try:
            sys.stdin = open('/dev/null')
            lf = logfile.LogFile(self.rootDir + '/srv.log')
            lf.redirectOutput()
            srv.serve_forever()
        finally:
            # Never let the forked server return into the test harness.
            os._exit(1)
def createImageJob(self, productName, imageList):
    """
    Build a BuildJob containing one ImageTrove per requested image per
    trove matched in the repository.

    @param productName: product the images belong to
    @param imageList: iterable of (troveSpec, imageType, imageOptions
        [, buildName]) tuples; buildName defaults to ''
    @return: a configured buildjob.BuildJob
    """
    allTroveSpecs = {}
    finalImageList = []
    # Normalize each request to exactly four fields.
    for image in imageList:
        image = list(image)
        if len(image) < 4:
            image.append('')
        # Make it easy to append more parameters extensibly later
        image = image[0:4]
        finalImageList.append(image)
    for troveSpec, imageType, imageOptions, buildName in finalImageList:
        if isinstance(troveSpec, str):
            troveSpec = cmdline.parseTroveSpec(troveSpec)
        allTroveSpecs.setdefault(troveSpec, []).append(
            (imageType, buildName, imageOptions))
    cfg = self.buildConfig
    cfg.initializeFlavors()
    repos = self.getRepos()
    results = repos.findTroves(cfg.buildLabel, allTroveSpecs,
                               cfg.buildFlavor)
    def getContextName(buildName):
        # Contexts may not contain spaces.
        return buildName.replace(' ', '_')
    contextCache = set()
    i = 1
    job = buildjob.BuildJob()
    for troveSpec, troveTupList in results.iteritems():
        for imageType, buildName, imageOptions in allTroveSpecs[troveSpec]:
            for name, version, flavor in troveTupList:
                # Derive a unique context name, suffixing a counter on
                # collisions (or when no build name was given).
                context = getContextName(buildName)
                while not context or context in contextCache:
                    if buildName:
                        context = '%s_(%d)' %(context, i)
                    else:
                        context = 'Image_%d' %i
                    i += 1
                contextCache.add(context)
                imageTrove = imagetrove.ImageTrove(None, name, version,
                                                   flavor, context=context)
                imageTrove.setImageType(imageType)
                imageTrove.setImageOptions(imageOptions)
                imageTrove.setProductName(productName)
                imageTrove.setBuildName(buildName)
                job.setTroveConfig(imageTrove, cfg)
                job.addTrove(name, version, flavor, context, imageTrove)
    job.setMainConfig(cfg)
    return job
def validate(self):
    """Check every accumulated build requirement, raising RecipeFileError
    for malformed entries."""
    # wait to check build requires until the object is instantiated
    # so that we can include all of the parent classes' buildreqs
    # in the check
    for reqSpec in self.buildRequires:
        name, versionSpec, _ = cmdline.parseTroveSpec(reqSpec)
        # At most one ':' is allowed (package:component).
        if name.count(":") > 1:
            raise RecipeFileError(
                "Build requirement '%s' cannot have two colons in its name"
                % (reqSpec))
        # we don't allow full version strings or just releases
        if versionSpec and versionSpec[0] not in ":@":
            raise RecipeFileError("Unsupported buildReq format %s" % reqSpec)
def changePins(cfg, troveStrList, pin = True,
               systemModel = None, systemModelFile = None,
               callback = None):
    """
    Pin (or unpin, when pin=False) the installed troves matching each spec
    string in troveStrList.  After unpinning, if a system model is in use,
    re-run the model update so the model reflects the change.
    """
    client = conaryclient.ConaryClient(cfg)
    client.checkWriteableRoot()
    troveList = []
    for item in troveStrList:
        name, ver, flv = parseTroveSpec(item)
        # Resolve against the local database, not the repository.
        troves = client.db.findTrove(None, (name, ver, flv))
        troveList += troves
    client.pinTroves(troveList, pin = pin)
    if systemModel and systemModelFile and not pin:
        doModelUpdate(cfg, systemModel, systemModelFile, [],
                      callback=callback)
# NOTE(review): this test's body is corrupted in the checked-in text — a
# credential scrubber replaced a commented-out URL with '*****:*****' and
# swallowed the fork/build/assert section between it and a later
# '@local:linux' literal, leaving an orphaned 'finally:' fragment.  It
# cannot be reconstructed from what remains; restore it from version
# control history.  Kept verbatim below pending that restoration.
def testBasic(self): repos = self.openRepository() trv = self.addComponent('testcase:source', '1.0-1', '', [('testcase.recipe', basicRecipe)]) self.openRmakeRepository() uri = 'unix://%s/socket' % self.rootDir srv = server.rMakeServer(#None, uri, self.rmakeCfg, None, quiet=True) self.buildCfg.uuid = self.genUUID('foo') self.buildCfg.strictMode = True #client = server.rMakeClient(srv) #client = server.rMakeClient('http://*****:*****@local:linux', None), self.buildCfg.flavor) finally: os.kill(pid, signal.SIGTERM) self.waitThenKill(pid) else: try: sys.stdin = open('/dev/null') lf = logfile.LogFile(self.rootDir + '/srv.log') lf.redirectOutput() srv.serve_forever() finally: os._exit(1)
def validate(self):
    """Validate all collected build requirements; malformed entries raise
    RecipeFileError."""
    # wait to check build requires until the object is instantiated
    # so that we can include all of the parent classes' buildreqs
    # in the check
    for spec in self.buildRequires:
        parsedName, parsedVersion, _ = cmdline.parseTroveSpec(spec)
        # Only a single ':' (package:component) is legal in a name.
        if parsedName.count(':') > 1:
            raise RecipeFileError(
                "Build requirement '%s' cannot have two colons in its name"
                % (spec))
        # we don't allow full version strings or just releases
        if parsedVersion and parsedVersion[0] not in ':@':
            raise RecipeFileError("Unsupported buildReq format %s" % spec)
def _getUpdateJob(self, cclient, troveName): self.callback.setChangeSet(troveName) trvSpec = self.getBuildData(troveName) if trvSpec and trvSpec.upper() != 'NONE': n, v, f = parseTroveSpec(trvSpec.encode('utf8')) try: v = versions.ThawVersion(v) except: try: v = versions.VersionFromString(v) except: log.error("Bad version string %r in custom trove field %r" " -- using it anyway.", v, troveName) itemList = [(n, (None, None), (v, f), True)] uJob, suggMap = cclient.updateChangeSet(itemList, resolveDeps = False) return uJob
def substResolveTroves(resolveTroves, macros):
    '''
    Substitute C{macros} into the config item C{resolveTroves} and parse
    each entry into a trove spec tuple.

    @type resolveTroves: C{[[troveSpecString]]} (nested lists of spec
        strings containing %(macro)s references)
    @type macros: dict or Macros
    @return: nested lists of parsed (name, version, flavor) specs
    '''
    return [[parseTroveSpec(spec % macros) for spec in bucket]
            for bucket in resolveTroves]
def findAction(self, actionList, data):
    """
    Resolve the trove specs of a batch of find actions against their
    primary trove sets, installing matches into each action's output set.

    Specs whose version contains a '/' are eligible for the persistent
    find-result cache in data.troveCache: cache hits skip the repository
    lookup, cache misses are looked up and then stored.  Raises
    TroveSpecsNotFound when any spec matches nothing; returns True.
    """
    troveSpecsByInSet = {}
    for action in actionList:
        l = troveSpecsByInSet.setdefault(action.primaryTroveSet, [])
        from conary.conaryclient.cmdline import parseTroveSpec
        for troveSpec in action.troveSpecs:
            # handle str's that need parsing as well as tuples which
            # have already been parsed
            if isinstance(troveSpec, str):
                l.append((action.outSet, parseTroveSpec(troveSpec)))
            else:
                l.append((action.outSet, troveSpec))
    notFound = set()
    for inSet, searchList in troveSpecsByInSet.iteritems():
        cacheable = set()
        cached = set()
        # First pass: satisfy what we can from the find-result cache.
        for i, (outSet, troveSpec) in enumerate(searchList):
            if troveSpec.version and '/' in troveSpec.version:
                match = data.troveCache.getFindResult(troveSpec)
                if match is None:
                    cacheable.add(i)
                else:
                    cached.add(i)
                    outSet._setInstall(match)
        # Second pass: look up everything the cache didn't answer.
        d = inSet._findTroves([ x[1] for i, x in enumerate(searchList)
                                if i not in cached ])
        for i, (outSet, troveSpec) in enumerate(searchList):
            if i in cached:
                continue
            if troveSpec in d:
                outSet._setInstall(d[troveSpec])
                if i in cacheable:
                    data.troveCache.addFindResult(troveSpec, d[troveSpec])
            else:
                notFound.add(troveSpec)
    if notFound:
        raise TroveSpecsNotFound(sorted(notFound))
    return True
def findAction(self, actionList, data):
    """
    Resolve the trove specs of a batch of find actions against their
    primary trove sets, installing matches into each action's output set.

    Specs whose version contains a '/' consult the persistent find-result
    cache in data.troveCache first; misses are looked up and stored back.
    Raises TroveSpecsNotFound when any spec matches nothing; returns True.
    """
    troveSpecsByInSet = {}
    for action in actionList:
        l = troveSpecsByInSet.setdefault(action.primaryTroveSet, [])
        from conary.conaryclient.cmdline import parseTroveSpec
        for troveSpec in action.troveSpecs:
            # handle str's that need parsing as well as tuples which
            # have already been parsed
            if isinstance(troveSpec, str):
                l.append((action.outSet, parseTroveSpec(troveSpec)))
            else:
                l.append((action.outSet, troveSpec))
    notFound = set()
    for inSet, searchList in troveSpecsByInSet.iteritems():
        cacheable = set()
        cached = set()
        # First pass: answer what we can from the cache.
        for i, (outSet, troveSpec) in enumerate(searchList):
            if troveSpec.version and '/' in troveSpec.version:
                match = data.troveCache.getFindResult(troveSpec)
                if match is None:
                    cacheable.add(i)
                else:
                    cached.add(i)
                    outSet._setInstall(match)
        # Second pass: bulk-resolve everything the cache missed.
        d = inSet._findTroves(
            [x[1] for i, x in enumerate(searchList) if i not in cached])
        for i, (outSet, troveSpec) in enumerate(searchList):
            if i in cached:
                continue
            if troveSpec in d:
                outSet._setInstall(d[troveSpec])
                if i in cacheable:
                    data.troveCache.addFindResult(troveSpec, d[troveSpec])
            else:
                notFound.add(troveSpec)
    if notFound:
        raise TroveSpecsNotFound(sorted(notFound))
    return True
def _matchReqs(reqList, db): reqMap = {} missingReqs = [] for buildReq in reqList: (name, versionStr, flavor) = cmdline.parseTroveSpec(buildReq) # XXX move this to use more of db.findTrove's features, instead # of hand parsing troves = db.trovesByName(name) troves = db.getTroves(troves) versionMatches = _filterBuildReqsByVersionStr(versionStr, troves) if not versionMatches: missingReqs.append(buildReq) continue match = _filterBuildReqsByFlavor(flavor, versionMatches) if match: reqMap[buildReq] = match else: missingReqs.append(buildReq) return reqMap, missingReqs
def _matchReqs(reqList, db): reqMap = {} missingReqs = [] for buildReq in reqList: (name, versionStr, flavor) = cmdline.parseTroveSpec(buildReq) # XXX move this to use more of db.findTrove's features, instead # of hand parsing troves = db.trovesByName(name) troves = db.getTroves(troves) versionMatches = _filterBuildReqsByVersionStr( versionStr, troves) if not versionMatches: missingReqs.append(buildReq) continue match = _filterBuildReqsByFlavor(flavor, versionMatches) if match: reqMap[buildReq] = match else: missingReqs.append(buildReq) return reqMap, missingReqs
def _findTroves(self, specList, labelPath=None, defaultFlavor=None,
                allowMissing=False):
    """
    Look up trove specs in the repository.

    @param specList: entries may be "name[=version][[flavor]]" strings or
        already-parsed (name, versionStr, flavor) tuples
    @param labelPath: label (string or object) or list thereof to search
    @param defaultFlavor: flavor applied to specs lacking their own
    @param allowMissing: if True, unmatched specs are simply absent from
        the result instead of raising
    @return: dict mapping each ORIGINAL specList entry to its list of
        matching (name, version, flavor) tuples
    """
    newSpecList = []
    specMap = {}
    # Parse string specs, remembering which parsed spec corresponds to
    # which input so results can be re-keyed by the caller's originals.
    for spec in specList:
        if not isinstance(spec, tuple):
            newSpec = cmdline.parseTroveSpec(spec)
        else:
            newSpec = spec
        newSpecList.append(newSpec)
        specMap[newSpec] = spec
    repos = self._getRepositoryClient()
    if isinstance(labelPath, (tuple, list)):
        labelPath = [ self._getLabel(x) for x in labelPath ]
    elif labelPath:
        labelPath = self._getLabel(labelPath)
    defaultFlavor = self._getFlavor(defaultFlavor, keepNone=True)
    results = repos.findTroves(labelPath, newSpecList,
                               defaultFlavor = defaultFlavor,
                               allowMissing=allowMissing)
    # Re-key the results by the caller's original spec values.
    return dict((specMap[x[0]], x[1]) for x in results.items())
def CloneTrove(cfg, targetBranch, troveSpecList, updateBuildInfo = True,
               info = False, cloneSources = False, message = None,
               test = False, fullRecurse = False, ignoreConflicts = False,
               exactFlavors = False):
    """
    Clone the troves named by troveSpecList onto targetBranch.

    Raises errors.ParseError when targetBranch is a full version rather
    than a branch, or when a non-source component is named.  Returns the
    result of _finishClone, or None when no clone changeset was produced.
    """
    client = ConaryClient(cfg)
    repos = client.getRepos()
    targetBranch = versions.VersionFromString(targetBranch)
    if not isinstance(targetBranch, versions.Branch):
        raise errors.ParseError('Cannot specify full version "%s" to clone to - must specify target branch' % targetBranch)
    troveSpecs = [ cmdline.parseTroveSpec(x) for x in troveSpecList]
    # Only whole packages and :source components may be cloned directly.
    componentSpecs = [ x[0] for x in troveSpecs
                       if ':' in x[0] and x[0].split(':')[1] != 'source']
    if componentSpecs:
        raise errors.ParseError('Cannot clone components: %s' %
                                ', '.join(componentSpecs))
    trovesToClone = repos.findTroves(cfg.installLabelPath,
                                     troveSpecs, cfg.flavor,
                                     exactFlavors = exactFlavors)
    trovesToClone = list(set(itertools.chain(*trovesToClone.itervalues())))
    if not client.cfg.quiet:
        callback = client_callbacks.CloneCallback(client.cfg, message)
    else:
        callback = callbacks.CloneCallback()
    okay, cs = client.createCloneChangeSet(
        targetBranch, trovesToClone,
        updateBuildInfo=updateBuildInfo,
        infoOnly=info, callback=callback,
        fullRecurse=fullRecurse,
        cloneSources=cloneSources)
    if not okay:
        return
    return _finishClone(client, cfg, cs, callback, info=info,
                        test=test, ignoreConflicts=ignoreConflicts)
def testTroveFilterBasics(self):
    """A version-restricted TroveFilter matches specs on that version and
    rejects specs on a different host, for label, branch and full-version
    restrictions alike."""
    recipe = self.getRecipe()
    for versionStr in ('test.rpath.local@rpl:devel',
                       '/test.rpath.local@rpl:devel',
                       '/test.rpath.local@rpl:devel/1-1-1'):
        filt = trovefilter.TroveFilter(recipe, 'foo', version = versionStr)
        matching = cmdline.parseTroveSpec('foo=' + versionStr)
        self.assertEquals(filt.match((matching,)), True)
        nonMatching = cmdline.parseTroveSpec(
            'foo=' + versionStr.replace('test.', 'foo.'))
        self.assertEquals(filt.match((nonMatching,)), False)
def testTroveFilterBasics(self):
    """TroveFilter with a version restriction accepts matching specs and
    rejects specs whose host differs, whether the restriction is a label,
    a branch, or a full version."""
    recipe = self.getRecipe()
    versionStrings = ('test.rpath.local@rpl:devel',
                      '/test.rpath.local@rpl:devel',
                      '/test.rpath.local@rpl:devel/1-1-1')
    for versionString in versionStrings:
        filt = trovefilter.TroveFilter(recipe, 'foo',
                                       version=versionString)
        good = cmdline.parseTroveSpec('foo=' + versionString)
        self.assertEquals(filt.match((good, )), True)
        bad = cmdline.parseTroveSpec(
            'foo=' + versionString.replace('test.', 'foo.'))
        self.assertEquals(filt.match((bad, )), False)
def getCrossRequirementSpecs(self):
    """Return the cross requirements parsed into (name, version, flavor)
    trove spec tuples."""
    specs = []
    for reqString in self.getCrossRequirements():
        specs.append(cmdline.parseTroveSpec(reqString))
    return specs
def getBuildRequirementSpecs(self):
    """Return the build requirements parsed into (name, version, flavor)
    trove spec tuples."""
    specs = []
    for reqString in self.iterBuildRequirements():
        specs.append(cmdline.parseTroveSpec(reqString))
    return specs
def _updateTroves(cfg, applyList, **kwargs):
    """
    Build an update job for applyList (or for a system model, when one is
    passed via kwargs) and apply it, handling info/test display, dependency
    suggestions, interactive confirmation, and critical-update restarts.

    @param cfg: Conary configuration in effect for this update.
    @param applyList: job tuples to apply when no system model is used.
    @param kwargs: mixed bag of apply-time and prepare-time options; the
        apply-related ones are split out below and the rest are forwarded
        to prepareUpdateJob.
    @raise errors.ReexecRequired: after critical updates have been applied,
        to signal the caller to re-exec and finish the remaining work.
    """
    # Take out the apply-related keyword arguments; everything left in
    # kwargs is meant for prepareUpdateJob.
    applyDefaults = dict(
                        replaceFiles = False,
                        replaceManagedFiles = False,
                        replaceUnmanagedFiles = False,
                        replaceModifiedFiles = False,
                        replaceModifiedConfigFiles = False,
                        tagScript = None,
                        justDatabase = False,
                        skipCapsuleOps = False,
                        info = False,
                        keepJournal = False,
                        noRestart = False,
                        noScripts = False,
    )
    applyKwargs = {}
    for k in applyDefaults:
        if k in kwargs:
            applyKwargs[k] = kwargs.pop(k)
    callback = kwargs.pop('callback')
    loadTroveCache = kwargs.pop('loadTroveCache', False)
    # 'test' is needed on both sides (prepare and apply), so it is read
    # with get() rather than popped.
    applyKwargs['test'] = kwargs.get('test', False)
    applyKwargs['localRollbacks'] = cfg.localRollbacks
    applyKwargs['autoPinList'] = cfg.pinTroves
    # System-model parameters; when 'model' is set the update is computed
    # from the model graph instead of applyList.
    model = kwargs.pop('systemModel', None)
    modelFile = kwargs.pop('systemModelFile', None)
    modelGraph = kwargs.pop('modelGraph', None)
    modelTrace = kwargs.pop('modelTrace', None)
    noRestart = applyKwargs.get('noRestart', False)
    client = conaryclient.ConaryClient(cfg, modelFile=modelFile)
    client.setUpdateCallback(callback)
    if kwargs.pop('disconnected', False):
        client.disconnectRepos()
    migrate = kwargs.get('migrate', False)
    # even though we no longer differentiate forceMigrate, we still
    # remove it from kwargs to avoid confusing prepareUpdateJob
    kwargs.pop('forceMigrate', False)
    restartInfo = kwargs.get('restartInfo', None)
    # Initialize the critical update set
    applyCriticalOnly = kwargs.get('applyCriticalOnly', False)
    if kwargs.get('criticalUpdateInfo') is not None:
        kwargs['criticalUpdateInfo'].criticalOnly = applyCriticalOnly
    else:
        kwargs['criticalUpdateInfo'] = CriticalUpdateInfo(applyCriticalOnly)
    info = applyKwargs.pop('info', False)
    # Rename depCheck to resolveDeps
    depCheck = kwargs.pop('depCheck', True)
    kwargs['resolveDeps'] = depCheck
    if not info:
        client.checkWriteableRoot()
    # Unfortunately there's no easy way to make 'test' or 'info' mode work
    # with capsule sync, doubly so because it influences the decisions made
    # later on about what troves to update. So this will always really
    # apply, but the good news is that it never modifies the system outside
    # of the Conary DB.
    client.syncCapsuleDatabase(callback, makePins=True)
    updJob = client.newUpdateJob()
    try:
        if model:
            # Model-driven update: build the trove-set graph from the CML
            # model, optionally using an on-disk trove cache.
            changeSetList = kwargs.get('fromChangesets', [])
            criticalUpdates = kwargs.get('criticalUpdateInfo', None)
            tc = modelupdate.CMLTroveCache(client.getDatabase(),
                                           client.getRepos(),
                                           callback = callback,
                                           changeSetList = changeSetList)
            tcPath = cfg.root + cfg.dbPath + '/modelcache'
            if loadTroveCache:
                if os.path.exists(tcPath):
                    log.info("loading %s", tcPath)
                    callback.loadingModelCache()
                    tc.load(tcPath)
            ts = client.cmlGraph(model, changeSetList = changeSetList)
            if modelGraph is not None:
                ts.g.generateDotFile(modelGraph)
            suggMap = client._updateFromTroveSetGraph(updJob, ts, tc,
                                        fromChangesets = changeSetList,
                                        criticalUpdateInfo = criticalUpdates,
                                        callback = callback)
            if modelTrace is not None:
                ts.g.trace([ parseTroveSpec(x) for x in modelTrace ] )

            # Try simplifying the model; only adopt the simplified form if
            # it provably produces the identical update job.
            finalModel = copy.deepcopy(model)
            if model.suggestSimplifications(tc, ts.g):
                log.info("possible system model simplifications found")
                ts2 = client.cmlGraph(model, changeSetList = changeSetList)
                updJob2 = client.newUpdateJob()
                try:
                    suggMap2 = client._updateFromTroveSetGraph(updJob2, ts2,
                                        tc, fromChangesets = changeSetList,
                                        criticalUpdateInfo = criticalUpdates)
                except errors.TroveNotFound:
                    log.info("bad model generated; bailing")
                else:
                    if (suggMap == suggMap2 and
                        updJob.getJobs() == updJob2.getJobs()):
                        log.info("simplified model verfied; using it instead")
                        ts = ts2
                        finalModel = model
                        updJob = updJob2
                        suggMap = suggMap2
                    else:
                        log.info("simplified model changed result; ignoring")

            model = finalModel
            modelFile.model = finalModel

            if tc.cacheModified():
                log.info("saving %s", tcPath)
                callback.savingModelCache()
                tc.save(tcPath)
                callback.done()
        else:
            # Classic update path driven by the explicit applyList.
            suggMap = client.prepareUpdateJob(updJob, applyList, **kwargs)
    except:
        callback.done()
        client.close()
        raise

    if info:
        # Display-only mode: show the job (and any post-critical-update
        # recalculation) and bail out before applying anything.
        callback.done()
        displayUpdateInfo(updJob, cfg, noRestart=noRestart)
        if restartInfo and not model:
            callback.done()
            newJobs = set(itertools.chain(*updJob.getJobs()))
            oldJobs = set(updJob.getItemList())
            addedJobs = newJobs - oldJobs
            removedJobs = oldJobs - newJobs
            if addedJobs or removedJobs:
                print
                print 'NOTE: after critical updates were applied, the contents of the update were recalculated:'
                print
                displayChangedJobs(addedJobs, removedJobs, cfg)
        updJob.close()
        client.close()
        return

    if model:
        # Refuse to proceed if the model references local troves that the
        # update would orphan.
        missingLocalTroves = model.getMissingLocalTroves(tc, ts)
        if missingLocalTroves:
            print 'Update would leave references to missing local troves:'
            for troveTup in missingLocalTroves:
                if not isinstance(troveTup, trovetup.TroveTuple):
                    troveTup = trovetup.TroveTuple(troveTup)
                print "\t" + str(troveTup)
            client.close()
            return

    if suggMap:
        # Report extra troves pulled in by dependency resolution.
        callback.done()
        dcfg = display.DisplayConfig()
        dcfg.setTroveDisplay(fullFlavors = cfg.fullFlavors,
                             fullVersions = cfg.fullVersions,
                             showLabels = cfg.showLabels)
        formatter = display.TroveTupFormatter(dcfg)
        print "Including extra troves to resolve dependencies:"
        print " ",
        items = sorted(set(formatter.formatNVF(*x)
                       for x in itertools.chain(*suggMap.itervalues())))
        print " ".join(items)

    askInteractive = cfg.interactive

    if restartInfo:
        callback.done()
        newJobs = set(itertools.chain(*updJob.getJobs()))
        oldJobs = set(updJob.getItemList())
        addedJobs = newJobs - oldJobs
        removedJobs = oldJobs - newJobs
        # NOTE(review): 'and' binds tighter than 'or' here, so this reads
        # as ((not model and addedJobs) or removedJobs) — confirm that is
        # the intended grouping.
        if not model and addedJobs or removedJobs:
            print 'NOTE: after critical updates were applied, the contents of the update were recalculated:'
            displayChangedJobs(addedJobs, removedJobs, cfg)
        else:
            askInteractive = False

    if not updJob.jobs:
        # Nothing to do
        print 'Update would not modify system'
        if model and not kwargs.get('test'):
            # Make sure 'conary sync' clears model.next even if nothing needs
            # to be done.
            modelFile.closeSnapshot()
        updJob.close()
        client.close()
        return
    elif askInteractive:
        print 'The following updates will be performed:'
        displayUpdateInfo(updJob, cfg, noRestart=noRestart)

    if migrate and cfg.interactive:
        print ('Migrate erases all troves not referenced in the groups'
               ' specified.')

    if askInteractive:
        if migrate:
            style = 'migrate'
        else:
            style = 'update'
        okay = cmdline.askYn('continue with %s? [Y/n]' % style, default=True)
        if not okay:
            updJob.close()
            client.close()
            return

    if not noRestart and updJob.getCriticalJobs():
        print "Performing critical system updates, will then restart update."

    try:
        restartDir = client.applyUpdateJob(updJob, **applyKwargs)
    finally:
        updJob.close()
        client.close()

    if restartDir:
        params = sys.argv

        # Write command line to disk
        import xmlrpclib
        cmdlinefile = open(os.path.join(restartDir, 'cmdline'), "w")
        cmdlinefile.write(xmlrpclib.dumps((params, ), methodresponse = True))
        cmdlinefile.close()

        # CNY-980: we should have the whole script of changes to perform in
        # the restart directory (in the job list); if in migrate mode, re-exec
        # as regular update
        if migrate and 'migrate' in params:
            params[params.index('migrate')] = 'update'

        params.extend(['--restart-info=%s' % restartDir])
        client.close()
        raise errors.ReexecRequired(
                'Critical update completed, rerunning command...', params,
                restartDir)
    else:
        if (not kwargs.get('test', False)) and model:
            modelFile.closeSnapshot()
def promoteTroves(cfg, troveSpecs, targetList, skipBuildInfo=False,
                  info=False, message=None, test=False,
                  ignoreConflicts=False, cloneOnlyByDefaultTroves=False,
                  cloneSources=False, allFlavors=False, client=None,
                  targetFile=None, exactFlavors=None,
                  excludeGroups=False):
    """
    Clone (promote) troves from one set of labels/branches to another,
    building the label-to-label target map from targetList and creating
    a sibling-clone changeset.

    @param cfg: Conary configuration (buildLabel is the default context).
    @param troveSpecs: trove spec strings naming what to promote.
    @param targetList: (fromLabel, toLabel) pairs defining the promotion map.
    @return: False if the clone changeset could not be created, otherwise
        the result of _finishClone.

    NOTE(review): the 'client' parameter is unconditionally overwritten by
    'client = ConaryClient(cfg)' below, so a caller-supplied client is
    ignored — confirm whether that is intentional.
    """
    targetMap = {}
    searchPath = []
    # Build the from->to label map; labels on the "from" side also become
    # the search path used to resolve the trove specs.
    for fromLoc, toLoc in targetList:
        context = cfg.buildLabel
        fromLoc = _convertLabelOrBranch(fromLoc, context)
        if fromLoc is not None:
            if isinstance(fromLoc, versions.Branch):
                context = fromLoc.label()
            else:
                context = fromLoc
            searchPath.append(context)
        toLoc = _convertLabelOrBranch(toLoc, context)
        targetMap[fromLoc] = toLoc
    troveSpecs = [cmdline.parseTroveSpec(x, False) for x in troveSpecs]
    if exactFlavors:
        allFlavors = False
    elif allFlavors:
        # Group the requested flavors by (name, version) so every leaf
        # flavor can be matched against the user's requests afterwards.
        cfg.flavor = []
        troveSpecFlavors = {}
        for troveSpec in troveSpecs:
            troveSpecFlavors.setdefault(
                (troveSpec[0], troveSpec[1], None),
                []).append(troveSpec[2])
        troveSpecs = list(troveSpecFlavors)
    client = ConaryClient(cfg)
    if not searchPath:
        searchPath = cfg.buildLabel
    searchSource = client.getSearchSource(installLabelPath=searchPath)
    results = searchSource.findTroves(troveSpecs,
                                      bestFlavor=not allFlavors,
                                      exactFlavors=exactFlavors)
    if allFlavors:
        trovesToClone = []
        for troveSpec, troveTups in results.items():
            specFlavors = troveSpecFlavors[troveSpec]
            for specFlavor in specFlavors:
                if specFlavor is None:
                    matchingTups = troveTups
                else:
                    matchingTups = [x for x in troveTups
                                    if x[2].stronglySatisfies(specFlavor)]
                # we only clone the latest version for all troves.
                # bestFlavor=False returns the leaves for all flavors, so
                # we may need to cut some out.
                latest = max([x[1] for x in matchingTups])
                matchingTups = [x for x in matchingTups
                                if x[1] == latest]
                trovesToClone.extend(matchingTups)
    else:
        trovesToClone = itertools.chain(*results.itervalues())
    # De-duplicate; the same tuple can match several specs.
    trovesToClone = list(set(trovesToClone))
    if not client.cfg.quiet:
        callback = client_callbacks.CloneCallback(client.cfg, message)
    else:
        callback = callbacks.CloneCallback()
    okay, cs = client.createSiblingCloneChangeSet(
        targetMap, trovesToClone,
        updateBuildInfo=not skipBuildInfo,
        infoOnly=info, callback=callback,
        cloneOnlyByDefaultTroves=cloneOnlyByDefaultTroves,
        cloneSources=cloneSources,
        excludeGroups=excludeGroups)
    if not okay:
        return False
    return _finishClone(client, cfg, cs, callback, info=info, test=test,
                        ignoreConflicts=ignoreConflicts,
                        targetFile=targetFile)
self.assertEquals(filt.match((nvf, )), False)

@testhelp.context('trove-filter')
def testBadTroveFilters(self):
    """Invalid filter inputs must fail loudly rather than silently match."""
    recipe = self.getRecipe()
    filt = trovefilter.AbstractFilter()
    # The abstract base class must refuse to match anything.
    self.assertRaises(NotImplementedError, filt.match)
    # A broken regular expression in the name must surface as a
    # RuntimeError at construction time.
    try:
        filt = trovefilter.TroveFilter(recipe, 'foo(')
    except RuntimeError, e:
        self.assertEquals(str(e), "Bad Regexp: 'foo(' for name")
    else:
        self.fail("Expected RuntimeError")
    nvf = cmdline.parseTroveSpec('foo=/test.rpath.local@rpl:devel')
    filt = trovefilter.TroveFilter(recipe, 'foo')
    self.assertEquals(filt.match((nvf, )), True)
    filt.compile()
    # Poisoning the compiled filter's version attributes must stop it
    # from matching.
    filt.versionType = True
    filt.version = 'foo'
    self.assertEquals(filt.match((nvf, )), False)

@testhelp.context('trove-filter')
def testTroveFilterVersion(self):
    recipe = self.getRecipe()
    filt = trovefilter.TroveFilter(recipe, 'foo',
                                   version='test.rpath.local@rpl:linux')
    filt2 = trovefilter.TroveFilter(recipe, 'bar',
def parseString(self, val):
    """Parse a trove spec string into (name, Version object, flavor)."""
    spec = cmdline.parseTroveSpec(val)
    # Only the version component needs converting; name and flavor pass
    # through unchanged.
    return (spec[0], versions.VersionFromString(spec[1]), spec[2])
def parseTroveSpec(troveSpec):
    """Thin module-level wrapper around cmdline.parseTroveSpec."""
    parsed = cmdline.parseTroveSpec(troveSpec)
    return parsed
def getCrossRequirementSpecs(self):
    """Return the cross requirements parsed into trove-spec tuples."""
    specs = []
    for reqString in self.getCrossRequirements():
        specs.append(cmdline.parseTroveSpec(reqString))
    return specs
def parseString(self, val):
    """Parse a trove spec string into a (name, version, flavor) tuple."""
    parsed = cmdline.parseTroveSpec(val)
    return parsed
def generateChangeSet(self, troveNameList, all=False):
    """
    Verify the named installed troves (or, with all=True, everything on
    the update item list) and return the accumulated local changeset, or
    None when display mode is active.

    @param troveNameList: trove spec strings naming installed troves.
    @param all: verify every top-level item instead; troveNameList must
        then be empty.
    @return: the final changeset, or None when self.display is not
        DISPLAY_NONE.
    """
    if self.display != DISPLAY_NONE:
        # save memory by not keeping the changeset around; this is
        # particularly useful when all=True
        self.finalCs = None
    else:
        self.finalCs = changeset.ReadOnlyChangeSet()

    troveNames = [ cmdline.parseTroveSpec(x) for x in troveNameList ]
    if all:
        assert(not troveNameList)
        client = conaryclient.ConaryClient(self.cfg)
        troveInfo = client.getUpdateItemList()
        troveInfo.sort()
    else:
        troveInfo = []
        for (troveName, versionStr, flavor) in troveNames:
            try:
                troveInfo += self.db.findTrove(None,
                                    (troveName, versionStr, flavor))
            except errors.TroveNotFound:
                # Report the most specific "not installed" message the
                # given spec allows; the trove is simply skipped.
                if versionStr:
                    if flavor is not None and not flavor.isEmpty():
                        flavorStr = deps.formatFlavor(flavor)
                        log.error("version %s with flavor '%s' of "
                                  "trove %s is not installed",
                                  versionStr, flavorStr, troveName)
                    else:
                        log.error("version %s of trove %s is not installed",
                                  versionStr, troveName)
                elif flavor is not None and not flavor.isEmpty():
                    flavorStr = deps.formatFlavor(flavor)
                    log.error("flavor '%s' of trove %s is not installed",
                              flavorStr, troveName)
                else:
                    log.error("trove %s is not installed", troveName)

    # we need the recursive closure of the set; self.db.walkTroveSet(trv)
    # is surely not the most efficient thing to do, but it's easy. remember
    # it's depth first; keeping the order depth first helps keep the
    # output sane
    troves = self.db.getTroves(troveInfo, withDeps = False,
                               withFileObjects = True, pristine = False)

    seen = set()
    fullTroveList = []
    for topTrv in troves:
        for nvf in self.db.walkTroveSet(topTrv, withFiles = False,
                                        asTuple = True):
            seen.add(nvf)
            fullTroveList.append(nvf)

    if self.newFiles:
        newFilesByTrove = self._scanFilesystem(fullTroveList,
                                               dirType = self.newFiles)
    else:
        newFilesByTrove = {}

    self._verifyTroves(fullTroveList, newFilesByTrove)

    # Files owned by no trove are keyed under None by the scanner.
    if None in newFilesByTrove:
        self._addUnownedNewFiles(newFilesByTrove[None])

    if self.finalCs:
        # Primary troves are recorded on a local-label shadow of the
        # installed version.
        for trv in troves:
            self.finalCs.addPrimaryTrove(
                trv.getName(),
                trv.getVersion().createShadow(versions.LocalLabel()),
                trv.getFlavor())

    return self.finalCs
def derive(repos, cfg, targetLabel, troveSpec, checkoutDir=None, extract=False, info=False, callback=None): """ Performs all the commands necessary to create a derived recipe. First it shadows the package, then it creates a checkout of the shadow and converts the checkout to a derived recipe package. Finally if extract = True, it installs an version of the binary package into a root. @param repos: trovesource to search for and derive packages from @param cfg: configuration to use when deriving the package @type cfg: ConaryConfiguration object @param targetLabel: label to derive from @type targetLabel: versions.Label @param checkoutDir: directory to create the checkout in. If None, defaults to currentDir + packageName. @param extract: If True, creates a subdirectory of the checkout named _ROOT_ with the contents of the binary of the derived package. @param info: If true, only display the information about the shadow that would be performed if the derive command were completed. @param callback: """ origDir = os.getcwd() try: if callback is None: callback = DeriveCallback() if isinstance(troveSpec, tuple): troveName, versionSpec, flavor = troveSpec versionSpec = str(versionSpec) troveSpec = cmdline.toTroveSpec(troveName, versionSpec, flavor) else: troveName, versionSpec, flavor = cmdline.parseTroveSpec(troveSpec) if isinstance(targetLabel, str): targetLabel = Label(targetLabel) troveName, versionSpec, flavor = cmdline.parseTroveSpec(troveSpec) result = repos.findTrove(cfg.buildLabel, (troveName, versionSpec, flavor), cfg.flavor) # findTrove shouldn't return multiple items for one package anymore # when a flavor is specified. troveToDerive, = result # displaying output along the screen allows there to be a record # of what operations were performed. Since this command is # an aggregate of several commands I think that is appropriate, # rather than simply using a progress callback. 
log.info('Shadowing %s=%s[%s] onto %s' % (troveToDerive[0], troveToDerive[1], troveToDerive[2], targetLabel)) if info: cfg.interactive = False error = branch.branch(repos, cfg, str(targetLabel), ['%s=%s[%s]' % troveToDerive], makeShadow=True, sourceOnly=True, binaryOnly=False, allowEmptyShadow=True, info=info) if info or error: return shadowedVersion = troveToDerive[1].createShadow(targetLabel) shadowedVersion = shadowedVersion.getSourceVersion(False) troveName = troveName.split(':')[0] checkoutDir = checkoutDir or troveName checkin.checkout(repos, cfg, checkoutDir, ["%s=%s" % (troveName, shadowedVersion)], callback=callback) os.chdir(checkoutDir) nvfs = repos.getTrovesBySource(troveToDerive[0] + ':source', troveToDerive[1].getSourceVersion()) trvs = repos.getTroves(nvfs) hasCapsule = [x for x in trvs if x.troveInfo.capsule.type()] if hasCapsule: derivedRecipeType = 'DerivedCapsuleRecipe' removeText = '' else: derivedRecipeType = 'DerivedPackageRecipe' removeText = \ """ # This appliance uses PHP as a command interpreter but does # not include a web server, so remove the file that creates # a dependency on the web server r.Remove('/etc/httpd/conf.d/php.conf') """ log.info('Rewriting recipe file') recipeName = troveName + '.recipe' className = util.convertPackageNameToClassName(troveName) derivedRecipe = """ class %(className)sRecipe(%(recipeBaseClass)s): name = '%(name)s' version = '%(version)s' def setup(r): ''' In this recipe, you can make modifications to the package. Examples: # This appliance has high-memory-use PHP scripts r.Replace('memory_limit = 8M', 'memory_limit = 32M', '/etc/php.ini') %(removeText)s # This appliance requires that a few binaries be replaced # with binaries built from a custom archive that includes # a Makefile that honors the DESTDIR variable for its # install target. 
r.addArchive('foo.tar.gz') r.Make() r.MakeInstall() # This appliance requires an extra configuration file r.Create('/etc/myconfigfile', contents='some data') ''' """ % dict(className=className, name=troveName, version=shadowedVersion.trailingRevision().getVersion(), recipeBaseClass=derivedRecipeType, removeText=removeText) open(recipeName, 'w').write(derivedRecipe) log.info('Removing extra files from checkout') conaryState = state.ConaryStateFromFile('CONARY', repos) sourceState = conaryState.getSourceState() # clear the factory since we don't care about how the parent trove was # created sourceState.setFactory('') addRecipe = True for (pathId, path, fileId, version) in list(sourceState.iterFileList()): if path == recipeName: addRecipe = False continue sourceState.removeFile(pathId) if util.exists(path): statInfo = os.lstat(path) try: if statInfo.st_mode & stat.S_IFDIR: os.rmdir(path) else: os.unlink(path) except OSError, e: log.warning("cannot remove %s: %s" % (path, e.strerror)) conaryState.write('CONARY') if addRecipe: checkin.addFiles([recipeName]) if extract: log.info('extracting files from %s=%s[%s]' % (troveToDerive)) # extract to _ROOT_ extractDir = os.path.join(os.getcwd(), '_ROOT_') ts = [(troveToDerive[0], (None, None), (troveToDerive[1], troveToDerive[2]), True)] cs = repos.createChangeSet(ts, recurse=True) ChangesetExploder(cs, extractDir) # extract to _OLD_ROOT_ secondDir = os.path.join(os.getcwd(), '_OLD_ROOT_') cs = repos.createChangeSet(ts, recurse=True) ChangesetExploder(cs, secondDir)
def _getTuple(self, troveString):
    """
    Parse troveString into (name, version, flavor), translating a parse
    failure into errors.InvalidTroveSpec.

    NOTE(review): the parsed tuple is unpacked but never returned, so the
    success path yields None — this looks like a truncated helper; confirm
    against the original source.
    """
    try:
        name, version, flavor = cmdline.parseTroveSpec(troveString)
    except cmdline.TroveSpecError, e:
        raise errors.InvalidTroveSpec("Error parsing trove %s: %s"
                                      % (troveString, str(e)))
def parseTroveSpec(troveSpec, allowEmptyName=False):
    """Parse a trove spec that may carry a trailing {context} suffix.

    Returns the usual (name, version, flavor) tuple extended with the
    context string (or None when no suffix was given).
    """
    # Split off an optional trailing "{context}"; the non-greedy first
    # group leaves the suffix for the second group when present.
    match = re.match('^(.*?)(?:{(.*)})?$', troveSpec)
    bareSpec, context = match.groups()
    parsed = cmdline.parseTroveSpec(bareSpec, allowEmptyName=allowEmptyName)
    return parsed + (context, )
def getTrovesToDisplay(db, troveSpecs, pathList=None, whatProvidesList=None,
                       exactFlavors=False):
    """
    Finds the given trove and path specifiers, and returns matching
    (n,v,f) tuples.

    @param db: database to search
    @type db: local.database.Database
    @param troveSpecs: troves to search for
    @type troveSpecs: list of troveSpecs (n[=v][[f]])
    @param pathList: paths which should be linked to some trove in this
    database.
    @type pathList: list of strings
    @param whatProvidesList: deps to search for providers of
    @type whatProvidesList: list of strings
    @raises TroveSpecError: Raised if one of the troveSpecs is of an
    invalid format
    @note: This function calls database routines which could raise any
    errors defined in L{dbstore.sqlerrors}
    @rtype: troveTupleList (list of (name, version, flavor) tuples), and
    a boolean that stats whether the troves returned should be considered
    primary (and therefore not compressed ever).
    """
    # Mutable default arguments ([]) replaced with None sentinels; the
    # observable behavior is unchanged for every caller.
    if pathList is None:
        pathList = []
    if whatProvidesList is None:
        whatProvidesList = []

    primary = True
    if troveSpecs:
        troveSpecs = [ cmdline.parseTroveSpec(x, allowEmptyName=False) \
                       for x in troveSpecs ]
    else:
        troveSpecs = []

    normPathList = [ util.realpath(os.path.abspath(util.normpath(x)))
                     for x in pathList ]
    troveTups = []
    for path, origPath in itertools.izip(normPathList, pathList):
        # A trailing slash means "everything directly under this
        # directory" rather than the directory itself.
        if origPath.endswith('/'):
            allPaths = [path + '/' + x for x in os.listdir(db.root + path)]
        else:
            allPaths = [path]
        for thisPath in allPaths:
            for trove in db.iterTrovesByPath(thisPath):
                troveTups.append((trove.getName(), trove.getVersion(),
                                  trove.getFlavor()))

    if whatProvidesList:
        results = db.getTrovesWithProvides(whatProvidesList)
        troveTups.extend(itertools.chain(*results.itervalues()))

    if not (troveSpecs or pathList or whatProvidesList):
        # No selectors at all: return everything, flagged non-primary so
        # the caller may compress the listing.
        troveTups = sorted(db.iterAllTroves())
        primary = False
    else:
        results = db.findTroves(None, troveSpecs,
                                exactFlavors=exactFlavors)
        for troveSpec in troveSpecs:
            troveTups.extend(results.get(troveSpec, []))

    return troveTups, primary
def generateChangeSet(self, troveNameList, all=False):
    """
    Verify the named installed troves (or, with all=True, everything on
    the update item list) and return the accumulated local changeset, or
    None when display mode is active.

    @param troveNameList: trove spec strings naming installed troves.
    @param all: verify every top-level item instead; troveNameList must
        then be empty.
    @return: the final changeset, or None when self.display is not
        DISPLAY_NONE.
    """
    if self.display != DISPLAY_NONE:
        # save memory by not keeping the changeset around; this is
        # particularly useful when all=True
        self.finalCs = None
    else:
        self.finalCs = changeset.ReadOnlyChangeSet()

    troveNames = [cmdline.parseTroveSpec(x) for x in troveNameList]
    if all:
        assert (not troveNameList)
        client = conaryclient.ConaryClient(self.cfg)
        troveInfo = client.getUpdateItemList()
        troveInfo.sort()
    else:
        troveInfo = []
        for (troveName, versionStr, flavor) in troveNames:
            try:
                troveInfo += self.db.findTrove(
                    None, (troveName, versionStr, flavor))
            except errors.TroveNotFound:
                # Report the most specific "not installed" message the
                # given spec allows; the trove is simply skipped.
                if versionStr:
                    if flavor is not None and not flavor.isEmpty():
                        flavorStr = deps.formatFlavor(flavor)
                        log.error(
                            "version %s with flavor '%s' of "
                            "trove %s is not installed",
                            versionStr, flavorStr, troveName)
                    else:
                        log.error(
                            "version %s of trove %s is not installed",
                            versionStr, troveName)
                elif flavor is not None and not flavor.isEmpty():
                    flavorStr = deps.formatFlavor(flavor)
                    log.error("flavor '%s' of trove %s is not installed",
                              flavorStr, troveName)
                else:
                    log.error("trove %s is not installed", troveName)

    # we need the recursive closure of the set; self.db.walkTroveSet(trv)
    # is surely not the most efficient thing to do, but it's easy. remember
    # it's depth first; keeping the order depth first helps keep the
    # output sane
    troves = self.db.getTroves(troveInfo, withDeps=False,
                               withFileObjects=True, pristine=False)

    seen = set()
    fullTroveList = []
    for topTrv in troves:
        for nvf in self.db.walkTroveSet(topTrv, withFiles=False,
                                        asTuple=True):
            seen.add(nvf)
            fullTroveList.append(nvf)

    if self.newFiles:
        newFilesByTrove = self._scanFilesystem(fullTroveList,
                                               dirType=self.newFiles)
    else:
        newFilesByTrove = {}

    self._verifyTroves(fullTroveList, newFilesByTrove)

    # Files owned by no trove are keyed under None by the scanner.
    if None in newFilesByTrove:
        self._addUnownedNewFiles(newFilesByTrove[None])

    if self.finalCs:
        # Primary troves are recorded on a local-label shadow of the
        # installed version.
        for trv in troves:
            self.finalCs.addPrimaryTrove(
                trv.getName(),
                trv.getVersion().createShadow(versions.LocalLabel()),
                trv.getFlavor())

    return self.finalCs