Пример #1
0
    def log(self,
            remoteIp,
            authToken,
            methodName,
            args,
            kwArgs={},
            exception=None,
            latency=None,
            systemId=None):
        # lazy re-open the log file in case it was rotated from underneath us
        self.reopen()
        if exception:
            exception = str(exception)

        (user, entitlements) = authToken[0], authToken[2]
        logStr = cPickle.dumps(
            (self.logFormatRevision, self.serverNameList, time.time(),
             remoteIp, (user, entitlements), methodName, args, kwArgs,
             exception, latency, systemId))
        try:
            self.fobj.write(struct.pack("!I", len(logStr)) + logStr)
            self.fobj.flush()
        except IOError, e:
            log.warning("'%s' while logging call from (%s,%s) to %s\n", str(e),
                        remoteIp, user, methodName)
Пример #2
0
    def searchNetworkSources(self, url, headers, single):
        """Try to fetch *url* from the network and store it in the cache.

        Non-network URL schemes are ignored.  Unless *single* is set, a
        negative-cache hit short-circuits the fetch.  A successful download
        is added to the cache and reported by raising PathFound; a failed
        one records a negative cache entry.
        """
        if url.scheme not in NETWORK_SCHEMES:
            return

        # Skip URLs that recently failed, so we do not hammer servers.
        if not single:
            negativePath = self.repCache.checkNegativeCache(
                self.recipeName, url)
            if negativePath:
                log.warning('not fetching %s (negative cache entry %s exists)',
                            url, negativePath)
                return

        log.info('Trying %s...', str(url))
        headers = headers if headers is not None else {}

        response = self._fetchUrl(url, headers)
        if response is None:
            # remember the failure so later lookups can bail out early
            self.repCache.createNegativeCacheEntry(self.recipeName, url)
            return
        size = int(response.headers.get('Content-Length', 0))
        cachedPath = self.repCache.addFileToCache(
            self.recipeName, url, response, size)
        if cachedPath:
            raise PathFound(cachedPath, False)
Пример #3
0
    def excepthook(type, exc_msg, tb):
        cfg = self.recipe.cfg
        sys.excepthook = sys.__excepthook__
        if cfg.debugRecipeExceptions:
            lines = traceback.format_exception(type, exc_msg, tb)
            print string.joinfields(lines, "")
        if self.linenum is not None:
            prefix = "%s:%s:" % (self.file, self.linenum)
            prefix_len = len(prefix)
            if str(exc_msg)[:prefix_len] != prefix:
                exc_message = "%s:%s: %s: %s" % (self.file, self.linenum,
                                              type.__name__, exc_msg)
            print exc_message

        if self.recipe.buildinfo:
            try:
                buildinfo = self.recipe.buildinfo
                buildinfo.error = exc_message
                buildinfo.file = self.file
                buildinfo.lastline = self.linenum
                buildinfo.stop()
            except:
                log.warning("could not write out to buildinfo")

        if cfg.debugRecipeExceptions and self.recipe.isatty():
            debugger.post_mortem(tb, type, exc_msg)
        else:
            sys.exit(1)
Пример #4
0
    def searchNetworkSources(self, url, headers):
        """Fetch *url* over the network into the lookaside cache.

        Does nothing for non-network schemes or negatively-cached URLs.
        On success the cached path is reported by raising PathFound; on
        failure a negative cache entry is recorded.
        """
        if url.scheme not in NETWORK_SCHEMES:
            return

        # check for negative cache entries to avoid spamming servers
        negativePath = self.repCache.checkNegativeCache(self.recipeName, url)
        if negativePath:
            log.warning('not fetching %s (negative cache entry %s exists)',
                        url, negativePath)
            return

        log.info('Trying %s...', str(url))
        headers = {} if headers is None else headers

        response = self._fetchUrl(url, headers)
        if response is None:
            # remember the failure so we do not retry it immediately
            self.repCache.createNegativeCacheEntry(self.recipeName, url)
            return
        length = int(response.headers.get('Content-Length', 0))
        cached = self.repCache.addFileToCache(self.recipeName, url,
                                              response, length)
        if cached:
            raise PathFound(cached, False)
Пример #5
0
    def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
                                       flavoredList):
        """Pick the best (name, version, flavor) tuple from flavoredList.

        Candidates are bucketed by (name, trailing label); inside a bucket
        the newest timestamp wins, with the flavor score breaking exact
        timestamp ties.  Across buckets the highest (score, timestamp)
        pair wins.  Returns None when flavoredList is empty.
        """
        best = {}
        for installFlavor, nvf in flavoredList:
            name, version, flavor = nvf
            label = version.trailingLabel()
            stamp = version.timeStamps()[-1]
            # FIXME: we should cache this scoring from before.
            score = 0 if installFlavor is None else installFlavor.score(flavor)

            key = (name, label)
            if key in best:
                prevScore, prevStamp, _ = best[key]
                if prevStamp > stamp:
                    continue
                if prevStamp == stamp and score < prevScore:
                    continue
            best[key] = (score, stamp, (name, version, flavor))

        ranked = sorted(best.itervalues())
        if not ranked:
            return None
        if len(ranked) > 1 and [x for x in ranked if x[1] == 0]:
            log.warning("Dependency tie-breaking may not be deterministic "
                    "because some versions are missing timestamps")
        # highest score, then latest timestamp, then name.
        return ranked[-1][-1]
Пример #6
0
        def _filterBuildReqsByVersionStr(versionStr, troves):
            if not versionStr:
                return troves

            versionMatches = []
            if versionStr.find('@') == -1:
                if versionStr.find(':') == -1:
                    log.warning('Deprecated buildreq format.  Use '
                                ' foo=:tag, not foo=tag')
                    versionStr = ':' + versionStr

            for trove in troves:
                labels = trove.getVersion().iterLabels()
                if versionStr[0] == ':':
                    branchTag = versionStr[1:]
                    branchTags = [x.getLabel() for x in labels]
                    if branchTag in branchTags:
                        versionMatches.append(trove)
                else:
                    # versionStr must begin with an @
                    branchNames = []
                    for label in labels:
                        branchNames.append(
                            '@%s:%s' %
                            (label.getNamespace(), label.getLabel()))
                    if versionStr in branchNames:
                        versionMatches.append(trove)
            return versionMatches
Пример #7
0
    def excepthook(type, exc_msg, tb):
        cfg = self.recipe.cfg
        sys.excepthook = sys.__excepthook__
        if cfg.debugRecipeExceptions:
            lines = traceback.format_exception(type, exc_msg, tb)
            print string.joinfields(lines, "")
        if self.linenum is not None:
            prefix = "%s:%s:" % (self.file, self.linenum)
            prefix_len = len(prefix)
            if str(exc_msg)[:prefix_len] != prefix:
                exc_message = "%s:%s: %s: %s" % (self.file, self.linenum,
                                                 type.__name__, exc_msg)
            print exc_message

        if self.recipe.buildinfo:
            try:
                buildinfo = self.recipe.buildinfo
                buildinfo.error = exc_message
                buildinfo.file = self.file
                buildinfo.lastline = self.linenum
                buildinfo.stop()
            except:
                log.warning("could not write out to buildinfo")

        if cfg.debugRecipeExceptions and self.recipe.isatty():
            debugger.post_mortem(tb, type, exc_msg)
        else:
            sys.exit(1)
Пример #8
0
        def _filterBuildReqsByVersionStr(versionStr, troves):
            if not versionStr:
                return troves

            versionMatches = []
            if versionStr.find("@") == -1:
                if versionStr.find(":") == -1:
                    log.warning("Deprecated buildreq format.  Use " " foo=:tag, not foo=tag")
                    versionStr = ":" + versionStr

            for trove in troves:
                labels = trove.getVersion().iterLabels()
                if versionStr[0] == ":":
                    branchTag = versionStr[1:]
                    branchTags = [x.getLabel() for x in labels]
                    if branchTag in branchTags:
                        versionMatches.append(trove)
                else:
                    # versionStr must begin with an @
                    branchNames = []
                    for label in labels:
                        branchNames.append("@%s:%s" % (label.getNamespace(), label.getLabel()))
                    if versionStr in branchNames:
                        versionMatches.append(trove)
            return versionMatches
Пример #9
0
def _expandOnePath(path,
                   macros,
                   defaultDir=None,
                   braceGlob=False,
                   error=False):
    if braceGlob:
        return _expandPaths([path], macros, defaultDir, True, error)
    if defaultDir is None:
        defaultDir = macros.builddir

    path = path % macros
    if path and path[0] == '/':
        if path.startswith(macros.destdir):
            log.warning(
                "remove destdir from path name %s;"
                " absolute paths are automatically relative to destdir" % path)
        else:
            path = macros.destdir + path
    else:
        path = os.path.join(defaultDir, path)

    if error:
        if not os.path.exists(path):
            raise RuntimeError, "No such file '%s'" % path
    return path
Пример #10
0
def mirrorRemoved(sourceRepos, targetRepos, troveSet, test = False, callback = None):
    """Mirror removed-trove markers from the source to the target repository.

    troveSet is a mutable set of (name, version, flavor) tuples; entries
    missing on the target, or already marked removed there, are pruned
    from it in place.  Returns the number of troves committed.
    """
    if not troveSet:
        return 0
    # Build the default callback per call: a ChangesetCallback() default
    # argument is a single instance created at import time and shared
    # (with any accumulated state) by every invocation.
    if callback is None:
        callback = ChangesetCallback()
    log.debug("checking on %d removed troves", len(troveSet))
    # these removed troves better exist on the target
    present = targetRepos.hasTroves(list(troveSet))
    missing = [ x for x in troveSet if not present[x] ]
    # we can not have any "missing" troves while we mirror removals
    for t in missing:
        log.warning("Mirroring removed trove: valid trove not found on target: %s", t)
        troveSet.remove(t)
    # for the remaining removed troves, are any of them already mirrored?
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    cs = targetRepos.createChangeSet(jobList, recurse=False, withFiles=False,
                                     withFileContents=False, callback=callback)
    for trvCs in cs.iterNewTroveList():
        if trvCs.getType() == trove.TROVE_TYPE_REMOVED:
            troveSet.remove(trvCs.getNewNameVersionFlavor())
    log.debug("mirroring %d removed troves", len(troveSet))
    if not troveSet:
        return 0
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    log.debug("mirroring removed troves %s" % (displayJobList(jobList),))
    # grab the removed troves changeset
    cs = sourceRepos.createChangeSet(jobList, recurse = False,
                                     withFiles = False, withFileContents = False,
                                     callback = callback)
    log.debug("committing")
    targetRepos.commitChangeSet(cs, mirror = True, callback = callback)
    callback.done()
    return len(jobList)
Пример #11
0
    def log(self, remoteIp, authToken, methodName, args, kwArgs={}, exception=None, latency=None, systemId=None):
        # lazy re-open the log file in case it was rotated from underneath us
        self.reopen()
        if exception:
            exception = str(exception)

        (user, entitlements) = authToken[0], authToken[2]
        logStr = cPickle.dumps(
            (
                self.logFormatRevision,
                self.serverNameList,
                time.time(),
                remoteIp,
                (user, entitlements),
                methodName,
                args,
                kwArgs,
                exception,
                latency,
                systemId,
            )
        )
        try:
            self.fobj.write(struct.pack("!I", len(logStr)) + logStr)
            self.fobj.flush()
        except IOError, e:
            log.warning("'%s' while logging call from (%s,%s) to %s\n", str(e), remoteIp, user, methodName)
Пример #12
0
def mirrorRemoved(sourceRepos, targetRepos, troveSet, test = False, callback = None):
    """Propagate removed-trove markers from sourceRepos to targetRepos.

    Prunes troveSet in place (troves absent on the target, or already
    removed there, are dropped) and commits a removal changeset for the
    rest.  Returns the number of troves committed.
    """
    if not troveSet:
        return 0
    # Default the callback per invocation; the previous
    # ChangesetCallback() default argument was instantiated once at
    # import time and shared by every call.
    if callback is None:
        callback = ChangesetCallback()
    log.debug("checking on %d removed troves", len(troveSet))
    # these removed troves better exist on the target
    present = targetRepos.hasTroves(list(troveSet))
    missing = [ x for x in troveSet if not present[x] ]
    # we can not have any "missing" troves while we mirror removals
    for t in missing:
        log.warning("Mirroring removed trove: valid trove not found on target: %s", t)
        troveSet.remove(t)
    # for the remaining removed troves, are any of them already mirrored?
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    cs = targetRepos.createChangeSet(jobList, recurse=False, withFiles=False,
                                     withFileContents=False, callback=callback)
    for trvCs in cs.iterNewTroveList():
        if trvCs.getType() == trove.TROVE_TYPE_REMOVED:
            troveSet.remove(trvCs.getNewNameVersionFlavor())
    log.debug("mirroring %d removed troves", len(troveSet))
    if not troveSet:
        return 0
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    log.debug("mirroring removed troves %s" % (displayJobList(jobList),))
    # grab the removed troves changeset
    cs = sourceRepos.createChangeSet(jobList, recurse = False,
                                     withFiles = False, withFileContents = False,
                                     callback = callback)
    log.debug("committing")
    targetRepos.commitChangeSet(cs, mirror = True, callback = callback)
    callback.done()
    return len(jobList)
Пример #13
0
    def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
                                       flavoredList):
        """Choose the best resolution candidate from flavoredList.

        Within each (name, trailing label) bucket the candidate with the
        newest timestamp wins; exact timestamp ties fall back to the
        flavor score.  The overall winner is the entry with the highest
        (score, timestamp) pair.  Returns None for an empty list.
        """
        byNameLabel = {}
        for installFlavor, (name, ver, flav) in flavoredList:
            label = ver.trailingLabel()
            stamp = ver.timeStamps()[-1]
            if installFlavor is None:
                score = 0
            else:
                # FIXME: we should cache this scoring from before.
                score = installFlavor.score(flav)

            prev = byNameLabel.get((name, label))
            if prev is not None:
                prevScore, prevStamp, _ = prev
                if prevStamp > stamp:
                    continue
                if prevStamp == stamp and score < prevScore:
                    continue
            byNameLabel[name, label] = (score, stamp, (name, ver, flav))

        ranked = sorted(byNameLabel.itervalues())
        if not ranked:
            return None
        if len(ranked) > 1 and [x for x in ranked if x[1] == 0]:
            log.warning("Dependency tie-breaking may not be deterministic "
                        "because some versions are missing timestamps")
        # highest score, then latest timestamp, then name.
        return ranked[-1][-1]
Пример #14
0
 def check(self):
     """Validate the cached Permissions entry counts against a recompute.

     Pass 1 reads the count each permission currently has cached via
     UserGroupAllPermissions; pass 2 recomputes what the count should be.
     Every acl whose cached count differs is logged and recorded in
     self._status as a (permissionId, roleId, role) triple.

     @return: True when every cached count matches, False otherwise
     """
     db = self.getDB()
     cu = db.cursor()
     log.info("checking existing Permissions cache")
     # Pass 1: counts currently cached for each permission.
     cu.execute(
         """
     select p.permissionId, p.userGroupId, ug.userGroup, i.item, l.label, coalesce(ugap.c,0)
     from Permissions as p
     join UserGroups as ug using (userGroupId)
     join Items as i on p.itemId = i.itemId
     join Labels as l on p.labelId = l.labelId
     left join (
         select permissionId, count(*) as c
         from UserGroupAllPermissions
         join Instances using(instanceId)
         where Instances.isPresent != ?
         group by permissionId ) as ugap on p.permissionId = ugap.permissionId
     """, instances.INSTANCE_PRESENT_MISSING)
     # permissionId -> (roleId, role, item, label), for error reporting
     info = {}
     # permissionId -> cached count from pass 1
     existing = {}
     for permissionId, roleId, role, item, label, count in cu:
         info[permissionId] = (roleId, role, item, label)
         existing[permissionId] = count
     log.info("checking for missing Permissions caches...")
     # Pass 2: recompute the expected count per permission from the
     # instance/label/trove-pattern tables.
     cu.execute(
         """
     select p.permissionId, coalesce(checker.c,0)
     from Permissions as p
     left join (
         select permissionId, count(*) as c from (
             select Permissions.permissionId as permissionId,
                    Instances.instanceId as instanceId
             from Instances
             join Nodes using(itemId, versionId)
             join LabelMap using(itemId, branchId)
             join Permissions on
                 Permissions.labelId = 0 or Permissions.labelId = LabelMap.labelId
             join CheckTroveCache on
                 Permissions.itemId = CheckTroveCache.patternId and
                 Instances.itemId = CheckTroveCache.itemId
             where Instances.isPresent != ?
             ) as perms
          group by permissionId ) as checker using (permissionId)
     """, instances.INSTANCE_PRESENT_MISSING)
     self._status = set()
     ret = True
     for permissionId, newCounter in cu:
         # missing permissions from pass 1 count as zero cached entries
         crtCounter = existing.get(permissionId, 0)
         if crtCounter == newCounter:
             continue
         roleId, role, item, label = info[permissionId]
         log.warning(
             "acl(%d) (%s %s %s) caches %d entries instead of %d entries",
             permissionId, role, label, item, crtCounter, newCounter)
         self._status.add((permissionId, roleId, role))
         ret = False
     if not ret:
         log.info("check fails with %d errors found", len(self._status))
     return ret
Пример #15
0
 def doAction(self):
     """Run this action's do() step unless the target OS is unsupported."""
     if not self._isSupportedTarget():
         # Pass the values as lazy formatting arguments.  The original
         # applied '%' to only the first value ('...' % self.__class__
         # .__name__, self._getTarget()), which raises TypeError ("not
         # enough arguments for format string") instead of logging.
         log.warning('Action %s not supported for target OS %s',
                     self.__class__.__name__, self._getTarget())
         return
     if self.debug:
         debugger.set_trace()
     self.do()
Пример #16
0
 def doAction(self):
     """Invoke do() for this action, skipping unsupported target OSes."""
     if not self._isSupportedTarget():
         # Use lazy formatting args: the original bound '%' to just the
         # class name, so logging the warning raised TypeError ("not
         # enough arguments for format string").
         log.warning(
             'Action %s not supported for target OS %s',
             self.__class__.__name__, self._getTarget())
         return
     if self.debug:
         debugger.set_trace()
     self.do()
Пример #17
0
 def check(self):
     """Compare cached Permissions counts with freshly recomputed ones.

     Reads the current per-permission counts from
     UserGroupAllPermissions, recomputes the expected counts, logs every
     mismatching acl, and records (permissionId, roleId, role) triples in
     self._status.

     @return: True when all cached counts match, False otherwise
     """
     db = self.getDB()
     cu = db.cursor()
     log.info("checking existing Permissions cache")
     # Pass 1: counts as currently cached.
     cu.execute("""
     select p.permissionId, p.userGroupId, ug.userGroup, i.item, l.label, coalesce(ugap.c,0)
     from Permissions as p
     join UserGroups as ug using (userGroupId)
     join Items as i on p.itemId = i.itemId
     join Labels as l on p.labelId = l.labelId
     left join (
         select permissionId, count(*) as c
         from UserGroupAllPermissions
         join Instances using(instanceId)
         where Instances.isPresent != ?
         group by permissionId ) as ugap on p.permissionId = ugap.permissionId
     """, instances.INSTANCE_PRESENT_MISSING)
     # permissionId -> (roleId, role, item, label) for error reporting
     info = {}
     # permissionId -> cached count
     existing = {}
     for permissionId, roleId, role, item, label, count in cu:
         info[permissionId] = (roleId, role, item, label)
         existing[permissionId] = count
     log.info("checking for missing Permissions caches...")
     # Pass 2: the counts a full recompute would produce.
     cu.execute("""
     select p.permissionId, coalesce(checker.c,0)
     from Permissions as p
     left join (
         select permissionId, count(*) as c from (
             select Permissions.permissionId as permissionId,
                    Instances.instanceId as instanceId
             from Instances
             join Nodes using(itemId, versionId)
             join LabelMap using(itemId, branchId)
             join Permissions on
                 Permissions.labelId = 0 or Permissions.labelId = LabelMap.labelId
             join CheckTroveCache on
                 Permissions.itemId = CheckTroveCache.patternId and
                 Instances.itemId = CheckTroveCache.itemId
             where Instances.isPresent != ?
             ) as perms
          group by permissionId ) as checker using (permissionId)
     """, instances.INSTANCE_PRESENT_MISSING)
     self._status = set()
     ret = True
     for permissionId, newCounter in cu:
         # permissions absent from pass 1 count as zero cached entries
         crtCounter = existing.get(permissionId, 0)
         if crtCounter == newCounter:
             continue
         roleId, role, item, label = info[permissionId]
         log.warning("acl(%d) (%s %s %s) caches %d entries instead of %d entries",
                     permissionId, role, label, item, crtCounter, newCounter)
         self._status.add((permissionId, roleId, role))
         ret = False
     if not ret:
         log.info("check fails with %d errors found", len(self._status))
     return ret
Пример #18
0
    def freezeTimestamp(self):
        """
        Return the revision's timestamp frozen as a string ("%.3f"
        seconds), which can later be used to restore the timestamp onto
        the string'ified form of a version object.

        @rtype: str
        """
        stamp = self.timeStamp
        if not stamp:
            log.warning('freezeTimestamp() called on a Revision that has no timestamp')
        return "%.3f" % stamp
Пример #19
0
def mainWorkflow(cfg = None, callback=None,
                 test=False, sync=False, infoSync=False,
                 checkSync=False, fastSync=False):
    """Drive a complete mirror run from the parsed mirror configuration.

    Acquires the configured lock file (returning early if another mirror
    run holds it), builds source/reference/target repository clients, and
    calls mirrorRepository until it reports no more work.  checkSync
    short-circuits into checkSyncRepos instead.
    """
    # Default the callback per invocation: the previous
    # ChangesetCallback() default argument was a single instance created
    # at import time and shared by every call.
    if callback is None:
        callback = ChangesetCallback()
    import fcntl
    if cfg.lockFile:
        try:
            log.debug('checking for lock file')
            # NOTE: the handle lives in a local; the lock is held for the
            # life of the process and released on exit.
            lock = open(cfg.lockFile, 'w')
            fcntl.lockf(lock, fcntl.LOCK_EX|fcntl.LOCK_NB)
        except IOError:
            log.warning('lock held by another process, exiting')
            return

    # need to make sure we have a 'source' section
    if not cfg.hasSection('source'):
        log.debug("ERROR: mirror configuration file is missing a [source] section")
        raise RuntimeError("Mirror configuration file is missing a [source] section")
    sourceRepos = _getMirrorClient(cfg, 'source')

    # Optional reference repository
    if cfg.hasSection('reference'):
        refRepos = _getMirrorClient(cfg, 'reference')
    else:
        refRepos = sourceRepos

    # we need to build a target repo client for each of the "target*"
    # sections in the config file
    targets = []
    for name in cfg.iterSectionNames():
        if not name.startswith("target"):
            continue
        target = _getMirrorClient(cfg, name)
        target = TargetRepository(target, cfg, name, test=test)
        targets.append(target)
    # checkSync is a special operation...
    if checkSync:
        return checkSyncRepos(cfg, refRepos, targets)
    # we pass in the sync flag only the first time around, because after
    # that we need the targetRepos mark to advance accordingly after being
    # reset to -1
    callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                 test = test, sync = sync,
                                 syncSigs = infoSync,
                                 callback = callback,
                                 fastSync = fastSync,
                                 referenceRepos=refRepos,
                                 )
    while callAgain:
        callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                     test = test, callback = callback,
                                     fastSync = fastSync,
                                     referenceRepos=refRepos,
                                     )
Пример #20
0
    def freezeTimestamp(self):
        """
        Freeze the revision's timestamp to its "%.3f" string form, which
        can later be used to restore the timestamp to the string'ified
        version of a version object.

        @rtype: str
        """
        value = self.timeStamp
        if not value:
            log.warning(
                'freezeTimestamp() called on a Revision that has no timestamp')
        return "%.3f" % value
Пример #21
0
 def check(self):
     """Flag troves that are not present yet still own TroveInfo rows.

     Stores the offending (instanceId, count) rows in self._status and
     returns False when any were found, True otherwise.
     """
     cu = self.getDB().cursor()
     log.info("checking for extraneous troveinfo records")
     cu.execute(""" select instanceId, count(*)
         from Instances join TroveInfo using(instanceId)
         where Instances.isPresent = ?
         group by instanceId having count(*) > 0 """, instances.INSTANCE_PRESENT_MISSING)
     self._status = cu.fetchall()
     if not self._status:
         return True
     log.warning("found %d non-present troves with troveinfo records", len(self._status))
     return False
Пример #22
0
    def incrementSourceCount(self):
        """
        Bump the release number of the final version element by one and
        reset its time stamp, invalidating any cached string/hash state.
        """
        self._clearVersionCache()
        self.strRep = None
        self.hash = None
        self.versions[-1]._incrementSourceCount(self.shadowLength())
        if self.cached:
            log.warning('incrementSourceCount() was called on a version that '
                        'is cached.  Someone may already have a reference to '
                        'the cached object.')
Пример #23
0
    def runCommand(self, client, cfg, argSet, args):
        """Parse build-command options and submit a build job.

        Handles the deprecated 'buildgroup' alias, flavor overrides, and
        the group-recursion mode, then hands the created job to
        self._build().
        """
        log.setVerbosity(log.DEBUG if self.verbose else log.INFO)
        command, troveSpecs = self.requireParameters(
            args, 'troveSpec', appendExtra=True)
        if command == 'buildgroup':
            log.warning(
                '"buildgroup" is deprecated and will be removed in a future release - use "build --recurse" instead'
            )
        rebuild = command == 'rebuild'

        flavorSpec = argSet.pop('flavor', None)
        if flavorSpec:
            overrideWith = deps.parseFlavor(flavorSpec)
            if overrideWith is None:
                raise errors.ParseError("Invalid flavor: '%s'" % flavorSpec)
            # apply the override to both the build flavor and every
            # configured install flavor
            client.buildConfig.buildFlavor = deps.overrideFlavor(
                client.buildConfig.buildFlavor, overrideWith)
            client.buildConfig.flavor = [
                deps.overrideFlavor(oldFlavor, overrideWith)
                for oldFlavor in client.buildConfig.flavor]

        matchSpecs = argSet.pop('match', [])
        hosts = argSet.pop('host', [])
        labels = argSet.pop('label', [])
        recurseGroups = argSet.pop('recurse', False) or command == 'buildgroup'

        if recurseGroups:
            if argSet.pop('binary-search', False):
                recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
            elif not compat.ConaryVersion().supportsFindGroupSources():
                log.warning('Your conary does not support recursing a group'
                            ' source component, defaulting to searching the'
                            ' binary version')
                recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
            else:
                recurseGroups = client.BUILD_RECURSE_GROUPS_SOURCE

        self._prep(client, argSet)
        job = client.createBuildJob(troveSpecs,
                                    limitToHosts=hosts,
                                    limitToLabels=labels,
                                    recurseGroups=recurseGroups,
                                    matchSpecs=matchSpecs,
                                    rebuild=rebuild)
        return self._build(client, job, argSet)
Пример #24
0
    def resetTimeStamps(self, clearCache=True):
        """Stamp every revision with the current time.

        Useful for attaching somewhat arbitrary timestamps to
        user-supplied strings.  With clearCache, the version cache is
        flushed first.
        """
        if self.cached:
            log.warning('resetTimeStamps() was called on a version that is '
                        'cached.  Someone may already have a reference to '
                        'the cached object.')
        # assert not self.cached
        if clearCache:
            self._clearVersionCache()

        # each revision gets its own time.time() reading, as before
        for rev in self.iterRevisions():
            rev.timeStamp = time.time()
Пример #25
0
    def resetTimeStamps(self, clearCache=True):
        """Assign the current time to every revision's timestamp.

        Intended for giving somewhat arbitrary timestamps to
        user-supplied strings; clearCache additionally flushes the
        version cache.
        """
        if self.cached:
            log.warning('resetTimeStamps() was called on a version that is '
                        'cached.  Someone may already have a reference to '
                        'the cached object.')
        # assert not self.cached
        if clearCache:
            self._clearVersionCache()

        # note: time.time() is sampled per revision, matching the
        # original behavior
        for revision in self.iterRevisions():
            revision.timeStamp = time.time()
Пример #26
0
    def incrementSourceCount(self):
        """
        Increment the release number of the last element of the version
        by one and reset the time stamp, discarding cached hash/string
        representations.
        """
        self._clearVersionCache()
        for attr in ('hash', 'strRep'):
            setattr(self, attr, None)
        self.versions[-1]._incrementSourceCount(self.shadowLength())
        if self.cached:
            log.warning('incrementSourceCount() was called on a version that '
                        'is cached.  Someone may already have a reference to '
                        'the cached object.')
Пример #27
0
def _expandPaths(paths, macros, defaultDir=None, braceGlob=True, error=False):
    """
    Expand braces, globs, and macros in path names, and root all path names
    to either the build dir or dest dir.  Relative paths (not starting with
    a /) are relative to builddir.  All absolute paths to are relative to
    destdir.

    @param paths: mixed list of plain strings, Glob, and Regexp items
    @param macros: macro set used for '%' interpolation; must provide
        destdir and builddir
    @param defaultDir: root for relative paths (defaults to macros.builddir)
    @param braceGlob: expand brace/glob patterns in plain string paths
    @param error: if set, raise RuntimeError listing any missing results
    """
    destdir = macros.destdir
    if defaultDir is None:
        defaultDir = macros.builddir
    expPaths = []
    for item in paths:
        if isinstance(item, Regexp):
            isRegexp = True
            path = item.pattern
        elif isinstance(item, Glob):
            isRegexp = False
            # NOTE(review): this also turns globbing on for every later
            # item in the list, not just this Glob -- confirm intended.
            braceGlob = True
            path = item.pattern
        else:
            isRegexp = False
            path = item
        path = path % macros
        if path[0] == '/':
            if path.startswith(destdir):
                # caller already rooted the path under destdir; warn
                # instead of doubling the prefix
                log.warning(
                    "remove destdir from path name %s;"
                    " absolute paths are automatically relative to destdir" %
                    path)
            else:
                path = destdir + path
            baseDir = destdir
        else:
            path = defaultDir + os.sep + path
            baseDir = defaultDir
        if isRegexp:
            expPaths.extend(matchRegexp(baseDir, path, item))
        elif braceGlob:
            expPaths.extend(util.braceGlob(path))
        else:
            expPaths.append(path)
    if error:
        # collect all missing paths before raising so the message is complete
        notfound = []
        for path in expPaths:
            if not os.path.exists(path):
                notfound.append(path)
        if notfound:
            raise RuntimeError, "No such file(s) '%s'" % "', '".join(notfound)
    return expPaths
Пример #28
0
def _expandPaths(paths, macros, defaultDir=None, braceGlob=True, error=False):
    """
    Expand braces, globs, and macros in path names, and root all path names
    to either the build dir or dest dir.  Relative paths (not starting with
    a /) are relative to builddir.  All absolute paths are relative to
    destdir.

    @param paths: iterable of path strings, Regexp, or Glob items
    @param macros: macro set used for interpolation; supplies destdir and
        builddir
    @param defaultDir: base for relative paths; defaults to macros.builddir
    @param braceGlob: expand {a,b} braces and shell globs for plain strings
    @param error: if True, raise RuntimeError when an expanded path does
        not exist
    """
    destdir = macros.destdir
    if defaultDir is None:
        defaultDir = macros.builddir
    expPaths = []
    for item in paths:
        # Glob items force glob expansion for that item only; do not
        # clobber the caller's braceGlob setting for subsequent items
        # (the old code mutated the parameter, making it sticky).
        itemGlob = braceGlob
        if isinstance(item, Regexp):
            isRegexp = True
            path = item.pattern
        elif isinstance(item, Glob):
            isRegexp = False
            itemGlob = True
            path = item.pattern
        else:
            isRegexp = False
            path = item
        path = path % macros
        # guard against empty paths, matching _expandOnePath
        if path and path[0] == '/':
            if path.startswith(destdir):
                log.warning(
                    "remove destdir from path name %s;"
                    " absolute paths are automatically relative to destdir"
                    %path)
            else:
                path = destdir + path
            baseDir = destdir
        else:
            path = defaultDir + os.sep + path
            baseDir = defaultDir
        if isRegexp:
            expPaths.extend(matchRegexp(baseDir, path, item))
        elif itemGlob:
            expPaths.extend(util.braceGlob(path))
        else:
            expPaths.append(path)
    if error:
        notfound = [path for path in expPaths if not os.path.exists(path)]
        if notfound:
            # call-form raise is valid on both Python 2 and Python 3
            raise RuntimeError("No such file(s) '%s'" % "', '".join(notfound))
    return expPaths
Пример #29
0
    def setTimeStamps(self, timeStamps, clearCache=True):
        """Assign the given float timestamps to this version's revisions."""
        if self.cached:
            log.warning('setTimeStamps() was called on a version that is '
                        'cached.  Someone may already have a reference to '
                        'the cached object.')
        # assert not self.cached
        if clearCache and self.timeStamps():
            self._clearVersionCache()

        stampIndex = 0
        for revision in self.versions:
            if not isinstance(revision, AbstractRevision):
                continue
            assert isinstance(timeStamps[stampIndex], float)
            revision.timeStamp = timeStamps[stampIndex]
            stampIndex += 1
Пример #30
0
    def setTimeStamps(self, timeStamps, clearCache=True):
        """Attach the supplied timestamps to the revision entries in order."""
        if self.cached:
            log.warning('setTimeStamps() was called on a version that is '
                        'cached.  Someone may already have a reference to '
                        'the cached object.')
        # assert not self.cached
        if clearCache and self.timeStamps():
            self._clearVersionCache()

        nextStamp = 0
        for entry in self.versions:
            if isinstance(entry, AbstractRevision):
                stamp = timeStamps[nextStamp]
                assert isinstance(stamp, float)
                entry.timeStamp = stamp
                nextStamp += 1
Пример #31
0
    def __init__(self, readConfigFiles=False, root='', conaryConfig=None,
                 serverConfig=None, ignoreErrors=False, log=None,
                 strictMode=None):
        """
        Build an rmake configuration layered over the local conary config.

        @param readConfigFiles: read /etc/rmake/rmakerc, ~/.rmakerc and
            ./rmakerc when True
        @param root: prefix prepended to system config paths (for testing)
        @param conaryConfig: conary configuration to copy values from
        @param serverConfig: server configuration used to seed the
            repository map and repository user list
        @param strictMode: when set, force managed policy and disable
            copying conary/config into the chroot
        """
        # we default the value of these items to whatever they
        # are set to on the local system's conaryrc.
        conarycfg.ConaryConfiguration.__init__(self, readConfigFiles=False)
        if hasattr(self, 'setIgnoreErrors'):
            self.setIgnoreErrors(ignoreErrors)
        for info in RmakeBuildContext._getConfigOptions():
            if info[0] not in self:
                self.addConfigOption(*info)
        if strictMode is not None:
            self.strictMode = strictMode
        if not hasattr(self, 'rmakeUrl'):
            self.rmakeUrl = None
        if not hasattr(self, 'clientCert'):
            self.clientCert = None

        if readConfigFiles:
            if os.path.exists(root + '/etc/rmake/clientrc'):
                clog.warning(root + '/etc/rmake/clientrc should be renamed'
                                   ' to /etc/rmake/rmakerc')
                self.read(root + '/etc/rmake/clientrc', exception=False)
            self.read(root + '/etc/rmake/rmakerc', exception=False)
            # has_key() is deprecated (and gone in Python 3); use "in"
            if "HOME" in os.environ:
                self.read(root + os.environ["HOME"] + "/" + ".rmakerc",
                          exception=False)
            self.read('rmakerc', exception=False)

        if self.strictMode:
            self.enforceManagedPolicy = True
            self.copyInConary = False
            self.copyInConfig = False

        # these values are not set based on
        # config file values - we don't want to touch the system database,
        # and we don't want to use conary's logging mechanism.
        self.root = ':memory:'
        self.dbPath = ':memory:'
        self.logFile = []
        for option in self._hiddenOptions:
            del self._lowerCaseMap[option.lower()]

        self.useConaryConfig(conaryConfig)
        if serverConfig:
            self.reposName = serverConfig.reposName
            self.repositoryMap.update(serverConfig.getRepositoryMap())
            self.user.extend(serverConfig.reposUser)
Пример #32
0
 def check(self):
     """Return False (and record offenders) if any trove that is marked
     missing still owns troveinfo rows; otherwise return True."""
     database = self.getDB()
     cursor = database.cursor()
     log.info("checking for extraneous troveinfo records")
     cursor.execute(
         """ select instanceId, count(*)
         from Instances join TroveInfo using(instanceId)
         where Instances.isPresent = ?
         group by instanceId having count(*) > 0 """,
         instances.INSTANCE_PRESENT_MISSING)
     self._status = cursor.fetchall()
     if not self._status:
         return True
     log.warning("found %d non-present troves with troveinfo records",
                 len(self._status))
     return False
Пример #33
0
 def updateFromReloaded(self, newCfg, log):
     """Copy updateable options from a newly reloaded config"""
     newCfg.sanityCheck()
     newCfg.sanityCheckForStart()
     for option in self.keys():
         newValue = newCfg[option]
         if self[option] == newValue:
             continue
         if option not in self._reloadable:
             # changed, but cannot be applied to a running server
             if log:
                 log.warning("Change of option %s requires a restart", option)
             continue
         self[option] = newValue
         rendered = StringIO()
         self.displayKey(option, rendered)
         if log:
             log.info("Configuration changed: %s", rendered.getvalue().rstrip())
Пример #34
0
    def runCommand(self, client, cfg, argSet, args):
        """
        Build (or rebuild) the given trove specs on the rmake server.

        Pops flavor/match/host/label/recurse options out of argSet,
        mutates client.buildConfig with any flavor overrides, constructs
        a build job and hands it off to self._build().

        @param client: rmake helper whose buildConfig may be modified
        @param cfg: command configuration (unused here but part of the
            command interface)
        @param argSet: dict of parsed command-line options; consumed keys
            are removed
        @param args: positional arguments; first is the command name
        """
        if self.verbose:
            log.setVerbosity(log.DEBUG)
        else:
            log.setVerbosity(log.INFO)
        command, troveSpecs = self.requireParameters(args, 'troveSpec',
                                                     appendExtra=True)
        if command == 'buildgroup':
            log.warning('"buildgroup" is deprecated and will be removed in a future release - use "build --recurse" instead')
        rebuild = (command == 'rebuild')
        flavorSpec = argSet.pop('flavor', None)
        if flavorSpec:
            flavor = deps.parseFlavor(flavorSpec)
            if flavor is None:
                raise errors.ParseError("Invalid flavor: '%s'" % flavorSpec)
            # override both the build flavor and every context flavor
            # with the user-supplied flavor
            newFlavor = deps.overrideFlavor(client.buildConfig.buildFlavor, 
                                            flavor)
            client.buildConfig.buildFlavor = newFlavor
            newFlavors = []
            for oldFlavor in client.buildConfig.flavor:
                newFlavors.append(deps.overrideFlavor(oldFlavor, flavor))
            client.buildConfig.flavor = newFlavors

        matchSpecs = argSet.pop('match', [])
        hosts = argSet.pop('host', [])
        labels = argSet.pop('label', [])
        recurseGroups = argSet.pop('recurse', False) or command == 'buildgroup'

        if recurseGroups:
            # prefer recursing the group source; fall back to the binary
            # group when explicitly requested or when conary is too old
            if argSet.pop('binary-search', False):
                recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
            elif not compat.ConaryVersion().supportsFindGroupSources():
                log.warning('Your conary does not support recursing a group'
                            ' source component, defaulting to searching the'
                            ' binary version')
                recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
            else:
                recurseGroups = client.BUILD_RECURSE_GROUPS_SOURCE

        self._prep(client, argSet)
        job = client.createBuildJob(troveSpecs, limitToHosts=hosts,
                                    limitToLabels=labels,
                                    recurseGroups=recurseGroups,
                                    matchSpecs=matchSpecs,
                                    rebuild=rebuild)
        return self._build(client, job, argSet)
Пример #35
0
 def resolveDependencies(self):
     try:
         label, leavesOnly = self._labelPathWithLeaves[self.index]
         if hasattr(self.troveSource, 'resolveDependenciesWithFilter'):
             return self.troveSource.resolveDependenciesWithFilter(label,
                             self.fullDepList, self.filterSuggestions,
                             leavesOnly=leavesOnly)
         else:
             return self.troveSource.resolveDependencies(label,
                             self.depList, leavesOnly=leavesOnly)
     except repoerrors.OpenError, err:
         log.warning('Could not access %s for dependency resolution: %s' % (
                             self._labelPathWithLeaves[self.index][0], err))
         # return an empty result.
         results = {}
         for depSet in self.depList:
             results[depSet] = [ [] for x in depSet.iterDeps() ]
         return results
Пример #36
0
    def __init__(self, conaryVersion=None):
        global testing
        if conaryVersion is None:
            if not testing:
                conaryVersion = constants.version
            else:
                conaryVersion = [9999, 9999, 9999]

        try:
            # first, remove any changeset id (RMK-1077)
            conaryVersion = conaryVersion.split('_', 1)[0]
            # then convert to integers
            self.conaryVersion = [int(x) for x in conaryVersion.split('.')]
        except ValueError, err:
            if not self._warnedUser:
                log.warning('nonstandard conary version "%s". '
                            'Assuming latest.' % (conaryVersion))
                ConaryVersion._warnedUser = True
            self.conaryVersion = [9999]
Пример #37
0
 def _exceptionOccured(self, exc_info):
     """Log an exception raised from a callback and remember it."""
     etype, e, tb = exc_info
     # build "ExcType: message" (omit the colon for empty messages)
     msg = "%s" % etype.__name__
     detail = str(e)
     if detail:
         msg += ": %s" % detail
     # walk to the innermost frame, where the exception was raised
     inner = tb.tb_next
     while inner.tb_next:
         inner = inner.tb_next
     filename = inner.tb_frame.f_code.co_filename
     linenum = inner.tb_frame.f_lineno
     log.warning("Unhandled exception occurred when invoking callback:\n"
                 "%s:%s\n"
                 " %s", filename, linenum, msg)
     # log the full traceback if debugging (--debug=all)
     log.debug("".join(traceback.format_exception(*exc_info)))
     if not hasattr(self, "exceptions"):
         self.exceptions = []
     self.exceptions.append(e)
Пример #38
0
 def resolveDependencies(self):
     try:
         label, leavesOnly = self._labelPathWithLeaves[self.index]
         if hasattr(self.troveSource, 'resolveDependenciesWithFilter'):
             return self.troveSource.resolveDependenciesWithFilter(
                 label,
                 self.fullDepList,
                 self.filterSuggestions,
                 leavesOnly=leavesOnly)
         else:
             return self.troveSource.resolveDependencies(
                 label, self.depList, leavesOnly=leavesOnly)
     except repoerrors.OpenError, err:
         log.warning('Could not access %s for dependency resolution: %s' %
                     (self._labelPathWithLeaves[self.index][0], err))
         # return an empty result.
         results = {}
         for depSet in self.depList:
             results[depSet] = [[] for x in depSet.iterDeps()]
         return results
Пример #39
0
    def __init__(self, conaryVersion=None):
        global testing
        if conaryVersion is None:
            if not testing:
                conaryVersion = constants.version
            else:
                conaryVersion = [9999,9999,9999]

        try:
            # first, remove any changeset id (RMK-1077)
            conaryVersion = conaryVersion.split('_', 1)[0]
            # then convert to integers
            self.conaryVersion = [int(x) for x in conaryVersion.split('.')]
        except ValueError, err:
            if not self._warnedUser:
                log.warning('nonstandard conary version "%s". '
                            'Assuming latest.'
                            % (conaryVersion))
                ConaryVersion._warnedUser = True
            self.conaryVersion = [9999]
Пример #40
0
def get_bootloader(parent, image_root, geometry, override=None):
    """
    Choose an appropriate bootloader for the given image and return a
    Bootloader instance used to prepare and install the bootloader.
    """
    def _in_image(relpath):
        # does the image root contain this path?
        return os.path.exists(util.joinPaths(image_root, relpath))

    grubpath = util.searchFile(
        "grub", util.braceExpand("%s/{sbin,usr/sbin}" % image_root))

    wants_extlinux = _in_image("sbin/bootman") and _in_image("sbin/extlinux")
    if override == "extlinux" or (not override and wants_extlinux):
        return ExtLinuxInstaller(parent, image_root, geometry)
    if override == "grub2" or (not override and
                               _in_image("usr/sbin/grub2-install")):
        return Grub2Installer(parent, image_root, geometry)
    if override == "grub" or (not override and grubpath):
        return GrubInstaller(parent, image_root, geometry,
                             grubpath.replace(image_root, ""))
    log.warning("Could not find extlinux (with bootman) or grub")
    log.warning("No bootloader will be installed!")
    return DummyInstaller(parent, image_root, geometry)
Пример #41
0
    def load(self, path):
        """
        Populate the trove cache from a changeset file at path.

        Corrupt or unreadable cache files are ignored (the cache simply
        starts empty), as are caches written with a newer major format
        version than self.VERSION.
        """
        assert (not self.cache and not self.depCache)
        try:
            cs = changeset.ChangeSetFromFile(path)
        except filecontainer.BadContainer:
            log.warning('trove cache %s was corrupt, ignoring' % path)
            return
        except (IOError, errors.ConaryError):
            # a missing/unreadable cache is not an error; start fresh
            return

        for trvCs in cs.iterNewTroveList():
            trv = trove.Trove(trvCs, skipIntegrityChecks=True)
            self.cache[trv.getNameVersionFlavor()] = trv

        self._cached(self.cache.keys(), [x[1] for x in self.cache.values()])

        try:
            # NB: "fileid" and pathid got reversed here by mistake, try not to
            # think too hard about it.
            contType, depContents = cs.getFileContents(
                self._fileId, self._troveCacheVersionPathId)
        except KeyError:
            # no version record at all: treat as the oldest format
            self.version = (0, 0)
        else:
            # the version record is stored as "major minor"
            versionList = depContents.get().read().split(' ')
            self.version = (int(versionList[0]), int(versionList[1]))

        if self.version[0] > self.VERSION[0]:
            # major number is too big for us; we can't load this
            return

        # Timestamps must come first because some other caches use it to
        # construct versions.
        self._cs = cs
        self._loadTimestamps()
        self._loadDeps()
        self._loadDepSolutions()
        self._loadFileCache()
        self._startingSizes = self._getSizeTuple()
        self._cs = None
Пример #42
0
    def load(self, path):
        """
        Populate the trove cache from a changeset file at path.

        Corrupt or unreadable cache files are ignored (the cache simply
        starts empty), as are caches written with a newer major format
        version than self.VERSION.
        """
        assert(not self.cache and not self.depCache)
        try:
            cs = changeset.ChangeSetFromFile(path)
        except filecontainer.BadContainer:
            log.warning('trove cache %s was corrupt, ignoring' %path)
            return
        except (IOError, errors.ConaryError):
            # a missing/unreadable cache is not an error; start fresh
            return

        for trvCs in cs.iterNewTroveList():
            trv = trove.Trove(trvCs, skipIntegrityChecks = True)
            self.cache[trv.getNameVersionFlavor()] = trv

        self._cached(self.cache.keys(), [ x[1] for x in self.cache.values() ])

        try:
            # NB: "fileid" and pathid got reversed here by mistake, try not to
            # think too hard about it.
            contType, depContents = cs.getFileContents(
                    self._fileId, self._troveCacheVersionPathId)
        except KeyError:
            # no version record at all: treat as the oldest format
            self.version = (0, 0)
        else:
            # the version record is stored as "major minor"
            versionList = depContents.get().read().split(' ')
            self.version = (int(versionList[0]), int(versionList[1]))

        if self.version[0] > self.VERSION[0]:
            # major number is too big for us; we can't load this
            return

        # Timestamps must come first because some other caches use it to
        # construct versions.
        self._cs = cs
        self._loadTimestamps()
        self._loadDeps()
        self._loadDepSolutions()
        self._loadFileCache()
        self._startingSizes = self._getSizeTuple()
        self._cs = None
Пример #43
0
    def lookupName(self, root, name):
        """
        Map a user/group name to its numeric id inside root, caching the
        result.  Falls back to id 0 (root) when the name cannot be found.

        @param root: filesystem root to chroot into for the lookup (only
            possible when running as root)
        @param name: name to look up; a leading '+' means the name is a
            numeric id encoded as a string
        """
        theId = self.nameCache.get(name, None)
        if theId is not None:
            return theId

        # if not root, cannot chroot and so fall back to system ids
        getChrootIds = root and root != '/' and not os.getuid()

        if getChrootIds:
            if root[0] != '/':
                root = os.sep.join((os.getcwd(), root))
            curDir = os.open(".", os.O_RDONLY)
            # chdir to the current root to allow us to chroot
            # back out again
            os.chdir('/')
            os.chroot(root)

        if name and name[0] == '+':
            # An id mapped as a string, e.g. '+1000'
            try:
                theId = int(name)
            except ValueError:
                log.warning('%s %s does not exist - using root', self.name,
                            name)
                # fall back to root's id; the old code left theId as None
                # here and cached/returned None instead of 0
                theId = 0
        else:
            try:
                theId = self.nameLookupFn(name)[2]
            except KeyError:
                log.warning('%s %s does not exist - using root', self.name,
                            name)
                theId = 0

        if getChrootIds:
            os.chroot(".")
            os.fchdir(curDir)
            os.close(curDir)

        self.nameCache[name] = theId
        self.idCache[theId] = name
        return theId
Пример #44
0
 def check(self):
     """Return False (recording the missing pairs) when CheckTroveCache
     lacks any required (patternId, itemId) entry; otherwise True."""
     db = self.getDB()
     log.info("checking the state of the CheckTroveCache table")
     cu = db.cursor()
     cu.execute("select patternId, itemId from CheckTroveCache")
     existing = set((row[0], row[1]) for row in cu.fetchall())
     cu.execute("select distinct i.itemId, i.item from Permissions as p "
                "join Items as i using(itemId)")
     patterns = set((row[0], row[1]) for row in cu.fetchall())
     cu.execute("select itemId, item from Items")
     troveNames = set((row[0], row[1]) for row in cu.fetchall())
     # every pattern must have a cache row for each trove name it matches
     required = set((patternId, itemId)
                    for patternId, pattern in patterns
                    for itemId, item in troveNames
                    if items.checkTrove(pattern, item))
     self._status = required.difference(existing)
     if len(self._status):
         log.warning("found %d entries that are missing from CheckTroveCache", len(self._status))
         return False
     return True
Пример #45
0
    def lookupName(self, root, name):
        """
        Map a user/group name to its numeric id inside root, caching the
        result.  Falls back to id 0 (root) when the name cannot be found.

        @param root: filesystem root to chroot into for the lookup (only
            possible when running as root)
        @param name: name to look up; a leading '+' means the name is a
            numeric id encoded as a string
        """
        theId = self.nameCache.get(name, None)
        if theId is not None:
            return theId

        # if not root, cannot chroot and so fall back to system ids
        getChrootIds = root and root != '/' and not os.getuid()

        if getChrootIds:
            if root[0] != '/':
                root = os.sep.join((os.getcwd(), root))
            curDir = os.open(".", os.O_RDONLY)
            # chdir to the current root to allow us to chroot
            # back out again
            os.chdir('/')
            os.chroot(root)

        if name and name[0] == '+':
            # An id mapped as a string, e.g. '+1000'
            try:
                theId = int(name)
            except ValueError:
                log.warning('%s %s does not exist - using root', self.name,
                            name)
                # fall back to root's id; the old code left theId as None
                # here and cached/returned None instead of 0
                theId = 0
        else:
            try:
                theId = self.nameLookupFn(name)[2]
            except KeyError:
                log.warning('%s %s does not exist - using root', self.name, name)
                theId = 0

        if getChrootIds:
            os.chroot(".")
            os.fchdir(curDir)
            os.close(curDir)

        self.nameCache[name] = theId
        self.idCache[theId] = name
        return theId
Пример #46
0
def _expandOnePath(path, macros, defaultDir=None, braceGlob=False, error=False):
    """
    Expand macros in a single path name and root it in destdir (absolute
    paths) or defaultDir/builddir (relative paths).

    @param braceGlob: if True, delegate to _expandPaths and return a list
        of matches instead of a single path
    @param error: if True, raise RuntimeError when the path does not exist
    """
    if braceGlob:
        return _expandPaths([path], macros, defaultDir, True, error)
    if defaultDir is None:
        defaultDir = macros.builddir

    path = path % macros
    if path and path[0] == '/':
        if path.startswith(macros.destdir):
            log.warning(
                "remove destdir from path name %s;"
                " absolute paths are automatically relative to destdir"
                %path)
        else:
            path = macros.destdir + path
    else:
        path = os.path.join(defaultDir, path)

    if error:
        if not os.path.exists(path):
            # call-form raise is valid on both Python 2 and Python 3
            raise RuntimeError("No such file '%s'" % path)
    return path
Пример #47
0
    def _simpleTroveList(self, troveList, newFilesByTrove):
        log.info('Verifying %s' % " ".join(x[1].getName() for x in troveList))
        changedTroves = set()

        try:
            result = update.buildLocalChanges(self.db, troveList,
                                              root=self.cfg.root,
                                              forceSha1=self.forceHashCheck,
                                              ignoreTransient=True,
                                              updateContainers=True,
                                              statCache = self.statCache)
            if not result: return
            cs = result[0]
            changed = False
            for (changed, trv) in result[1]:
                if changed:
                    changedTroves.add(trv.getNameVersionFlavor())
        except OSError, err:
            if err.errno == 13:
                log.warning("Permission denied creating local changeset for"
                            " %s " % str([ x[0].getName() for x in troveList ]))
            return
Пример #48
0
 def _exceptionOccured(self, exc_info):
     '''Record an exception raised from a callback and log it.'''
     etype, e, tb = exc_info
     # build 'ExcType: message' (no colon part for empty messages)
     msg = '%s' % etype.__name__
     text = str(e)
     if text:
         msg += ': %s' % text
     # find the innermost traceback frame, where the raise happened
     inner = tb.tb_next
     while inner.tb_next:
         inner = inner.tb_next
     frameFile = inner.tb_frame.f_code.co_filename
     frameLine = inner.tb_frame.f_lineno
     log.warning('Unhandled exception occurred when invoking callback:\n'
                 '%s:%s\n'
                 ' %s', frameFile, frameLine, msg)
     # log the full traceback if debugging (--debug=all)
     log.debug(''.join(traceback.format_exception(*exc_info)))
     if not hasattr(self, 'exceptions'):
         self.exceptions = []
     self.exceptions.append(e)
Пример #49
0
    def incrementBuildCount(self):
        """
        Increments the build count of the trailing revision, invalidating
        cached hash/string forms and resetting the timestamp.
        """
        # if the source count is the right length for this shadow
        # depth, just increment the build count (without lengthing
        # it). if the source count is too short, make the build count
        # the right length for this shadow
        self._clearVersionCache()

        shadowLength = self.shadowLength()
        # drop memoized hash and string representations; they are stale now
        self.hash = None
        self.strRep = None

        sourceCount = self.versions[-1].getSourceCount()
        buildCount = self.versions[-1].getBuildCount()

        if sourceCount.shadowCount() == shadowLength:
            if buildCount:
                buildCount.increment(buildCount.shadowCount())
            else:
                # first build at this shadow depth
                buildCount = SerialNumber('1')
                self.versions[-1]._setBuildCount(buildCount)
        else:
            if buildCount:
                buildCount.increment(shadowLength)
            else:
                # pad with zeros so the build count matches the shadow depth
                buildCount = SerialNumber(
                            ".".join([ '0' ] * shadowLength + [ '1' ] ))
                self.versions[-1]._setBuildCount(buildCount)

        if self.cached:
            log.warning('incrementBuildCount() was called on a version that '
                        'is cached.  Someone may already have a reference to '
                        'the cached object.')
        # assert not self.cached

        self.versions[-1].resetTimeStamp()
Пример #50
0
    def incrementBuildCount(self):
        """
        Increments the build count of the trailing revision, invalidating
        cached hash/string forms and resetting the timestamp.
        """
        # if the source count is the right length for this shadow
        # depth, just increment the build count (without lengthing
        # it). if the source count is too short, make the build count
        # the right length for this shadow
        self._clearVersionCache()

        shadowLength = self.shadowLength()
        # drop memoized hash and string representations; they are stale now
        self.hash = None
        self.strRep = None

        sourceCount = self.versions[-1].getSourceCount()
        buildCount = self.versions[-1].getBuildCount()

        if sourceCount.shadowCount() == shadowLength:
            if buildCount:
                buildCount.increment(buildCount.shadowCount())
            else:
                # first build at this shadow depth
                buildCount = SerialNumber('1')
                self.versions[-1]._setBuildCount(buildCount)
        else:
            if buildCount:
                buildCount.increment(shadowLength)
            else:
                # pad with zeros so the build count matches the shadow depth
                buildCount = SerialNumber(".".join(['0'] * shadowLength +
                                                   ['1']))
                self.versions[-1]._setBuildCount(buildCount)

        if self.cached:
            log.warning('incrementBuildCount() was called on a version that '
                        'is cached.  Someone may already have a reference to '
                        'the cached object.')
        # assert not self.cached

        self.versions[-1].resetTimeStamp()
Пример #51
0
    def warning(self, msg, *args, **kwargs):
        """Warning handling callback

        @param msg: A message to display
        @type msg: str
        @keyword exc_text: Traceback text that should be printed verbatim
        @type exc_text: str
        """
        exc_text = kwargs.pop('exc_text', None)
        if not exc_text:
            return log.warning(msg, *args, **kwargs)
        # Append the traceback to the message
        return log.warning(msg + "\n%s", *(args + (exc_text, )), **kwargs)
Пример #52
0
    def warning(self, msg, *args, **kwargs):
        """Warning handling callback

        @param msg: A message to display
        @type msg: str
        @keyword exc_text: Traceback text that should be printed verbatim
        @type exc_text: str
        """
        tracebackText = kwargs.pop("exc_text", None)
        if tracebackText:
            # Append the traceback to the message
            msg = msg + "\n%s"
            args = args + (tracebackText,)
        return log.warning(msg, *args, **kwargs)
Пример #53
0
    def __init__(self, uri=None, rmakeConfig=None, buildConfig=None, root='/',
                 plugins=None, configureClient=True,
                 clientCert=None, promptPassword=False):
        """Set up an rMake helper, optionally connecting a client."""
        if rmakeConfig:
            log.warning('rmakeConfig parameter is now deprecated')
        buildConfig = buildConfig or buildcfg.BuildConfiguration(True, root)

        if configureClient:
            if clientCert is None:
                clientCert = buildConfig.clientCert
            if uri is None:
                # prompt only when a user name is configured without a
                # password and no certificate is available
                rmakeUser = buildConfig.rmakeUser
                needPrompt = (promptPassword and rmakeUser and rmakeUser[0]
                              and not rmakeUser[1] and not clientCert)
                if needPrompt:
                    self._promptPassword(buildConfig)
                uri = buildConfig.getServerUri()
            self.client = client.rMakeClient(uri, clientCert)

        self.buildConfig = buildConfig
        self.plugins = plugins
Пример #54
0
class MultinodeClientPlugin(plugin.ClientPlugin, plugin.LibraryPlugin):
    """Client/library plugin that enables talking to a remote rmake server.

    Registers for both the client and library plugin hooks and forces the
    configuration into a mode suitable for multinode operation.
    """
    # Hook types this plugin registers for.
    types = [plugin.TYPE_CLIENT, plugin.TYPE_LIBRARY]

    def client_preInit(self, main, argv):
        # Add support for remote rmake clients
        buildcfg.updateConfig()
        client.attach()
        command.addCommands(main)

    def library_preInit(self):
        # Same setup as the client path, minus command registration.
        buildcfg.updateConfig()
        client.attach()

    # NOTE: tuple-unpacking parameter syntax below is Python 2 only.
    def client_preCommand(self, main, thisCommand, (buildConfig, conaryConfig),
                          argSet, args):
        # copyInConfig/copyInConary are unsupported in multinode mode; warn
        # only when the user explicitly set them, then force both off.
        if buildConfig.copyInConfig and not buildConfig.isDefault(
                'copyInConfig'):
            log.warning('Cannot set copyInConfig in multinode mode')
        if buildConfig.copyInConary and not buildConfig.isDefault(
                'copyInConary'):
            log.warning('Cannot set copyInConary in multinode mode')
        buildConfig.copyInConary = False
        buildConfig.copyInConfig = False
Пример #55
0
    def __init__(self, uri=None, rmakeConfig=None, buildConfig=None, root='/',
                 plugins=None, configureClient=True,
                 clientCert=None, promptPassword=False):
        """Initialize the helper and optionally connect an rMake client.

        @param uri: server URI; when None it is taken from the build config
        @param rmakeConfig: deprecated and ignored (a warning is logged)
        @param buildConfig: build configuration; a fresh
            C{BuildConfiguration} rooted at C{root} is created when falsy
        @param configureClient: when true, C{self.client} is constructed
        @param clientCert: SSL client certificate; defaults to
            C{buildConfig.clientCert}
        @param promptPassword: prompt for a password when the configured
            rmake user has a name but no password and no certificate
        """
        if rmakeConfig:
            log.warning('rmakeConfig parameter is now deprecated')
        if not buildConfig:
            buildConfig = buildcfg.BuildConfiguration(True, root)

        if configureClient:
            if clientCert is None:
                clientCert = buildConfig.clientCert
            if uri is None:
                # Prompt only when a username exists without a password and
                # no certificate can authenticate instead.
                if (promptPassword and buildConfig.rmakeUser
                        and buildConfig.rmakeUser[0]
                        and not buildConfig.rmakeUser[1]
                        and not clientCert):
                    self._promptPassword(buildConfig)
                uri = buildConfig.getServerUri()

            self.client = client.rMakeClient(uri, clientCert)

        self.buildConfig = buildConfig
        self.plugins = plugins
Пример #56
0
    def _simpleTroveList(self, troveList, newFilesByTrove):
        log.info('Verifying %s' % " ".join(x[1].getName() for x in troveList))
        changedTroves = set()

        try:
            result = update.buildLocalChanges(self.db,
                                              troveList,
                                              root=self.cfg.root,
                                              forceSha1=self.forceHashCheck,
                                              ignoreTransient=True,
                                              updateContainers=True,
                                              statCache=self.statCache)
            if not result: return
            cs = result[0]
            changed = False
            for (changed, trv) in result[1]:
                if changed:
                    changedTroves.add(trv.getNameVersionFlavor())
        except OSError, err:
            if err.errno == 13:
                log.warning("Permission denied creating local changeset for"
                            " %s " % str([x[0].getName() for x in troveList]))
            return
Пример #57
0
 def check(self):
     """Verify that CheckTroveCache holds every required (pattern, item) pair.

     Recomputes the full set of pattern/trove matches from Permissions and
     Items, stores the missing pairs in C{self._status}, and returns True
     when nothing is missing.
     """
     db = self.getDB()
     log.info("checking the state of the CheckTroveCache table")
     cu = db.cursor()
     cu.execute("select patternId, itemId from CheckTroveCache")
     existing = set((row[0], row[1]) for row in cu.fetchall())
     cu.execute("select distinct i.itemId, i.item from Permissions as p "
                "join Items as i using(itemId)")
     patterns = set((row[0], row[1]) for row in cu.fetchall())
     cu.execute("select itemId, item from Items")
     troveNames = set((row[0], row[1]) for row in cu.fetchall())
     # Every trove name matching a permission pattern must appear in the
     # cache; collect the full required set.
     required = set(
         (patternId, itemId)
         for (patternId, pattern) in patterns
         for (itemId, item) in troveNames
         if items.checkTrove(pattern, item))
     self._status = required - existing
     if self._status:
         log.warning(
             "found %d entries that are missing from CheckTroveCache",
             len(self._status))
         return False
     return True
Пример #58
0
    def doAction(self):
        """Run this build action, with per-line error hooks and build-req
        suggestions.

        Skips entirely (with a warning) when the action is not supported on
        the target OS. When the action is enabled (C{self.use}), runs
        C{self.do()}; C{doSuggestAutoBuildReqs()} is always invoked, whether
        the action ran, failed, or was disabled.
        """
        if not self._isSupportedTarget():
            log.warning('Action %s not supported for target OS' %
                        self.__class__.__name__)
            return

        if self.use:
            try:
                if self.linenum is None:
                    self.do()
                else:
                    # Install a recipe-aware excepthook while do() runs so an
                    # uncaught error reports this action's source line.
                    oldexcepthook = sys.excepthook
                    sys.excepthook = genExcepthook(self)
                    if self.recipe.buildinfo:
                        # Record the line being executed for build info.
                        self.recipe.buildinfo.lastline = self.linenum
                    self.do()
                    # NOTE(review): the hook is restored only on success —
                    # presumably deliberate, so an exception escaping do()
                    # is still handled by the custom hook; confirm before
                    # moving this into a finally block.
                    sys.excepthook = oldexcepthook

            finally:
                # we need to provide suggestions even in the failure case
                self.doSuggestAutoBuildReqs()
        else:
            # any invariant suggestions should be provided even if not self.use
            self.doSuggestAutoBuildReqs()