Example no. 1
    def __init__(self,
                 owner,
                 slug,
                 branch=None,
                 pollInterval=10 * 60,
                 useTimestamps=True,
                 category=None,
                 project='',
                 pullrequest_filter=True,
                 encoding='utf-8',
                 pollAtLaunch=False):

        self.owner = owner
        self.slug = slug
        self.branch = branch
        base.PollingChangeSource.__init__(self,
                                          name='/'.join([owner, slug]),
                                          pollInterval=pollInterval,
                                          pollAtLaunch=pollAtLaunch)
        self.encoding = encoding

        if hasattr(pullrequest_filter, '__call__'):
            self.pullrequest_filter = pullrequest_filter
        else:
            self.pullrequest_filter = (lambda _: pullrequest_filter)

        self.lastChange = time.time()
        self.lastPoll = time.time()
        self.useTimestamps = useTimestamps
        self.category = category if callable(category) else ascii2unicode(
            category)
        self.project = ascii2unicode(project)
        self.initLock = defer.DeferredLock()
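
Most of the snippets on this page funnel externally supplied strings through buildbot.util.ascii2unicode before storing them or handing them to the data API. A minimal sketch of such a helper, assuming Python 2 semantics like the rest of these examples, might look as follows; this is an illustration, not Buildbot's actual implementation, and the optional error-handling argument merely mirrors the ascii2unicode(self.src, 'replace') calls seen in later examples.

def ascii2unicode_sketch(value, errors='strict'):
    # Pass None and already-decoded text through untouched.
    if value is None or isinstance(value, unicode):
        return value
    # Otherwise assume an ASCII byte string and decode it.
    return value.decode('ascii', errors)
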
Example no. 2
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, who="", comment="", properties=None):
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))
        if properties is None:
            properties = {}
        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            return

        reason = u"'try' job"

        if who:
            reason += u" by user %s" % ascii2unicode(who)

        if comment:
            reason += u" (%s)" % ascii2unicode(comment)

        sourcestamp = dict(
            branch=branch, revision=revision, repository=repository,
            project=project, patch_level=patch[0], patch_body=patch[1],
            patch_subdir='', patch_author=who or '',
            patch_comment=comment or '', codebase='',
        )           # note: no way to specify patch subdir - #1769

        requested_props = Properties()
        requested_props.update(properties, "try build")
        (bsid, brids) = yield self.scheduler.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp], reason=reason,
            properties=requested_props, builderNames=builderNames)

        # return a remotely-usable BuildSetStatus object
        bss = RemoteBuildSetStatus(self.scheduler.master, bsid, brids)
        defer.returnValue(bss)
Example no. 3
    def perspective_try(self, branch, revision, patch, repository, project,
                        builderNames, who="", comment="", properties=None):
        log.msg("user %s requesting build on builders %s" % (self.username,
                                                             builderNames))
        if properties is None:
            properties = {}
        # build the intersection of the request and our configured list
        builderNames = self.scheduler.filterBuilderList(builderNames)
        if not builderNames:
            return

        reason = u"'try' job"

        if who:
            reason += u" by user %s" % ascii2unicode(who)

        if comment:
            reason += u" (%s)" % ascii2unicode(comment)

        sourcestamp = dict(
            branch=branch, revision=revision, repository=repository,
            project=project, patch_level=patch[0], patch_body=patch[1],
            patch_subdir='', patch_author=who or '',
            patch_comment=comment or '', codebase='',
        )           # note: no way to specify patch subdir - #1769

        requested_props = Properties()
        requested_props.update(properties, "try build")
        (bsid, brids) = yield self.scheduler.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp], reason=reason,
            properties=requested_props, builderNames=builderNames)

        # return a remotely-usable BuildSetStatus object
        bss = RemoteBuildSetStatus(self.scheduler.master, bsid, brids)
        defer.returnValue(bss)
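
The comment above ("build the intersection of the request and our configured list") summarizes what filterBuilderList is expected to do. A rough stand-in following that comment, written as an assumption rather than Buildbot's exact rules, could be:

def filter_builder_list_sketch(requested, configured):
    # No explicit request: fall back to every configured builder.
    if not requested:
        return list(configured)
    # Keep only the requested names that are actually configured.
    return [name for name in requested if name in configured]
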
Example no. 4
    def __init__(self, name, password, max_builds=None,
                 notify_on_missing=[], missing_timeout=3600,
                 properties={}, locks=None, keepalive_interval=3600):
        """
        @param name: botname this machine will supply when it connects
        @param password: password this machine will supply when
                         it connects
        @param max_builds: maximum number of simultaneous builds that will
                           be run concurrently on this buildslave (the
                           default is None for no limit)
        @param properties: properties that will be applied to builds run on
                           this slave
        @type properties: dictionary
        @param locks: A list of locks that must be acquired before this slave
                      can be used
        @type locks: list
        """
        name = ascii2unicode(name)

        service.AsyncMultiService.__init__(self)
        self.slavename = ascii2unicode(name)
        self.password = password

        # protocol registration
        self.registration = None

        # these are set when the service is started
        self.botmaster = None
        self.manager = None
        self.master = None
        self.buildslaveid = None

        self.slave_status = SlaveStatus(name)
        self.slave_commands = None
        self.slavebuilders = {}
        self.max_builds = max_builds
        self.access = []
        if locks:
            self.access = locks
        self.lock_subscriptions = []

        self.properties = Properties()
        self.properties.update(properties, "BuildSlave")
        self.properties.setProperty("slavename", name, "BuildSlave")

        self.lastMessageReceived = 0
        if isinstance(notify_on_missing, str):
            notify_on_missing = [notify_on_missing]
        self.notify_on_missing = notify_on_missing
        for i in notify_on_missing:
            if not isinstance(i, str):
                config.error(
                    'notify_on_missing arg %r is not a string' % (i,))
        self.missing_timeout = missing_timeout
        self.missing_timer = None

        # a protocol connection, if we're currently connected
        self.conn = None

        self._old_builder_list = None
Example no. 5
    def __init__(self, owner, slug,
                 branch=None,
                 pollInterval=10 * 60,
                 useTimestamps=True,
                 category=None,
                 project='',
                 pullrequest_filter=True,
                 encoding='utf-8',
                 pollAtLaunch=False
                 ):

        self.owner = owner
        self.slug = slug
        self.branch = branch
        base.PollingChangeSource.__init__(
            self, name='/'.join([owner, slug]), pollInterval=pollInterval, pollAtLaunch=pollAtLaunch)
        self.encoding = encoding

        if hasattr(pullrequest_filter, '__call__'):
            self.pullrequest_filter = pullrequest_filter
        else:
            self.pullrequest_filter = (lambda _: pullrequest_filter)

        self.lastChange = time.time()
        self.lastPoll = time.time()
        self.useTimestamps = useTimestamps
        self.category = category if callable(
            category) else ascii2unicode(category)
        self.project = ascii2unicode(project)
        self.initLock = defer.DeferredLock()
Example no. 6
    def _processChanges(self, page):
        result = json.loads(page, encoding=self.encoding)
        for pr in result["values"]:
            branch = pr["source"]["branch"]["name"]
            nr = int(pr["id"])
            # Note that this is a short hash. The full length hash can be accessed via the
            # commit api resource but we want to avoid requesting multiple pages as long as
            # we are not sure that the pull request is new or updated.
            revision = pr["source"]["commit"]["hash"]

            # check branch
            if not self.branch or branch in self.branch:
                current = yield self._getCurrentRev(nr)

                if not current or current != revision:
                    # parse pull request api page (required for the filter)
                    page = yield client.getPage(str(pr["links"]["self"]["href"]))
                    pr_json = json.loads(page, encoding=self.encoding)

                    # filter pull requests by user function
                    if not self.pullrequest_filter(pr_json):
                        log.msg("pull request does not match filter")
                        continue

                    # access additional information
                    author = pr["author"]["display_name"]
                    prlink = pr["links"]["html"]["href"]
                    # Get the updated time. Note that the timezone offset is ignored.
                    if self.useTimestamps:
                        updated = datetime.strptime(pr["updated_on"].split(".")[0], "%Y-%m-%dT%H:%M:%S")
                    else:
                        updated = epoch2datetime(reactor.seconds())
                    title = pr["title"]
                    # parse commit api page
                    page = yield client.getPage(str(pr["source"]["commit"]["links"]["self"]["href"]))
                    commit_json = json.loads(page, encoding=self.encoding)
                    # use the full-length hash from now on
                    revision = commit_json["hash"]
                    revlink = commit_json["links"]["html"]["href"]
                    # parse repo api page
                    page = yield client.getPage(str(pr["source"]["repository"]["links"]["self"]["href"]))
                    repo_json = json.loads(page, encoding=self.encoding)
                    repo = repo_json["links"]["html"]["href"]

                    # update database
                    yield self._setCurrentRev(nr, revision)
                    # emit the change
                    yield self.master.data.updates.addChange(
                        author=ascii2unicode(author),
                        revision=ascii2unicode(revision),
                        revlink=ascii2unicode(revlink),
                        comments=u"pull-request #%d: %s\n%s" % (nr, title, prlink),
                        when_timestamp=datetime2epoch(updated),
                        branch=self.branch,
                        category=self.category,
                        project=self.project,
                        repository=ascii2unicode(repo),
                        src=u"bitbucket",
                    )
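
Because pullrequest_filter may be any callable taking the pull request's JSON payload (see the constructor in Example no. 1), a user-supplied filter is just a plain function. A hypothetical filter that skips work-in-progress pull requests might be:

def skip_wip_pull_requests(pr_json):
    # Build only pull requests whose title is not marked as work in progress.
    title = pr_json.get("title", u"")
    return not title.lower().startswith(u"wip")
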
Example no. 7
    def setProperty(self, name, value, source, runtime=False):
        name = util.ascii2unicode(name)
        json.dumps(value)  # Let the exception propagate ...
        source = util.ascii2unicode(source)

        self.properties[name] = (value, source)
        if runtime:
            self.runtime.add(name)
Example no. 8
    def setProperty(self, name, value, source, runtime=False):
        name = util.ascii2unicode(name)
        json.dumps(value)  # Let the exception propagate ...
        source = util.ascii2unicode(source)

        self.properties[name] = (value, source)
        if runtime:
            self.runtime.add(name)
Example no. 9
    def __init__(self,
                 repourl,
                 branches=None,
                 branch=None,
                 workdir=None,
                 pollInterval=10 * 60,
                 gitbin='git',
                 usetimestamps=True,
                 category=None,
                 project=None,
                 pollinterval=-2,
                 fetch_refspec=None,
                 encoding='utf-8',
                 name=None,
                 pollAtLaunch=False,
                 buildPushesWithNoCommits=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = repourl

        base.PollingChangeSource.__init__(self,
                                          name=name,
                                          pollInterval=pollInterval,
                                          pollAtLaunch=pollAtLaunch)

        if project is None:
            project = ''

        if branch and branches:
            config.error("GitPoller: can't specify both branch and branches")
        elif branch:
            branches = [branch]
        elif not branches:
            branches = ['master']

        self.repourl = repourl
        self.branches = branches
        self.encoding = encoding
        self.buildPushesWithNoCommits = buildPushesWithNoCommits
        self.gitbin = gitbin
        self.workdir = workdir
        self.usetimestamps = usetimestamps
        self.category = category if callable(category) else ascii2unicode(
            category)
        self.project = ascii2unicode(project)
        self.changeCount = 0
        self.lastRev = {}

        if fetch_refspec is not None:
            config.error("GitPoller: fetch_refspec is no longer supported. "
                         "Instead, only the given branches are downloaded.")

        if self.workdir is None:
            self.workdir = 'gitpoller-work'
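
A master.cfg fragment that exercises this constructor might look like the following; the repository URL and branch names are placeholders.

from buildbot.plugins import changes

c['change_source'] = [
    changes.GitPoller(repourl='git://git.example.org/project.git',
                      branches=['master', 'release'],
                      pollInterval=10 * 60,
                      pollAtLaunch=True),
]
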
Example no. 10
    def _process_changes(self, newRev, branch):
        """
        Read changes since last change.

        - Read list of commit hashes.
        - Extract details from each commit.
        - Add changes to database.
        """

        # initial run, don't parse all history
        if not self.lastRev:
            return
        if newRev in itervalues(self.lastRev):
            # TODO: no new changes on this branch
            # should we just use the lastRev again, but with a different branch?
            pass

        # get the change list
        revListArgs = ([r'--format=%H', r'%s' % newRev] +
                       [r'^%s' % rev for rev in itervalues(self.lastRev)] +
                       [r'--'])
        self.changeCount = 0
        results = yield self._dovccmd('log', revListArgs, path=self.workdir)

        # process oldest change first
        revList = results.split()
        revList.reverse()
        self.changeCount = len(revList)
        self.lastRev[branch] = newRev

        if self.changeCount:
            log.msg('gitpoller: processing %d changes: %s from "%s" branch "%s"'
                    % (self.changeCount, revList, self.repourl, branch))

        for rev in revList:
            dl = defer.DeferredList([
                self._get_commit_timestamp(rev),
                self._get_commit_author(rev),
                self._get_commit_files(rev),
                self._get_commit_comments(rev),
            ], consumeErrors=True)

            results = yield dl

            # check for failures
            failures = [r[1] for r in results if not r[0]]
            if failures:
                # just fail on the first error; they're probably all related!
                raise failures[0]

            timestamp, author, files, comments = [r[1] for r in results]

            yield self.master.data.updates.addChange(
                author=author, revision=ascii2unicode(rev), files=files,
                comments=comments, when_timestamp=timestamp,
                branch=ascii2unicode(self._removeHeads(branch)),
                project=self.project, repository=ascii2unicode(self.repourl),
                category=self.category, src=u'git')
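
For reference, the rev-list query built above is roughly equivalent to running "git log --format=%H <newRev> ^<lastRev> --" and reading the hashes oldest-first. A stand-alone approximation, assuming a local checkout and shown purely for illustration:

import subprocess

def list_new_commits(workdir, last_revs, new_rev):
    # Hashes reachable from new_rev but from none of the previously seen revs.
    args = ['git', 'log', '--format=%H', new_rev]
    args += ['^%s' % rev for rev in last_revs]
    args += ['--']
    out = subprocess.check_output(args, cwd=workdir)
    revs = out.decode('utf-8').split()
    revs.reverse()  # oldest change first, matching the poller
    return revs
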
Example no. 11
    def getResultSummary(self):
        src = ascii2unicode(self.src, 'replace')
        dest = ascii2unicode(self.dest, 'replace')
        copy = u"%s to %s" % (src, dest)
        if self.results == SUCCESS:
            rv = u'Copied ' + copy
        else:
            rv = u'Copying ' + copy + ' failed.'
        return {u'step': rv}
Example no. 12
    def _processChanges(self, unused_output):
        """Send info about pulled changes to the master and record current.

        HgPoller does the recording by moving the working dir to the head
        of the branch.
        We don't update the tree (that would be unnecessary work and a waste
        of space); instead, we simply store the current rev number in a file.
        Recall that hg rev numbers are local and incremental.
        """
        oid, current = yield self._getCurrentRev()
        # hg log on a range of revisions is never empty
        # also, if a numeric revision does not exist, a node may match.
        # Therefore, we have to check explicitly that branch head > current.
        head = yield self._getHead()
        if head is None:
            return
        elif current is not None and head <= current:
            return
        if current is None:
            # we could have used current = -1 convention as well (as hg does)
            revrange = '%d:%d' % (head, head)
        else:
            revrange = '%d:%s' % (current + 1, head)

        # two passes of hg log make parsing simpler (comments can span multiple lines)
        revListArgs = [
            'log', '-b', self.branch, '-r', revrange,
            r'--template={rev}:{node}\n'
        ]
        results = yield utils.getProcessOutput(self.hgbin,
                                               revListArgs,
                                               path=self._absWorkdir(),
                                               env=os.environ,
                                               errortoo=False)
        results = results.decode(self.encoding)

        revNodeList = [rn.split(u':', 1) for rn in results.strip().split()]

        log.msg('hgpoller: processing %d changes: %r in %r' %
                (len(revNodeList), revNodeList, self._absWorkdir()))
        for rev, node in revNodeList:
            timestamp, author, files, comments = yield self._getRevDetails(
                node)
            yield self.master.data.updates.addChange(
                author=author,
                revision=text_type(node),
                files=files,
                comments=comments,
                when_timestamp=int(timestamp) if timestamp else None,
                branch=ascii2unicode(self.branch),
                category=ascii2unicode(self.category),
                project=ascii2unicode(self.project),
                repository=ascii2unicode(self.repourl),
                src=u'hg')
            # writing after addChange so that a rev is never missed,
            # but at once to avoid impact from later errors
            yield self._setCurrentRev(rev, oid=oid)
Example no. 13
    def addChange(self, who=None, files=None, comments=None, **kwargs):
        # deprecated in 0.9.0; will be removed in 1.0.0
        log.msg(
            "WARNING: change source is using deprecated "
            "self.master.addChange method; this method will disappear in "
            "Buildbot-1.0.0"
        )
        # handle positional arguments
        kwargs["who"] = who
        kwargs["files"] = files
        kwargs["comments"] = comments

        def handle_deprec(oldname, newname):
            if oldname not in kwargs:
                return
            old = kwargs.pop(oldname)
            if old is not None:
                if kwargs.get(newname) is None:
                    log.msg("WARNING: change source is using deprecated " "addChange parameter '%s'" % oldname)
                    return old
                raise TypeError("Cannot provide '%s' and '%s' to addChange" % (oldname, newname))
            return kwargs.get(newname)

        kwargs["author"] = handle_deprec("who", "author")
        kwargs["when_timestamp"] = handle_deprec("when", "when_timestamp")

        # timestamp must be an epoch timestamp now
        if isinstance(kwargs.get("when_timestamp"), datetime.datetime):
            kwargs["when_timestamp"] = datetime2epoch(kwargs["when_timestamp"])

        # unicodify stuff
        for k in (
            "comments",
            "author",
            "revision",
            "branch",
            "category",
            "revlink",
            "repository",
            "codebase",
            "project",
        ):
            if k in kwargs:
                kwargs[k] = ascii2unicode(kwargs[k])
        if kwargs.get("files"):
            kwargs["files"] = [ascii2unicode(f) for f in kwargs["files"]]
        if kwargs.get("properties"):
            kwargs["properties"] = dict((ascii2unicode(k), v) for k, v in iteritems(kwargs["properties"]))

        # pass the converted call on to the data API
        changeid = yield self.data.updates.addChange(**kwargs)

        # and turn that changeid into a change object, since that's what
        # callers expected (and why this method was deprecated)
        chdict = yield self.db.changes.getChange(changeid)
        change = yield changes.Change.fromChdict(self, chdict)
        defer.returnValue(change)
Example no. 14
    def getResultSummary(self):
        src = ascii2unicode(self.src, 'replace')
        dest = ascii2unicode(self.dest, 'replace')
        copy = u"%s to %s" % (src, dest)
        if self.results == SUCCESS:
            rv = u'Copied ' + copy
        else:
            rv = u'Copying ' + copy + ' failed.'
        return {u'step': rv}
Example no. 15
    def _process_changes(self, newRev, branch):
        """
        Read changes since last change.

        - Read list of commit hashes.
        - Extract details from each commit.
        - Add changes to database.
        """

        lastRev = self.lastRev.get(branch)
        self.lastRev[branch] = newRev
        if not lastRev:
            return

        # get the change list
        revListArgs = [r'--format=%H', '%s..%s' % (lastRev, newRev), '--']
        self.changeCount = 0
        results = yield self._dovccmd('log', revListArgs, path=self.workdir)

        # process oldest change first
        revList = results.split()
        revList.reverse()
        self.changeCount = len(revList)

        log.msg('gitpoller: processing %d changes: %s from "%s"' %
                (self.changeCount, revList, self.repourl))

        for rev in revList:
            dl = defer.DeferredList([
                self._get_commit_timestamp(rev),
                self._get_commit_author(rev),
                self._get_commit_files(rev),
                self._get_commit_comments(rev),
            ],
                                    consumeErrors=True)

            results = yield dl

            # check for failures
            failures = [r[1] for r in results if not r[0]]
            if failures:
                # just fail on the first error; they're probably all related!
                raise failures[0]

            timestamp, author, files, comments = [r[1] for r in results]
            yield self.master.data.updates.addChange(
                author=author,
                revision=unicode(rev),
                files=files,
                comments=comments,
                when_timestamp=timestamp,
                branch=ascii2unicode(self._removeHeads(branch)),
                category=self.category,
                project=self.project,
                repository=ascii2unicode(self.repourl),
                src=u'git')
Example no. 16
    def _process_changes(self, newRev, branch):
        """
        Read changes since last change.

        - Read list of commit hashes.
        - Extract details from each commit.
        - Add changes to database.
        """

        lastRev = self.lastRev.get(branch)
        self.lastRev[branch] = newRev
        if not lastRev:
            return

        # get the change list
        revListArgs = [r'--format=%H', '%s..%s' % (lastRev, newRev), '--']
        self.changeCount = 0
        results = yield self._dovccmd('log', revListArgs, path=self.workdir)

        # process oldest change first
        revList = results.split()
        revList.reverse()
        self.changeCount = len(revList)

        log.msg('gitpoller: processing %d changes: %s from "%s"'
                % (self.changeCount, revList, self.repourl))

        for rev in revList:
            dl = defer.DeferredList([
                self._get_commit_timestamp(rev),
                self._get_commit_author(rev),
                self._get_commit_files(rev),
                self._get_commit_comments(rev),
            ], consumeErrors=True)

            results = yield dl

            # check for failures
            failures = [r[1] for r in results if not r[0]]
            if failures:
                # just fail on the first error; they're probably all related!
                raise failures[0]

            timestamp, author, files, comments = [r[1] for r in results]
            yield self.master.data.updates.addChange(
                author=author,
                revision=unicode(rev),
                files=files,
                comments=comments,
                when_timestamp=timestamp,
                branch=ascii2unicode(self._removeHeads(branch)),
                category=self.category,
                project=self.project,
                repository=ascii2unicode(self.repourl),
                src=u'git')
Example no. 17
    def __init__(self, repourl, split_file=None,
                 svnuser=None, svnpasswd=None,
                 pollInterval=10 * 60, histmax=100,
                 svnbin='svn', revlinktmpl='', category=None,
                 project='', cachepath=None, pollinterval=-2,
                 extra_args=None, name=None, pollAtLaunch=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = repourl

        base.PollingChangeSource.__init__(self, name=name,
                                          pollInterval=pollInterval,
                                          pollAtLaunch=pollAtLaunch)

        if repourl.endswith("/"):
            repourl = repourl[:-1]  # strip the trailing slash
        self.repourl = repourl
        self.extra_args = extra_args
        self.split_file = split_file or split_file_alwaystrunk
        self.svnuser = svnuser
        self.svnpasswd = svnpasswd

        self.revlinktmpl = revlinktmpl

        # include environment variables required for ssh-agent auth
        self.environ = os.environ.copy()

        self.svnbin = svnbin
        self.histmax = histmax
        self._prefix = None
        self.category = category if callable(
            category) else util.ascii2unicode(category)
        self.project = util.ascii2unicode(project)

        self.cachepath = cachepath
        if self.cachepath and os.path.exists(self.cachepath):
            try:
                with open(self.cachepath, "r") as f:
                    self.last_change = int(f.read().strip())
                    log.msg("SVNPoller: SVNPoller(%s) setting last_change to %s" % (
                        self.repourl, self.last_change))
                # try writing it, too
                with open(self.cachepath, "w") as f:
                    f.write(str(self.last_change))
            except Exception:
                self.cachepath = None
                log.msg(("SVNPoller: SVNPoller(%s) cache file corrupt or unwriteable; " +
                         "skipping and not using") % self.repourl)
                log.err()
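
A hypothetical master.cfg fragment using this poller; the repository URL and cache path are placeholders.

from buildbot.plugins import changes

c['change_source'] = changes.SVNPoller(
    repourl='https://svn.example.org/repo/trunk',
    pollInterval=10 * 60,
    cachepath='/var/lib/buildbot/svnpoller.cache',
    pollAtLaunch=True)
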
Example no. 18
    def __init__(self, repourl, branches=None, branch=None,
                 workdir=None, pollInterval=10 * 60,
                 gitbin='git', usetimestamps=True,
                 category=None, project=None,
                 pollinterval=-2, fetch_refspec=None,
                 encoding='utf-8', name=None, pollAtLaunch=False,
                 buildPushesWithNoCommits=False, only_tags=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = repourl

        base.PollingChangeSource.__init__(self, name=name,
                                          pollInterval=pollInterval,
                                          pollAtLaunch=pollAtLaunch)

        if project is None:
            project = ''

        if only_tags and (branch or branches):
            config.error("GitPoller: can't specify only_tags and branch/branches")
        if branch and branches:
            config.error("GitPoller: can't specify both branch and branches")
        elif branch:
            branches = [branch]
        elif not branches:
            if only_tags:
                branches = lambda ref: ref.startswith('refs/tags/')  # noqa: E731
            else:
                branches = ['master']

        self.repourl = repourl
        self.branches = branches
        self.encoding = encoding
        self.buildPushesWithNoCommits = buildPushesWithNoCommits
        self.gitbin = gitbin
        self.workdir = workdir
        self.usetimestamps = usetimestamps
        self.category = category if callable(
            category) else ascii2unicode(category)
        self.project = ascii2unicode(project)
        self.changeCount = 0
        self.lastRev = {}

        if fetch_refspec is not None:
            config.error("GitPoller: fetch_refspec is no longer supported. "
                         "Instead, only the given branches are downloaded.")

        if self.workdir is None:
            self.workdir = 'gitpoller-work'
Example no. 19
    def __init__(self, repourl, split_file=None,
                 svnuser=None, svnpasswd=None,
                 pollInterval=10 * 60, histmax=100,
                 svnbin='svn', revlinktmpl='', category=None,
                 project='', cachepath=None, pollinterval=-2,
                 extra_args=None, name=None, pollAtLaunch=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = repourl

        base.PollingChangeSource.__init__(self, name=name,
                                          pollInterval=pollInterval,
                                          pollAtLaunch=pollAtLaunch)

        if repourl.endswith("/"):
            repourl = repourl[:-1]  # strip the trailing slash
        self.repourl = repourl
        self.extra_args = extra_args
        self.split_file = split_file or split_file_alwaystrunk
        self.svnuser = svnuser
        self.svnpasswd = svnpasswd

        self.revlinktmpl = revlinktmpl

        # include environment variables required for ssh-agent auth
        self.environ = os.environ.copy()

        self.svnbin = svnbin
        self.histmax = histmax
        self._prefix = None
        self.category = category if callable(
            category) else util.ascii2unicode(category)
        self.project = util.ascii2unicode(project)

        self.cachepath = cachepath
        if self.cachepath and os.path.exists(self.cachepath):
            try:
                with open(self.cachepath, "r") as f:
                    self.last_change = int(f.read().strip())
                    log.msg("SVNPoller: SVNPoller(%s) setting last_change to %s" % (
                        self.repourl, self.last_change))
                # try writing it, too
                with open(self.cachepath, "w") as f:
                    f.write(str(self.last_change))
            except Exception:
                self.cachepath = None
                log.msg(("SVNPoller: SVNPoller(%s) cache file corrupt or unwriteable; " +
                         "skipping and not using") % self.repourl)
                log.err()
Example no. 20
    def _processChanges(self, unused_output):
        """Send info about pulled changes to the master and record current.

        HgPoller does the recording by moving the working dir to the head
        of the branch.
        We don't update the tree (that would be unnecessary work and a waste
        of space); instead, we simply store the current rev number in a file.
        Recall that hg rev numbers are local and incremental.
        """
        oid, current = yield self._getCurrentRev()
        # hg log on a range of revisions is never empty
        # also, if a numeric revision does not exist, a node may match.
        # Therefore, we have to check explicitly that branch head > current.
        head = yield self._getHead()
        if head is None:
            return
        elif current is not None and head <= current:
            return
        if current is None:
            # we could have used current = -1 convention as well (as hg does)
            revrange = '%d:%d' % (head, head)
        else:
            revrange = '%d:%s' % (current + 1, head)

        # two passes of hg log make parsing simpler (comments can span multiple lines)
        revListArgs = ['log', '-b', self.branch, '-r', revrange,
                       r'--template={rev}:{node}\n']
        results = yield utils.getProcessOutput(self.hgbin, revListArgs,
                                               path=self._absWorkdir(), env=os.environ, errortoo=False)

        revNodeList = [rn.split(':', 1) for rn in results.strip().split()]

        log.msg('hgpoller: processing %d changes: %r in %r'
                % (len(revNodeList), revNodeList, self._absWorkdir()))
        for rev, node in revNodeList:
            timestamp, author, files, comments = yield self._getRevDetails(
                node)
            yield self.master.data.updates.addChange(
                author=author,
                revision=text_type(node),
                files=files,
                comments=comments,
                when_timestamp=int(timestamp) if timestamp else None,
                branch=ascii2unicode(self.branch),
                category=ascii2unicode(self.category),
                project=ascii2unicode(self.project),
                repository=ascii2unicode(self.repourl),
                src=u'hg')
            # writing after addChange so that a rev is never missed,
            # but at once to avoid impact from later errors
            yield self._setCurrentRev(rev, oid=oid)
Example no. 21
    def addChange(self, who=None, files=None, comments=None, **kwargs):
        # deprecated in 0.9.0; will be removed in 1.0.0
        log.msg("WARNING: change source is using deprecated "
                "self.master.addChange method; this method will disappear in "
                "Buildbot-1.0.0")
        # handle positional arguments
        kwargs['who'] = who
        kwargs['files'] = files
        kwargs['comments'] = comments

        def handle_deprec(oldname, newname):
            if oldname not in kwargs:
                return
            old = kwargs.pop(oldname)
            if old is not None:
                if kwargs.get(newname) is None:
                    log.msg("WARNING: change source is using deprecated "
                            "addChange parameter '%s'" % oldname)
                    return old
                raise TypeError("Cannot provide '%s' and '%s' to addChange" %
                                (oldname, newname))
            return kwargs.get(newname)

        kwargs['author'] = handle_deprec("who", "author")
        kwargs['when_timestamp'] = handle_deprec("when", "when_timestamp")

        # timestamp must be an epoch timestamp now
        if isinstance(kwargs.get('when_timestamp'), datetime.datetime):
            kwargs['when_timestamp'] = datetime2epoch(kwargs['when_timestamp'])

        # unicodify stuff
        for k in ('comments', 'author', 'revision', 'branch', 'category',
                  'revlink', 'repository', 'codebase', 'project'):
            if k in kwargs:
                kwargs[k] = ascii2unicode(kwargs[k])
        if kwargs.get('files'):
            kwargs['files'] = [ascii2unicode(f) for f in kwargs['files']]
        if kwargs.get('properties'):
            kwargs['properties'] = dict(
                (ascii2unicode(k), v)
                for k, v in iteritems(kwargs['properties']))

        # pass the converted call on to the data API
        changeid = yield self.data.updates.addChange(**kwargs)

        # and turn that changeid into a change object, since that's what
        # callers expected (and why this method was deprecated)
        chdict = yield self.db.changes.getChange(changeid)
        change = yield changes.Change.fromChdict(self, chdict)
        defer.returnValue(change)
Example no. 22
    def handleJobFile(self, filename, f):
        try:
            parsed_job = self.parseJob(f)
            builderNames = parsed_job['builderNames']
        except BadJobfile:
            log.msg("%s reports a bad jobfile in %s" % (self, filename))
            log.err()
            return defer.succeed(None)

        # Validate/fixup the builder names.
        builderNames = self.filterBuilderList(builderNames)
        if not builderNames:
            log.msg(
                "incoming Try job did not specify any allowed builder names")
            return defer.succeed(None)

        who = ""
        if parsed_job['who']:
            who = parsed_job['who']

        comment = ""
        if parsed_job['comment']:
            comment = parsed_job['comment']

        sourcestamp = dict(
            branch=parsed_job['branch'],
            codebase='',
            revision=parsed_job['baserev'],
            patch_body=parsed_job['patch_body'],
            patch_level=parsed_job['patch_level'],
            patch_author=who,
            patch_comment=comment,
            # TODO: can't set this remotely - #1769
            patch_subdir='',
            project=parsed_job['project'],
            repository=parsed_job['repository'])
        reason = u"'try' job"
        if parsed_job['who']:
            reason += u" by user %s" % ascii2unicode(parsed_job['who'])
        properties = parsed_job['properties']
        requested_props = Properties()
        requested_props.update(properties, "try build")

        return self.addBuildsetForSourceStamps(sourcestamps=[sourcestamp],
                                               reason=reason,
                                               external_idstring=ascii2unicode(
                                                   parsed_job['jobid']),
                                               builderNames=builderNames,
                                               properties=requested_props)
Example no. 23
    def handleJobFile(self, filename, f):
        try:
            parsed_job = self.parseJob(f)
            builderNames = parsed_job["builderNames"]
        except BadJobfile:
            log.msg("%s reports a bad jobfile in %s" % (self, filename))
            log.err()
            return defer.succeed(None)

        # Validate/fixup the builder names.
        builderNames = self.filterBuilderList(builderNames)
        if not builderNames:
            log.msg("incoming Try job did not specify any allowed builder names")
            return defer.succeed(None)

        who = ""
        if parsed_job["who"]:
            who = parsed_job["who"]

        comment = ""
        if parsed_job["comment"]:
            comment = parsed_job["comment"]

        sourcestamp = dict(
            branch=parsed_job["branch"],
            codebase="",
            revision=parsed_job["baserev"],
            patch_body=parsed_job["patch_body"],
            patch_level=parsed_job["patch_level"],
            patch_author=who,
            patch_comment=comment,
            patch_subdir="",  # TODO: can't set this remotely - #1769
            project=parsed_job["project"],
            repository=parsed_job["repository"],
        )
        reason = u"'try' job"
        if parsed_job["who"]:
            reason += u" by user %s" % ascii2unicode(parsed_job["who"])
        properties = parsed_job["properties"]
        requested_props = Properties()
        requested_props.update(properties, "try build")

        return self.addBuildsetForSourceStamps(
            sourcestamps=[sourcestamp],
            reason=reason,
            external_idstring=ascii2unicode(parsed_job["jobid"]),
            builderNames=builderNames,
            properties=requested_props,
        )
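
handleJobFile is driven by a Try_Jobdir scheduler watching a job directory. A hypothetical configuration for it, with placeholder directory and builder names:

from buildbot.plugins import schedulers

c['schedulers'] = [
    schedulers.Try_Jobdir(name='try',
                          builderNames=['runtests'],
                          jobdir='jobdir'),
]
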
Example no. 24
    def addHTMLLog(self, name, html):
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.ascii2unicode(name), u'h')
        _log = self._newLog(name, u'h', logid)
        html = bytes2NativeString(html)
        yield _log.addContent(html)
        yield _log.finish()
Example no. 25
    def addCompleteLog(self, name, text):
        log.msg("addCompleteLog(%s)" % name)
        logid = yield self.master.data.updates.newLog(self.stepid,
                                                      util.ascii2unicode(name), u't')
        l = self._newLog(name, u't', logid)
        yield l.addContent(text)
        yield l.finish()
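
Seen from a step author's side, these log helpers are typically called from a custom step. A minimal sketch, assuming Buildbot nine's new-style BuildStep API; the class and log names here are illustrative only.

from twisted.internet import defer
from buildbot.process.buildstep import BuildStep
from buildbot.process.results import SUCCESS

class RecordNoteStep(BuildStep):
    # Minimal custom step that attaches a short text log and succeeds.
    name = 'record-note'

    @defer.inlineCallbacks
    def run(self):
        yield self.addCompleteLog(u'note', u'hello from RecordNoteStep\n')
        defer.returnValue(SUCCESS)
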
Example no. 26
    def addStep(self):
        # create and start the step, noting that the name may be altered to
        # ensure uniqueness
        self.stepid, self.number, self.name = yield self.master.data.updates.addStep(
            buildid=self.build.buildid,
            name=util.ascii2unicode(self.name))
        yield self.master.data.updates.startStep(self.stepid)
Example no. 27
    def addHTMLLog(self, name, html):
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.ascii2unicode(name),
                                                      u'h')
        l = self._newLog(name, u'h', logid)
        yield l.addContent(html)
        yield l.finish()
Example no. 28
    def __init__(self, repourl, branch='default',
                 workdir=None, pollInterval=10 * 60,
                 hgbin='hg', usetimestamps=True,
                 category=None, project='', pollinterval=-2,
                 encoding='utf-8', name=None, pollAtLaunch=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = "%s[%s]" % (repourl, branch)

        self.repourl = repourl
        self.branch = branch
        base.PollingChangeSource.__init__(
            self, name=name, pollInterval=pollInterval, pollAtLaunch=pollAtLaunch)
        self.encoding = encoding
        self.lastChange = time.time()
        self.lastPoll = time.time()
        self.hgbin = hgbin
        self.workdir = workdir
        self.usetimestamps = usetimestamps
        self.category = category if callable(category) else ascii2unicode(category)
        self.project = project
        self.commitInfo = {}
        self.initLock = defer.DeferredLock()

        if self.workdir is None:
            config.error("workdir is mandatory for now in HgPoller")
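
A hypothetical configuration matching this constructor; the repository URL and workdir are placeholders, and workdir is mandatory, as the constructor enforces.

from buildbot.plugins import changes

c['change_source'] = changes.HgPoller(repourl='https://hg.example.org/repo',
                                      branch='default',
                                      workdir='hgpoller-workdir',
                                      pollInterval=10 * 60)
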
Example no. 29
    def addCompleteLog(self, name, text):
        log.msg("addCompleteLog(%s)" % name)
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.ascii2unicode(name), u't')
        l = self._newLog(name, u't', logid)
        yield l.addContent(text)
        yield l.finish()
Example no. 30
    def __init__(self,
                 name,
                 builderNames,
                 reason='',
                 createAbsoluteSourceStamps=False,
                 onlyIfChanged=False,
                 branch=NoBranch,
                 change_filter=None,
                 fileIsImportant=None,
                 onlyImportant=False,
                 **kwargs):
        base.BaseScheduler.__init__(self, name, builderNames, **kwargs)

        # tracking for when to start the next build
        self.lastActuated = None

        # A lock to make sure that each actuation occurs without interruption.
        # This lock governs actuateAt, actuateAtTimer, and actuateOk
        self.actuationLock = defer.DeferredLock()
        self.actuateOk = False
        self.actuateAt = None
        self.actuateAtTimer = None

        self.reason = util.ascii2unicode(reason % {'name': name})
        self.branch = branch
        self.change_filter = ChangeFilter.fromSchedulerConstructorArgs(
            change_filter=change_filter)
        self.createAbsoluteSourceStamps = createAbsoluteSourceStamps
        self.onlyIfChanged = onlyIfChanged
        if fileIsImportant and not callable(fileIsImportant):
            config.error("fileIsImportant must be a callable")
        self.fileIsImportant = fileIsImportant
        # If True, only important changes will be added to the buildset.
        self.onlyImportant = onlyImportant
        self._reactor = reactor  # patched by tests
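
The reason string above is interpolated with the scheduler's name. A hypothetical Nightly configuration relying on this, assuming Nightly forwards the reason keyword down to this constructor; the builder name and timing are placeholders.

from buildbot.plugins import schedulers

c['schedulers'] = [
    schedulers.Nightly(name='nightly',
                       builderNames=['full-build'],
                       hour=3, minute=0,
                       reason="scheduler '%(name)s' nightly run"),
]
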
Example no. 31
    def addCompleteLog(self, name, text):
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.ascii2unicode(name),
                                                      u't')
        _log = self._newLog(name, u't', logid)
        yield _log.addContent(text)
        yield _log.finish()
Example no. 32
    def reconfigServiceWithBuildbotConfig(self, new_config):
        if new_config.mq.get('type', 'simple') != "wamp":
            return
        wamp = new_config.mq
        log.msg("Starting wamp with config: %r", wamp)
        router_url = wamp.get('router_url', None)

        # It is not a good idea to let people switch the router via reconfig:
        # how would we continue the current transactions?
        # how would we tell the workers to switch routers?
        if self.app is not None and self.router_url != router_url:
            raise ValueError(
                "Cannot use different wamp router url when reconfiguring")
        if router_url is None:
            return
        self.router_url = router_url
        self.app = self.serviceClass(
            url=self.router_url,
            extra=dict(master=self.master, parent=self),
            realm=ascii2unicode(wamp.get('realm', 'buildbot')),
            make=make
        )
        wamp_debug_level = wamp.get('wamp_debug_level', 'error')
        txaio.set_global_log_level(wamp_debug_level)
        yield self.app.setServiceParent(self)
        yield service.ReconfigurableServiceMixin.reconfigServiceWithBuildbotConfig(self,
                                                                                   new_config)
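
The keys read here (type, router_url, realm, wamp_debug_level) correspond to an mq section of master.cfg along these lines; the router URL is a placeholder.

c['mq'] = {
    'type': 'wamp',
    'router_url': 'ws://localhost:8080/ws',
    'realm': 'buildbot',
    'wamp_debug_level': 'error',
}
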
Example no. 33
    def addHTMLLog(self, name, html):
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.ascii2unicode(name), u'h')
        _log = self._newLog(name, u'h', logid)
        html = bytes2NativeString(html)
        yield _log.addContent(html)
        yield _log.finish()
Example no. 34
    def addStep(self):
        # create and start the step, noting that the name may be altered to
        # ensure uniqueness
        self.name = yield self.build.render(self.name)
        self.stepid, self.number, self.name = yield self.master.data.updates.addStep(
            buildid=self.build.buildid, name=util.ascii2unicode(self.name))
        yield self.master.data.updates.startStep(self.stepid)
Example no. 35
    def __init__(self, name, shouldntBeSet=NotSet, treeStableTimer=None,
                 builderNames=None, branch=NotABranch, branches=NotABranch,
                 fileIsImportant=None, categories=None,
                 reason="The %(classname)s scheduler named '%(name)s' triggered this build",
                 change_filter=None, onlyImportant=False, **kwargs):
        if shouldntBeSet is not self.NotSet:
            config.error(
                "pass arguments to schedulers using keyword arguments")
        if fileIsImportant and not callable(fileIsImportant):
            config.error(
                "fileIsImportant must be a callable")

        # initialize parent classes
        base.BaseScheduler.__init__(self, name, builderNames, **kwargs)

        self.treeStableTimer = treeStableTimer
        if fileIsImportant is not None:
            self.fileIsImportant = fileIsImportant
        self.onlyImportant = onlyImportant
        self.change_filter = self.getChangeFilter(branch=branch,
                                                  branches=branches, change_filter=change_filter,
                                                  categories=categories)

        # the IDelayedCall used to wake up when this scheduler's
        # treeStableTimer expires.
        self._stable_timers = defaultdict(lambda: None)
        self._stable_timers_lock = defer.DeferredLock()

        self.reason = util.ascii2unicode(reason % {
            'name': name, 'classname': self.__class__.__name__
        })
Example no. 36
    def __init__(self, name, shouldntBeSet=NotSet, treeStableTimer=None,
                 builderNames=None, branch=NotABranch, branches=NotABranch,
                 fileIsImportant=None, categories=None,
                 reason="The %(classname)s scheduler named '%(name)s' triggered this build",
                 change_filter=None, onlyImportant=False, **kwargs):
        if shouldntBeSet is not self.NotSet:
            config.error(
                "pass arguments to schedulers using keyword arguments")
        if fileIsImportant and not callable(fileIsImportant):
            config.error(
                "fileIsImportant must be a callable")

        # initialize parent classes
        base.BaseScheduler.__init__(self, name, builderNames, **kwargs)

        self.treeStableTimer = treeStableTimer
        if fileIsImportant is not None:
            self.fileIsImportant = fileIsImportant
        self.onlyImportant = onlyImportant
        self.change_filter = self.getChangeFilter(branch=branch,
                                                  branches=branches, change_filter=change_filter,
                                                  categories=categories)

        # the IDelayedCall used to wake up when this scheduler's
        # treeStableTimer expires.
        self._stable_timers = defaultdict(lambda: None)
        self._stable_timers_lock = defer.DeferredLock()

        self.reason = util.ascii2unicode(reason % {
            'name': name, 'classname': self.__class__.__name__
        })
Example no. 37
    def addChangeFromEvent(self, properties, event):

        if "change" in event and "patchSet" in event:
            event_change = event["change"]
            username = event_change["owner"].get("username", u"unknown")
            return self.addChange({
                'author':
                "%s <%s>" %
                (event_change["owner"].get("name", username),
                 event_change["owner"].get("email", u'*****@*****.**')),
                'project':
                util.ascii2unicode(event_change["project"]),
                'repository':
                u"ssh://%s@%s:%s/%s" %
                (self.username, self.gerritserver, self.gerritport,
                 event_change["project"]),
                'branch':
                self.getGroupingPolicyFromEvent(event),
                'revision':
                event["patchSet"]["revision"],
                'revlink':
                event_change["url"],
                'comments':
                event_change["subject"],
                'files': [u"unknown"],
                'category':
                event["type"],
                'properties':
                properties
            })
Example no. 38
    def addHTMLLog(self, name, html):
        log.msg("addHTMLLog(%s)" % name)
        logid = yield self.master.data.updates.addLog(self.stepid,
                                                      util.ascii2unicode(name), u'h')
        l = self._newLog(name, u'h', logid)
        yield l.addContent(html)
        yield l.finish()
Example no. 39
    def reconfigServiceWithBuildbotConfig(self, new_config):
        if new_config.mq.get('type', 'simple') != "wamp":
            return
        wamp = new_config.mq
        log.msg("Starting wamp with config: %r", wamp)
        router_url = wamp.get('router_url', None)

        # It is not a good idea to let people switch the router via reconfig:
        # how would we continue the current transactions?
        # how would we tell the workers to switch routers?
        if self.app is not None and self.router_url != router_url:
            raise ValueError(
                "Cannot use different wamp router url when reconfiguring")
        if router_url is None:
            return
        self.router_url = router_url
        self.app = self.serviceClass(url=self.router_url,
                                     extra=dict(master=self.master,
                                                parent=self),
                                     realm=ascii2unicode(
                                         wamp.get('realm', 'buildbot')),
                                     make=make)
        wamp_debug_level = wamp.get('wamp_debug_level', 'error')
        txaio.set_global_log_level(wamp_debug_level)
        yield self.app.setServiceParent(self)
        yield service.ReconfigurableServiceMixin.reconfigServiceWithBuildbotConfig(
            self, new_config)
Example no. 40
    def __init__(self, name, builderNames, properties={}, reason='',
                 createAbsoluteSourceStamps=False, onlyIfChanged=False,
                 branch=NoBranch, change_filter=None, fileIsImportant=None,
                 onlyImportant=False, **kwargs):
        base.BaseScheduler.__init__(self, name, builderNames, properties,
                                    **kwargs)

        # tracking for when to start the next build
        self.lastActuated = None

        # A lock to make sure that each actuation occurs without interruption.
        # This lock governs actuateAt, actuateAtTimer, and actuateOk
        self.actuationLock = defer.DeferredLock()
        self.actuateOk = False
        self.actuateAt = None
        self.actuateAtTimer = None

        self.reason = util.ascii2unicode(reason % {'name': name})
        self.branch = branch
        self.change_filter = ChangeFilter.fromSchedulerConstructorArgs(change_filter=change_filter)
        self.createAbsoluteSourceStamps = createAbsoluteSourceStamps
        self.onlyIfChanged = onlyIfChanged
        if fileIsImportant and not callable(fileIsImportant):
            config.error(
                "fileIsImportant must be a callable")
        self.fileIsImportant = fileIsImportant
        # If True, only important changes will be added to the buildset.
        self.onlyImportant = onlyImportant
        self._reactor = reactor  # patched by tests
Example no. 41
    def addChangeFromEvent(self, properties, event):

        if "change" in event and "patchSet" in event:
            event_change = event["change"]
            return self.addChange({
                'author':
                _gerrit_user_to_author(event_change["owner"]),
                'project':
                util.ascii2unicode(event_change["project"]),
                'repository':
                u"%s/%s" % (self.gitBaseURL, event_change["project"]),
                'branch':
                self.getGroupingPolicyFromEvent(event),
                'revision':
                event["patchSet"]["revision"],
                'revlink':
                event_change["url"],
                'comments':
                event_change["subject"],
                'files': [u"unknown"],
                'category':
                event["type"],
                'properties':
                properties
            })
Example no. 42
    def __init__(self, repourl, branch='default',
                 workdir=None, pollInterval=10 * 60,
                 hgbin='hg', usetimestamps=True,
                 category=None, project='', pollinterval=-2,
                 encoding='utf-8', name=None, pollAtLaunch=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = "%s[%s]" % (repourl, branch)

        self.repourl = repourl
        self.branch = branch
        base.PollingChangeSource.__init__(
            self, name=name, pollInterval=pollInterval, pollAtLaunch=pollAtLaunch)
        self.encoding = encoding
        self.lastChange = time.time()
        self.lastPoll = time.time()
        self.hgbin = hgbin
        self.workdir = workdir
        self.usetimestamps = usetimestamps
        self.category = category if callable(
            category) else ascii2unicode(category)
        self.project = project
        self.commitInfo = {}
        self.initLock = defer.DeferredLock()

        if self.workdir is None:
            config.error("workdir is mandatory for now in HgPoller")
Example no. 43
    def reconfigService(self,
                        owner,
                        repo,
                        branches=None,
                        pollInterval=10 * 60,
                        category=None,
                        baseURL=None,
                        project='',
                        pullrequest_filter=True,
                        token=None,
                        pollAtLaunch=False,
                        repository_link="https",
                        **kwargs):
        yield base.ReconfigurablePollingChangeSource.reconfigService(
            self, name=self.name, **kwargs)

        if baseURL is None:
            baseURL = HOSTED_BASE_URL
        if baseURL.endswith('/'):
            baseURL = baseURL[:-1]

        http_headers = {'User-Agent': 'Buildbot'}
        if token is not None:
            http_headers.update({'Authorization': 'token ' + token})

        self._http = yield httpclientservice.HTTPClientService.getService(
            self.master, baseURL, headers=http_headers)

        if not branches:
            branches = ['master']

        self.token = token
        self.owner = owner
        self.repo = repo
        self.branches = branches
        self.project = project
        self.pollInterval = pollInterval
        self.repository_link = link_urls[repository_link]

        if callable(pullrequest_filter):
            self.pullrequest_filter = pullrequest_filter
        else:
            self.pullrequest_filter = (lambda _: pullrequest_filter)

        self.category = category if callable(category) else ascii2unicode(
            category)
        self.project = ascii2unicode(project)
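A configuration sketch for this poller, assuming it is a GitHub pull-request poller along the lines of buildbot.changes.github.GitHubPullrequestPoller; the class name, owner/repo and token below are assumptions for illustration only.

    # Hypothetical master.cfg fragment; class name, repository and token are
    # assumptions, not taken from the example above.
    from buildbot.changes.github import GitHubPullrequestPoller

    c['change_source'] = [
        GitHubPullrequestPoller(owner='example-org',
                                repo='example-repo',
                                branches=['master'],
                                token='<github-api-token>',
                                pollInterval=10 * 60,
                                category='pull'),
    ]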
Example No. 44
    @defer.inlineCallbacks
    def _maybeStartBuildsOnBuilder(self, bldr, _reactor=reactor):
        # create a chooser to give us our next builds
        # this object is temporary and will go away when we're done
        bc = self.createBuildChooser(bldr, self.master)

        while True:
            slave, breqs = yield bc.chooseNextBuild()
            if not slave or not breqs:
                break

            # claim brid's
            brids = [br.id for br in breqs]
            claimed_at_epoch = _reactor.seconds()
            claimed_at = epoch2datetime(claimed_at_epoch)
            if not (yield self.master.data.updates.claimBuildRequests(
                    brids, claimed_at=claimed_at)):
                # some brids were already claimed, so start over
                bc = self.createBuildChooser(bldr, self.master)
                continue

            # the claim was successful, so publish a message for each brid
            for brid in brids:
                # TODO: inefficient..
                brdict = yield self.master.db.buildrequests.getBuildRequest(
                    brid)
                key = ('buildsets', str(brdict['buildsetid']), 'builders',
                       str(-1), 'buildrequests', str(brdict['buildrequestid']),
                       'claimed')
                msg = dict(
                    bsid=brdict['buildsetid'],
                    brid=brdict['buildrequestid'],
                    buildername=brdict['buildername'],
                    builderid=-1,
                    # TODO:
                    # claimed_at=claimed_at_epoch,
                    # masterid=masterid)
                )
                self.master.mq.produce(key, msg)

            buildStarted = yield bldr.maybeStartBuild(slave, breqs)
            if not buildStarted:
                yield self.master.data.updates.unclaimBuildRequests(brids)

                for breq in breqs:
                    bsid = breq.bsid
                    buildername = ascii2unicode(breq.buildername)
                    brid = breq.id
                    key = ('buildsets', str(bsid), 'builders',
                           str(-1), 'buildrequests',
                           str(brid), 'unclaimed')
                    msg = dict(brid=brid,
                               bsid=bsid,
                               buildername=buildername,
                               builderid=-1)
                    self.master.mq.produce(key, msg)

                # and try starting builds again.  If we still have a working slave,
                # then this may re-claim the same buildrequests
                self.botmaster.maybeStartBuildsForBuilder(self.name)
Example No. 45
    @defer.inlineCallbacks
    def reconfigService(self,
                        owner,
                        repo,
                        branches=None,
                        pollInterval=10 * 60,
                        category=None,
                        baseURL=None,
                        project='',
                        pullrequest_filter=True,
                        token=None,
                        pollAtLaunch=False,
                        magic_link=False,
                        repository_type="https",
                        **kwargs):
        yield base.ReconfigurablePollingChangeSource.reconfigService(
            self, name=self.name, **kwargs)

        if baseURL is None:
            baseURL = HOSTED_BASE_URL
        if baseURL.endswith('/'):
            baseURL = baseURL[:-1]

        http_headers = {'User-Agent': 'Buildbot'}
        if token is not None:
            http_headers.update({'Authorization': 'token ' + token})

        self._http = yield httpclientservice.HTTPClientService.getService(
            self.master, baseURL, headers=http_headers)

        self.token = token
        self.owner = owner
        self.repo = repo
        self.branches = branches
        self.project = project
        self.pollInterval = pollInterval
        self.repository_type = link_urls[repository_type]
        self.magic_link = magic_link

        if callable(pullrequest_filter):
            self.pullrequest_filter = pullrequest_filter
        else:
            self.pullrequest_filter = (lambda _: pullrequest_filter)

        self.category = category if callable(category) else ascii2unicode(
            category)
        self.project = ascii2unicode(project)
Example No. 46
    def addLog(self, name, type="s", logEncoding=None):
        d = self.master.data.updates.addLog(self.stepid, util.ascii2unicode(name), unicode(type))

        @d.addCallback
        def newLog(logid):
            return self._newLog(name, type, logid, logEncoding)

        return d
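A minimal sketch of how a build step might use this method, assuming it runs inside an inlineCallbacks-style run() and the usual log API (addStdout, finish); the step body and log name are placeholders.

    # Hypothetical use inside a custom BuildStep; assumes SUCCESS is imported
    # from buildbot.process.results.
    @defer.inlineCallbacks
    def run(self):
        stdio = yield self.addLog(u'stdio')   # type 's' = stream/text log
        yield stdio.addStdout(u'hello from the step\n')
        yield stdio.finish()
        defer.returnValue(SUCCESS)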
Example No. 47
    def __init__(self, name):
        # service.Service.__init__(self)  # there is none, oddly

        name = util.ascii2unicode(name)
        self.setName(name)

        self.serviceid = None
        self.active = False
Example No. 48
    def __init__(self, name):
        # service.Service.__init__(self)  # there is none, oddly

        name = util.ascii2unicode(name)
        self.setName(name)

        self.serviceid = None
        self.active = False
Example No. 49
    @defer.inlineCallbacks
    def _maybeStartBuildsOnBuilder(self, bldr, _reactor=reactor):
        # create a chooser to give us our next builds
        # this object is temporary and will go away when we're done

        bc = self.createBuildChooser(bldr, self.master)

        while True:
            slave, breqs = yield bc.chooseNextBuild()
            if not slave or not breqs:
                break

            # claim brid's
            brids = [br.id for br in breqs]
            claimed_at_epoch = _reactor.seconds()
            claimed_at = epoch2datetime(claimed_at_epoch)
            if not (yield self.master.data.updates.claimBuildRequests(
                    brids, claimed_at=claimed_at)):
                # some brids were already claimed, so start over
                bc = self.createBuildChooser(bldr, self.master)
                continue

            # the claim was successful, so publish a message for each brid
            for brid in brids:
                # TODO: inefficient..
                brdict = yield self.master.db.buildrequests.getBuildRequest(brid)
                key = ('buildsets', str(brdict['buildsetid']),
                       'builders', str(-1),
                       'buildrequests', str(brdict['buildrequestid']), 'claimed')
                msg = dict(
                    bsid=brdict['buildsetid'],
                    brid=brdict['buildrequestid'],
                    buildername=brdict['buildername'],
                    builderid=-1,
                    # TODO:
                    # claimed_at=claimed_at_epoch,
                    # masterid=masterid)
                )
                self.master.mq.produce(key, msg)

            buildStarted = yield bldr.maybeStartBuild(slave, breqs)

            if not buildStarted:
                yield self.master.data.updates.unclaimBuildRequests(brids)

                for breq in breqs:
                    bsid = breq.bsid
                    buildername = ascii2unicode(breq.buildername)
                    brid = breq.id
                    key = ('buildsets', str(bsid),
                           'builders', str(-1),
                           'buildrequests', str(brid), 'unclaimed')
                    msg = dict(brid=brid, bsid=bsid, buildername=buildername,
                               builderid=-1)
                    self.master.mq.produce(key, msg)

                # and try starting builds again.  If we still have a working slave,
                # then this may re-claim the same buildrequests
                self.botmaster.maybeStartBuildsForBuilder(self.name)
Example No. 50
    def addLog(self, name, type='s', logEncoding=None):
        d = self.master.data.updates.addLog(self.stepid,
                                            util.ascii2unicode(name),
                                            unicode(type))

        @d.addCallback
        def newLog(logid):
            return self._newLog(name, type, logid, logEncoding)
        return d
Example No. 51
    def __init__(self,
                 p4port=None,
                 p4user=None,
                 p4passwd=None,
                 p4base='//',
                 p4bin='p4',
                 split_file=lambda branchfile: (None, branchfile),
                 pollInterval=60 * 10,
                 histmax=None,
                 pollinterval=-2,
                 encoding='utf8',
                 project=None,
                 name=None,
                 use_tickets=False,
                 ticket_login_interval=60 * 60 * 24,
                 server_tz=None,
                 pollAtLaunch=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = "P4Source:%s:%s" % (p4port, p4base)

        base.PollingChangeSource.__init__(self,
                                          name=name,
                                          pollInterval=pollInterval,
                                          pollAtLaunch=pollAtLaunch)

        if project is None:
            project = ''

        if use_tickets and not p4passwd:
            config.error(
                "You need to provide a P4 password to use ticket authentication"
            )

        self.p4port = p4port
        self.p4user = p4user
        self.p4passwd = p4passwd
        self.p4base = p4base
        self.p4bin = p4bin
        self.split_file = split_file
        self.encoding = encoding
        self.project = util.ascii2unicode(project)
        self.use_tickets = use_tickets
        self.ticket_login_interval = ticket_login_interval
        self.server_tz = dateutil.tz.gettz(server_tz) if server_tz else None
        if server_tz is not None and self.server_tz is None:
            raise P4PollerError(
                "Failed to get timezone from server_tz string '{}'".format(
                    server_tz))

        self._ticket_passwd = None
        self._ticket_login_counter = 0
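As a usage sketch, assuming this is the stock P4Source poller from buildbot.changes.p4poller: the Perforce port, credentials and depot path below are placeholders.

    # Hypothetical master.cfg fragment; p4port, credentials and p4base are
    # placeholders for illustration only.
    from buildbot.changes.p4poller import P4Source

    c['change_source'] = [
        P4Source(p4port='perforce.example.com:1666',
                 p4user='buildbot',
                 p4passwd='<password>',
                 p4base='//depot/project/',
                 # map every file onto a single 'trunk' branch
                 split_file=lambda branchfile: ('trunk', branchfile),
                 pollInterval=10 * 60),
    ]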
Example No. 52
    def __init__(self, *args, **kwargs):
        name = kwargs.pop("name", None)
        if name is not None:
            self.name = ascii2unicode(name)
        self.checkConfig(*args, **kwargs)
        if self.name is None:
            raise ValueError("%s: must pass a name to constructor" % type(self))
        self._config_args = args
        self._config_kwargs = kwargs
        AsyncMultiService.__init__(self)
Example No. 53
    def __init__(self, *args, **kwargs):
        name = kwargs.pop("name", None)
        if name is not None:
            self.name = ascii2unicode(name)
        self.checkConfig(*args, **kwargs)
        if self.name is None:
            raise ValueError("%s: must pass a name to constructor" % type(self))
        self._config_args = args
        self._config_kwargs = kwargs
        AsyncMultiService.__init__(self)
Example No. 54
    def __init__(self,
                 p4port=None,
                 p4user=None,
                 p4passwd=None,
                 p4base='//',
                 p4bin='p4',
                 split_file=lambda branchfile: (None, branchfile),
                 pollInterval=60 * 10,
                 histmax=None,
                 pollinterval=-2,
                 encoding='utf8',
                 project=None,
                 name=None,
                 use_tickets=False,
                 ticket_login_interval=60 * 60 * 24,
                 server_tz=None,
                 pollAtLaunch=False):

        # for backward compatibility; the parameter used to be spelled with 'i'
        if pollinterval != -2:
            pollInterval = pollinterval

        if name is None:
            name = "P4Source:%s:%s" % (p4port, p4base)

        base.PollingChangeSource.__init__(
            self,
            name=name,
            pollInterval=pollInterval,
            pollAtLaunch=pollAtLaunch)

        if project is None:
            project = ''

        if use_tickets and not p4passwd:
            config.error(
                "You need to provide a P4 password to use ticket authentication"
            )

        self.p4port = p4port
        self.p4user = p4user
        self.p4passwd = p4passwd
        self.p4base = p4base
        self.p4bin = p4bin
        self.split_file = split_file
        self.encoding = encoding
        self.project = util.ascii2unicode(project)
        self.use_tickets = use_tickets
        self.ticket_login_interval = ticket_login_interval
        self.server_tz = dateutil.tz.gettz(server_tz) if server_tz else None

        self._ticket_passwd = None
        self._ticket_login_counter = 0
Example No. 55
    @defer.inlineCallbacks
    def getOldestRequestTime(self):
        """Returns the submitted_at of the oldest unclaimed build request for
        this builder, or None if there are no build requests.

        @returns: datetime instance or None, via Deferred
        """
        unclaimed = yield self.master.data.get(('builders', ascii2unicode(self.name), 'buildrequests'),
                                               [resultspec.Filter('claimed', 'eq', [False])])
        if unclaimed:
            unclaimed = sorted([brd['submitted_at'] for brd in unclaimed])
            defer.returnValue(unclaimed[0])
        else:
            defer.returnValue(None)
Example No. 56
    @defer.inlineCallbacks
    def getOldestRequestTime(self):
        """Returns the submitted_at of the oldest unclaimed build request for
        this builder, or None if there are no build requests.

        @returns: datetime instance or None, via Deferred
        """
        unclaimed = yield self.master.data.get(('builders', ascii2unicode(self.name), 'buildrequests'),
                                               [resultspec.Filter('claimed', 'eq', [False])])
        if unclaimed:
            unclaimed = sorted([brd['submitted_at'] for brd in unclaimed])
            defer.returnValue(unclaimed[0])
        else:
            defer.returnValue(None)
Example No. 57
def forceIdentifier(maxLength, str):
    if not isinstance(str, string_types):
        raise TypeError("%r cannot be coerced to an identifier" % (str,))

    # usually ascii2unicode can handle it
    str = util.ascii2unicode(str)
    if isIdentifier(maxLength, str):
        return str

    # trim to length and substitute out invalid characters
    str = str[:maxLength]
    str = initial_re.sub('_', str)
    str = subsequent_re.subn('_', str)[0]
    return str
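A rough sketch of the behaviour this implies; the exact substitutions depend on initial_re and subsequent_re, which are defined elsewhere in the module, so the comments below describe the shape of the result rather than exact output.

    # Illustrative only.
    name = forceIdentifier(50, "builder name with spaces")
    # -> a unicode string at most 50 characters long, with any character the
    #    identifier regexes reject replaced by '_'

    forceIdentifier(50, 42)
    # -> raises TypeError: 42 cannot be coerced to an identifier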
Example No. 58
    @defer.inlineCallbacks
    def reconfigServiceBuilders(self, new_config):

        timer = metrics.Timer("BotMaster.reconfigServiceBuilders")
        timer.start()

        # arrange builders by name
        old_by_name = dict([(b.name, b)
                            for b in list(self)
                            if isinstance(b, Builder)])
        old_set = set(old_by_name)
        new_by_name = dict([(bc.name, bc)
                            for bc in new_config.builders])
        new_set = set(new_by_name)

        # calculate new builders, by name, and removed builders
        removed_names, added_names = util.diffSets(old_set, new_set)

        if removed_names or added_names:
            log.msg("adding %d new builders, removing %d" %
                    (len(added_names), len(removed_names)))

            for n in removed_names:
                builder = old_by_name[n]

                del self.builders[n]
                builder.master = None
                builder.botmaster = None

                # pylint: disable=cell-var-from-loop
                yield defer.maybeDeferred(lambda:
                                          builder.disownServiceParent())

            for n in added_names:
                builder = Builder(n)
                self.builders[n] = builder

                builder.botmaster = self
                builder.master = self.master
                yield builder.setServiceParent(self)

        self.builderNames = list(self.builders)

        yield self.master.data.updates.updateBuilderList(
            self.master.masterid,
            [util.ascii2unicode(n) for n in self.builderNames])

        metrics.MetricCountEvent.log("num_builders",
                                     len(self.builders), absolute=True)

        timer.stop()
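For reference, a minimal sketch of the set arithmetic this method relies on, assuming util.diffSets(old, new) returns the pair (removed, added) as its use above suggests.

    # Illustrative only; mirrors how diffSets is used above.
    old_set = {'builder-a', 'builder-b'}
    new_set = {'builder-b', 'builder-c'}

    removed_names, added_names = util.diffSets(old_set, new_set)
    # removed_names == {'builder-a'}   -> builder disowned from the botmaster
    # added_names   == {'builder-c'}   -> new Builder() created and attached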