Example #1
    @defer.inlineCallbacks  # needs: from twisted.internet import defer
    def create_two_worker_two_builder_lock_config(self, mode):
        stepcontrollers = [BuildStepController(), BuildStepController()]

        master_lock = util.MasterLock("lock1", maxCount=1)

        config_dict = {
            'builders': [
                BuilderConfig(name='builder1',
                              workernames=['worker1'],
                              factory=BuildFactory([stepcontrollers[0].step]),
                              locks=[master_lock.access(mode)]),
                BuilderConfig(name='builder2',
                              workernames=['worker2'],
                              factory=BuildFactory([stepcontrollers[1].step]),
                              locks=[master_lock.access(mode)]),
            ],
            'workers': [
                self.createLocalWorker('worker1'),
                self.createLocalWorker('worker2'),
            ],
            'protocols': {
                'null': {}
            },
            'multiMaster': True,
        }
        yield self.setup_master(config_dict)
        builder_ids = [
            (yield self.master.data.updates.findBuilderId('builder1')),
            (yield self.master.data.updates.findBuilderId('builder2')),
        ]

        return stepcontrollers, builder_ids
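
The mode argument above is a standard Buildbot lock access mode. A minimal sketch of the two modes, using only the documented buildbot.plugins.util API (the lock name and maxCount here are made up for illustration):

from buildbot.plugins import util

shared_lock = util.MasterLock("shared", maxCount=2)

# 'exclusive' access admits a single holder regardless of maxCount...
exclusive_access = shared_lock.access('exclusive')
# ...while 'counting' access admits up to maxCount concurrent holders.
counting_access = shared_lock.access('counting')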
Example #2
    def __init__(self,
                 name,
                 baseurl,
                 branch,
                 *,
                 nightly=None,
                 enable_force=True,
                 giturl=None,
                 verbose_build=False,
                 description=None):
        super().__init__(name)
        if giturl is None:
            giturl = baseurl + ".git"
        self.baseurl = baseurl
        self.giturl = giturl
        self.branch = branch
        self.nightly = nightly
        self.enable_force = enable_force
        self.verbose_build = verbose_build
        self.description_ = description

        if self.CONFIGURE_GENERATED_FILE is None:
            raise Exception("Invalid CONFIGURE_GENERATED_FILE setting")

        # Lock used to avoid writing source code when it is read by another task
        self.lock_src = util.MasterLock("src-{0}".format(self.name),
                                        maxCount=sys.maxsize)
        self.buildNames()
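
For context, the lock above is created with maxCount=sys.maxsize, so an effectively unlimited number of counting accesses can run side by side while a single exclusive access still blocks them all. A hedged sketch of the read/write pattern this enables (the step contents, lock name, and repository URL are invented for illustration):

import sys

from buildbot.plugins import steps, util

src_lock = util.MasterLock("src-example", maxCount=sys.maxsize)

# A step that rewrites the source checkout claims the lock exclusively...
update_sources = steps.Git(repourl="https://example.org/project.git",
                           locks=[src_lock.access('exclusive')])
# ...while any number of steps that only read the sources hold it in counting mode.
package_sources = steps.ShellCommand(command=["tar", "czf", "src.tar.gz", "."],
                                     locks=[src_lock.access('counting')])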
Example #3
def masterConfig():
    c = {}
    from buildbot.config import BuilderConfig
    from buildbot.process.factory import BuildFactory
    from buildbot.plugins import schedulers
    from buildbot.plugins import util
    lock = util.MasterLock("lock")

    c['schedulers'] = [
        schedulers.AnyBranchScheduler(
            name="sched",
            builderNames=["testy", "testy2", "testy3"]),
        ]
    f = BuildFactory()
    lockstep = LockedStep(locks=[lock.access('exclusive')])
    f.addStep(lockstep)
    # assert lockstep._factory.buildStep() == lockstep._factory.buildStep()
    c['builders'] = [
        BuilderConfig(name="testy",
                      workernames=["local1"],
                      factory=f),
        BuilderConfig(name="testy2",
                      workernames=["local1"],
                      factory=f),
        BuilderConfig(name="testy3",
              workernames=["local1"],
              factory=f)]

    return c
Example #4
def make_builders(repo_url,
                  worker_configs,
                  snapshots_dir=None,
                  snapshots_url=None,
                  snapshots_default_max=2):
    # TODO: Use one lock per container host; for now we have only one container
    # host so we just use a single lock. One lock per worker is not good enough
    # since many workers may run on a single container host and this should block
    # on host container resources.
    # "build1" is the name used for the container host in docker-compose.yml
    master_lock = util.MasterLock("build1")

    builders = []
    for (worker_name, worker_config) in iteritems(worker_configs):
        for (builder_name,
             builder_config) in iteritems(worker_config["builders"]):
            # Workers with only one builder give it a blank name
            if not builder_name:
                builder_name = worker_name
            else:
                builder_name = "%s-%s" % (worker_name, builder_name)

            builders.append(
                make_builder_config(
                    repo_url=repo_url,
                    name=builder_name,
                    worker_name=worker_name,
                    config=builder_config,
                    lock=master_lock,
                    snapshots_dir=snapshots_dir,
                    snapshots_url=snapshots_url,
                    snapshots_default_max=snapshots_default_max))

    return builders
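
The TODO above spells out the intended generalization: one MasterLock per container host rather than a single global lock. A hedged sketch of that idea, with hypothetical host names and a hypothetical worker-to-host mapping in the worker config:

from buildbot.plugins import util

# One lock per container host; "build1" and "build2" are placeholder host names.
host_locks = {host: util.MasterLock(host) for host in ("build1", "build2")}

def lock_for_worker(worker_config):
    # Assumes each worker config records the container host it runs on.
    return host_locks[worker_config.get("container_host", "build1")]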
Example #5
    platform.configureargs.append("--host=android-{0}".format(scummvm_target))
    platform.configureargs.append("--enable-debug")
    platform.packaging_cmd = "androiddistdebug"
    platform.built_files = {
        builds.ScummVMBuild: ["debug"],
    }
    platform.archiveext = "zip"
    platform.testable = False

    platform.description = description
    platform.icon = "android"

    register_platform(platform)


android.lock = util.MasterLock("android")
android(suffix="arm",
        scummvm_target="arm-v7a",
        ndk_target="arm-linux-androideabi",
        cxx_target="armv7a-linux-androideabi",
        abi_version=16,
        description="Android (ARM)")
android(suffix="arm64",
        scummvm_target="arm64-v8a",
        ndk_target="aarch64-linux-android",
        cxx_target="aarch64-linux-android",
        abi_version=21,
        description="Android (ARM 64\xa0bits)")
android(suffix="x86",
        scummvm_target="x86",
        ndk_target="i686-linux-android",
Example #6
force_scheduler = schedulers.ForceScheduler(
    name="force", builderNames=force_builder_names
)

c["schedulers"] = []
c["schedulers"].append(build_scheduler)
c["schedulers"].append(director_scheduler)
c["schedulers"].append(force_scheduler)

####### BUILDERS

# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which workers can execute them.  Note that any particular build will
# only take place on one worker.

build_lock = util.MasterLock("Build")


c["builders"] = []
c["builders"].extend(generate_builder(target) for target in test_targets)
c["builders"].append(
    util.BuilderConfig(
        name="build",
        workernames=["director-worker"],
        factory=build_factory,
        locks=[build_lock.access("exclusive")],
    )
)

lingo_factory = util.BuildFactory()
lingo_factory.addStep(checkout_step)
Example #7
    @defer.inlineCallbacks  # needs: from twisted.internet import defer
    def test_builder_lock_release_wakes_builds_for_another_builder(self):
        """
        If a builder locks a master lock then the build request distributor
        must retry running any buildrequests that might have been not scheduled
        due to unavailability of that lock when the lock becomes available.
        """

        stepcontroller1 = BuildStepController()
        stepcontroller2 = BuildStepController()

        master_lock = util.MasterLock("lock1", maxCount=1)

        config_dict = {
            'builders': [
                BuilderConfig(name='builder1',
                              workernames=['worker1'],
                              factory=BuildFactory([stepcontroller1.step]),
                              locks=[master_lock.access('counting')]),
                BuilderConfig(name='builder2',
                              workernames=['worker2'],
                              factory=BuildFactory([stepcontroller2.step]),
                              locks=[master_lock.access('counting')]),
            ],
            'workers': [
                self.createLocalWorker('worker1'),
                self.createLocalWorker('worker2'),
            ],
            'protocols': {
                'null': {}
            },
            'multiMaster': True,
        }
        master = yield self.getMaster(config_dict)
        builder1_id = yield master.data.updates.findBuilderId('builder1')
        builder2_id = yield master.data.updates.findBuilderId('builder2')

        # start two builds and verify that a second build starts after the
        # first is finished
        yield self.createBuildrequest(master, [builder1_id])
        yield self.createBuildrequest(master, [builder2_id])

        builds = yield master.data.get(("builds", ))
        self.assertEqual(len(builds), 1)
        self.assertEqual(builds[0]['results'], None)
        self.assertEqual(builds[0]['builderid'], builder1_id)

        stepcontroller1.finish_step(SUCCESS)

        # execute Build.releaseLocks which is called eventually
        yield flushEventualQueue()

        builds = yield master.data.get(("builds", ))
        self.assertEqual(len(builds), 2)
        self.assertEqual(builds[0]['results'], SUCCESS)
        self.assertEqual(builds[1]['results'], None)
        self.assertEqual(builds[1]['builderid'], builder2_id)

        stepcontroller2.finish_step(SUCCESS)

        builds = yield master.data.get(("builds", ))
        self.assertEqual(len(builds), 2)
        self.assertEqual(builds[0]['results'], SUCCESS)
        self.assertEqual(builds[1]['results'], SUCCESS)
Example #8
def factory(constructicon_name, builder_name, deps, commands, upload, zip,
            unzip, url, resources):
    deps = sorted(deps)

    def work_dir_renderer(*suffix, **kwargs):
        @util.renderer
        def work_dir(properties):
            if kwargs.get('log', False):
                log.msg('properties are: ' +
                        pprint.pformat(properties.asDict()))
                log.msg('sourcestamps are: ' + pprint.pformat(
                    [(i.repository, i.branch, i.revision)
                     for i in properties.getBuild().getAllSourceStamps()]))
            sep = '/'
            if all_slaves[properties['slavename']].get('platform',
                                                       0) == 'windows':
                sep = '\\'
            return sep.join(('..', 'constructicons', constructicon_name,
                             constructicon_name) + suffix)

        return work_dir

    result = util.BuildFactory()

    def git_step(repo_url, work_dir, env):
        return common.sane_step(
            steps.Git,
            repourl=repo_url,
            codebase=repo_url,
            workdir=work_dir,
            mode='incremental',
            env=env,
            warnOnWarnings=False,
        )

    def extract_parameters(dict):
        return {
            i[len(parameter_prefix):]: str(j[0])
            for i, j in dict.items() if i.startswith(parameter_prefix)
        }

    @util.renderer
    def env(properties):
        return extract_parameters(properties.asDict())

    def format(command):
        @util.renderer
        def f(properties):
            return command.format(**extract_parameters(properties.asDict()))

        return f

    @util.renderer
    def get_command(properties):
        revisions = ''
        for i in properties.getBuild().getAllSourceStamps():
            revision = None
            if i.revision: revision = i.revision
            elif i.branch: revision = i.branch
            if revision: revisions += ' {}:{}'.format(i.codebase, revision)
        if revisions: revisions = ' -r' + revisions
        return common.constructicon_slave_go('g {}{}'.format(
            builder_name,
            revisions,
        ))

    for resource in resources:
        if resource not in resource_locks:
            resource_locks[resource] = util.MasterLock(resource)
    locks = [resource_locks[i].access('exclusive') for i in resources]
    #properties, get, compile
    result.addSteps([
        common.sane_step(
            steps.SetProperty,
            name='devastator git state',
            property='devastator_git_state',
            value={{{devastator_git_state}}},
        ),
        common.sane_step(
            steps.SetProperty,
            name='cybertron git state',
            property='cybertron_git_state',
            value={{{cybertron_git_state}}},
        ),
        common.sane_step(
            steps.SetProperty,
            name='git state',
            property='git_state',
            value=global_git_states[constructicon_name],
        ),
        git_step(global_repo_urls[constructicon_name], work_dir_renderer(),
                 env),
        common.sane_step(
            steps.ShellCommand,
            name='get',
            command=get_command,
            workdir=work_dir_renderer(log=True),
            env=env,
            warnOnWarnings=False,
        ),
    ])
    for command_i in range(len(commands)):
        kwargs = {}
        meat = commands[command_i][1]
        timeout = 5 * 60
        if type(meat) == str:
            command = meat
        else:
            command = meat['command']
            warning_pattern = '(.*warning[: ])'
            if 'warnings' in meat:
                warning_pattern = '({})'.format('|'.join(meat['warnings']))
            if 'suppress_warnings' in meat:
                warning_pattern = warning_pattern + '(?!{})'.format('|'.join(
                    meat['suppress_warnings']))
            kwargs['warningPattern'] = warning_pattern
            timeout = meat.get('timeout', timeout)
        result.addStep(
            common.sane_step(steps.Compile,
                             name=commands[command_i][0],
                             command=format(command),
                             workdir=work_dir_renderer(),
                             env=env,
                             locks=locks,
                             timeout=timeout,
                             maxTime=2 * 60 * 60,
                             **kwargs))
    #upload
    for i, j in upload.items(True):
        zip_steps = []
        upload_steps = []
        unzip_steps = []
        slave_src = i
        master_dst_extension = ''
        #zip
        if i in zip:

            @util.renderer
            def command(properties, i=i):
                return 'python -m zipfile -c {0}.zip {0}'.format(i)

            zip_steps.append(
                steps.ShellCommand(
                    command=command,
                    workdir=work_dir_renderer(),
                    alwaysRun=True,
                ))
            slave_src += '.zip'
            master_dst_extension = '.zip'
        #unzip
        def master_dst_function(properties,
                                j=j,
                                extension=master_dst_extension,
                                suffix=None):
            return os.path.join(
                make_full_builder_name(constructicon_name, builder_name),
                str(properties['buildnumber']) + '-constructicon',
                suffix if suffix else j + master_dst_extension)

        @util.renderer
        def master_dst_renderer(properties, f=master_dst_function):
            return f(properties)

        url_trim = 0
        if j in unzip:

            @util.renderer
            def command(properties, master_dst_function=master_dst_function):
                master_dst = master_dst_function(properties)
                unzipped = os.path.split(master_dst)[0] or '.'
                return 'python -m zipfile -e {} {}'.format(
                    master_dst, unzipped)

            unzip_steps.append(
                steps.MasterShellCommand(command=command, alwaysRun=True))
            url_trim = 4
        devastator_file_server_port = cybertron['devastator_file_server_port']
        #upload
        suffix = url.get(j, None)

        @util.renderer
        def url_renderer(
                properties,
                j=j,
                suffix=suffix,
                master_dst_function=master_dst_function,
                devastator_file_server_port=devastator_file_server_port,
                url_trim=url_trim):
            return ('http://{}:{}'.format({{{devastator_host}}},
                                          devastator_file_server_port) + '/' +
                    master_dst_function(properties, suffix=suffix))

        upload_steps.append(
            steps.FileUpload(
                slavesrc=slave_src,
                masterdest=master_dst_renderer,
                url=url_renderer,
                workdir=work_dir_renderer(),
                alwaysRun=True,
            ))
        #append
        result.addSteps(zip_steps + upload_steps + unzip_steps)
    return result
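
# Illustrative aside, not part of the factory above: resource_locks acts as a lazy
# one-lock-per-resource-name registry, so every build step that names the same
# resource serializes on the same MasterLock. A minimal standalone sketch of that
# pattern (the helper name is invented):
_resource_locks = {}

def get_resource_lock(name):
    # Create the MasterLock the first time a resource name is seen, then reuse it.
    if name not in _resource_locks:
        _resource_locks[name] = util.MasterLock(name)
    return _resource_locks[name]
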
def getBuildersForBranch(props):

    pretty_branch_name = props['branch_pretty']

    deb_props = dict(props)
    deb_props['image'] = random.choice({{docker_debian_worker_images}})

    cent_props = dict(props)
    cent_props['image'] = random.choice({{docker_centos_worker_images}})

    builders = getPullRequestBuilder(props, pretty_branch_name)

    #Only one maven build per branch at a time
    branch_mvn_lock = util.MasterLock(pretty_branch_name + "mvn_lock")

    for jdk in common.getJDKBuilds(props):
        jdk_props = dict(props)
        jdk_props['jdk'] = str(jdk)

        builders.append(
            util.BuilderConfig(
                name=pretty_branch_name + " Build JDK " + str(jdk),
                workernames=workers,
                factory=build.getBuildPipeline(),
                properties=jdk_props,
                collapseRequests=True,
                #A note on these locks: We want a single maven build per branch,
                # AND a single maven build per worker
                locks=[
                    mvn_lock.access('exclusive'),
                    branch_mvn_lock.access('exclusive')
                ]))

        report_props = dict(jdk_props)
        report_props['cores'] = '1'

        builders.append(
            util.BuilderConfig(
                name=pretty_branch_name + " Reports JDK " + str(jdk),
                workernames=workers,
                factory=reports.getBuildPipeline(),
                properties=jdk_props,
                collapseRequests=True,
                #A note on these locks: We want a single maven build per branch,
                # AND a single maven build per worker
                locks=[
                    mvn_lock.access('exclusive'),
                    branch_mvn_lock.access('exclusive')
                ]))

    release_props = dict(props)
    #We use the first listed JDK since that should be the lowest, most common version
    release_props['jdk'] = str(common.getJDKBuilds(props)[0])
    builders.append(
        util.BuilderConfig(
            name=pretty_branch_name + " Release",
            workernames=workers,
            factory=release.getBuildPipeline(),
            properties=release_props,
            collapseRequests=True,
            #Note: We want a single maven build per worker, but since this is a release we don't
            # care if there are other maven builds running elsewhere
            locks=[mvn_lock.access('exclusive')]))

    builders.append(
        util.BuilderConfig(name=pretty_branch_name + " Markdown",
                           workernames=workers,
                           factory=markdown.getBuildPipeline(),
                           properties=props,
                           collapseRequests=True))

    #    builders.append(util.BuilderConfig(
    #        name=pretty_branch_name + " Database Tests",
    #        workernames=workers,
    #        factory=database.getBuildPipeline(),
    #        properties=props,
    #        collapseRequests=True,
    #        locks=[db_lock.access('exclusive')]))

    builders.append(
        util.BuilderConfig(name=pretty_branch_name + " Debian Packaging",
                           workernames=workers,
                           factory=debs.getBuildPipeline(),
                           properties=deb_props,
                           collapseRequests=True,
                           locks=[deb_lock.access('exclusive')]))

    for distro in (7, 8):
        el_props = dict(props)
        el_props['el_version'] = distro
        if 7 == distro:
            el_props['image'] = f"cent{distro}"
            lock = el7_lock
        elif 8 == distro:
            el_props['image'] = f"rocky{distro}"
            lock = el8_lock

        if "Develop" == pretty_branch_name:
            #Set the RPM branch to master
            el_props['rpmspec_override'] = "master"
            #Override/set a bunch of the build props since the RPMs don't really have a develop...

        builders.append(
            util.BuilderConfig(name=pretty_branch_name +
                               f" el{distro} RPM Packaging",
                               workernames=workers,
                               factory=rpms.getBuildPipeline(),
                               properties=el_props,
                               collapseRequests=True,
                               locks=[lock.access('exclusive')]))

    if props['deploy_env']:
        deploy_props = dict(props)
        deploy_props['deploy_suite'] = '{{ repo_deploy_suite }}'
        deploy_props['package_repo_host'] = "{{ repo_host }}"
        deploy_props['key_url'] = "{{ key_url }}"
        deploy_props['key_id'] = "{{ key_id }}"

        builders.append(
            util.BuilderConfig(
                name=pretty_branch_name + " Ansible Deploy",
                workernames=workers,
                factory=ansible.getBuildPipeline(),
                properties=deploy_props,
                collapseRequests=True,
                #Ensure that no one is changing the package databases while we're deploying!
                locks=[
                    deb_lock.access('exclusive'),
                    el7_lock.access('exclusive'),
                    el8_lock.access('exclusive')
                ]))

    return builders
import reports
import markdown
import database
import debs
import rpms
import rpm_repo
import ansible
import release

#One of each of these per worker at a time
mvn_lock = util.WorkerLock("mvn_lock", maxCount=1)
db_lock = util.WorkerLock("db_lock", maxCount=1)

#These are used for the repository generation builders
#of which there must only be a single one running at a time across the whole cluster
deb_lock = util.MasterLock("deb_lock", maxCount=1)
el7_lock = util.MasterLock("el7_lock", maxCount=1)
el8_lock = util.MasterLock("el8_lock", maxCount=1)

# We filter here to remove blank entries (i.e., "") since some of these lines don't always yield results,
# and it's hard to keep from adding the leading and trailing quotes in Jinja
workers = list(
    filter(lambda a: a,
           [{{
               '\"' + groups['workers'] | map('extract', hostvars)
               | selectattr('only_repo_builder', 'undefined')
               | map(attribute='name') | join('\", \"') + '\"'
           }},
            {{
                '\"' + groups['workers'] | map('extract', hostvars)
                | selectattr('only_repo_builder', 'defined')