Esempio n. 1
0
def setup_global_config(c):
    """Populate the shared buildbot master config dict *c* in place.

    NOTE(review): this snippet is mangled — credential redaction has
    collapsed the ``buildbotURL`` assignment and the opening of a
    ``reporters.MailNotifier(...)`` call into one line (below), so the
    block does not parse as-is.  The MailNotifier keyword arguments are
    intact; the missing pieces (fromaddr, the closing of the URL default,
    the list this call belongs to) must be restored from the upstream
    repository before this can run.
    """
    ####### PROJECT IDENTITY
    c['title'] = "Natural Intelligence"
    c['titleURL'] = "http://www.naturalint.com/"
    c['buildbotURL'] = os.environ.get("BUILDBOT_WEB_URL", "http://*****:*****@gmail.com",
            relayhost=os.environ.get("SMTP_SERVER", "smtp.gmail.com"),
            # Send mail on state changes and while failing; one summary per buildset.
            mode=('change', 'failing'),
            buildSetSummary=True,
            smtpPort=int(os.environ.get("SMTP_PORT", 465)),
            useSmtps=int(os.environ.get("USE_SMTPS", True)),
            smtpUser='******',
            smtpPassword='******')
    ]
Esempio n. 2
0
 def __init__(self, settings, name, tags=None, branches=None,
              parallel_builders=None, parallel_tests=None):
     """Describe one buildbot worker and create its Worker object.

     Looks up per-worker and per-owner settings (worker names are
     "<owner>-<suffix>"), resolves the password and the missing-worker
     notification addresses, then builds either a LocalWorker or a
     remote Worker depending on ``settings.use_local_worker``.
     """
     self.name = name
     self.tags = tags or set()
     self.branches = branches
     self.parallel_builders = parallel_builders
     self.parallel_tests = parallel_tests

     per_worker = settings.workers[name]
     # The owner is the part of the worker name before the first dash;
     # owner-level settings supply fallbacks for worker-level ones.
     owner_cfg = settings.owners[name.split("-")[0]]
     password = per_worker.get("password", None) or owner_cfg.password
     # Notify the global status address and the owner, skipping unset entries.
     notify = [
         str(addr)
         for addr in (settings.get("status_email", None),
                      owner_cfg.get("email", None))
         if addr
     ]
     if settings.use_local_worker:
         self.bb_worker = _worker.LocalWorker(name)
     else:
         self.bb_worker = _worker.Worker(name,
                                         str(password),
                                         notify_on_missing=notify,
                                         keepalive_interval=KEEPALIVE)
Esempio n. 3
0
def configure(c):
    """Register one buildbot worker per entry in the module-level ``infosun`` map.

    Each value must provide ``host`` and ``password``; an optional
    ``properties`` dict is forwarded to the Worker unchanged (defaults
    to an empty dict, matching the previous behavior).
    """
    # Iterate values directly instead of keys + re-indexing; dict.get
    # replaces the manual "if key in dict" default dance.
    for info in infosun.values():
        c['workers'].append(
            worker.Worker(info["host"],
                          info["password"],
                          properties=info.get("properties", {})))
Esempio n. 4
0
 def __init__(
     self,
     settings,
     name,
     tags=None,
     branches=None,
     parallel_builders=None,
     parallel_tests=None,
 ):
     """Describe one buildbot worker and create its Worker object.

     Worker names are "<owner>-<suffix>"; owner-level settings supply
     the password fallback and the owner's notification email.
     """
     self.name = name
     self.tags = tags or set()
     self.branches = branches
     self.parallel_builders = parallel_builders
     self.parallel_tests = parallel_tests

     per_worker = settings.workers[name]
     owner_cfg = settings.owners[name.split('-')[0]]
     password = per_worker.get('password', None) or owner_cfg.password
     # Notify the global status address and the owner, dropping unset entries.
     notify = [
         str(addr)
         for addr in (settings.get('status_email', None),
                      owner_cfg.get('email', None))
         if addr
     ]
     self.bb_worker = _worker.Worker(name, str(password),
                                     notify_on_missing=notify)
Esempio n. 5
0
from buildbot.plugins import worker
from yoctoabb import config

# Register every configured worker; all of them share the common password,
# build limit, and missing-worker notification list from the config module.
workers = [
    worker.Worker(worker_name, config.worker_password,
                  max_builds=config.worker_max_builds,
                  notify_on_missing=config.notify_on_missing)
    for worker_name in config.workers
]
Esempio n. 6
0
def create_worker(name, *args, **kwargs):
    """Create a buildbot Worker, pulling its password from the master config.

    Extra positional/keyword arguments are forwarded to ``worker.Worker``.
    """
    pw = config.options.get('Worker Passwords', name)
    return worker.Worker(name, *args, password=pw, **kwargs)
Esempio n. 7
0
    projectName = _getImage(rawProjectName)
    return '{}/{}:latest'.format(Config.get("docker", "registry"), projectName)


####### WORKERS
# Worker names are "<host>-NN": every host listed in the config gets
# worker_instance numbered instances (01-based, zero-padded to two digits).
workernames = [
    "%s-%02d" % (host, idx)
    for host in json.loads(Config.get("buildbot", "workers"))
    for idx in range(1, int(Config.get("buildbot", "worker_instance")) + 1)
]

# The 'workers' list defines the set of recognized workers. Each element is
# a Worker object, specifying a unique worker name and password.  The same
# worker name and password must be configured on the worker.
workers = [
    worker.Worker(wn, Config.get("buildbot", "worker_password"))
    for wn in workernames
]
c['workers'] = workers

# 'protocols' contains information about protocols which master will use for
# communicating with workers. You must define at least 'port' option that workers
# could connect to your master with this protocol.
# 'port' must match the value configured into the workers (with their
# --master option)
c['protocols'] = {'pb': {'port': 9989}}

####### CHANGESOURCES

# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes.  Here we point to the buildbot clone of pyflakes.
c['change_source'] = []
Esempio n. 8
0
 def createWorkerConfigWorker(self, config, name):
     """Return a Worker named *name* whose password comes from *config*."""
     worker_password = config['password']
     return worker.Worker(name, password=worker_password)
Esempio n. 9
0
import config

import bb.utils

c = BuildmasterConfig = {}

# Register workers: one Worker per (name, properties) pair in every pool.
c["workers"] = []
ALL_WORKERS_NAMES = []
for pool in config.WORKERS.values():
    for worker_name, worker_props in pool.items():
        ALL_WORKERS_NAMES.append(worker_name)
        # max_builds defaults to 1 so a worker never runs parallel builds.
        c["workers"].append(
            worker.Worker(worker_name,
                          config.WORKER_PASS,
                          properties=worker_props,
                          max_builds=worker_props.get('max_builds') or 1))

# Basic master settings, all sourced from the local config module.
c["protocols"] = {"pb": {"port": config.WORKER_PORT}}
c["buildbotNetUsageData"] = config.BUILDBOT_NET_USAGE_DATA
c["title"] = config.BUILDBOT_TITLE
c["titleURL"] = config.REPO_URL
c["buildbotURL"] = config.BUILDBOT_URL


def get_workers(worker_pool):
    """Return the worker names in *worker_pool*; every known worker when None."""
    if worker_pool is None:
        return ALL_WORKERS_NAMES
    return [wn for wn in config.WORKERS[worker_pool]]
Esempio n. 10
0
def build_config() -> dict[str, Any]:
    """Assemble and return the buildbot master configuration dict.

    Secrets are read via read_secret_file() and the systemd credentials
    directory; one logical worker is registered per CPU core declared in
    the "github-workers" secret, and the schedulers/builders wiring the
    nix-eval -> nix-build -> nix-update-flake pipeline is set up along
    with GitHub status reporting, IRC failure notification, and the web UI.
    """
    c = {}
    c["buildbotNetUsageData"] = None

    # configure a janitor which will delete all logs older than one month, and will run on sundays at noon
    c['configurators'] = [util.JanitorConfigurator(
        logHorizon=timedelta(weeks=4),
        hour=12,
        dayOfWeek=6
    )]

    c["schedulers"] = [
        # build all pushes to master
        schedulers.SingleBranchScheduler(
            name="master",
            change_filter=util.ChangeFilter(branch="master"),
            builderNames=["nix-eval"],
        ),
        # build all pull requests
        schedulers.SingleBranchScheduler(
            name="prs",
            change_filter=util.ChangeFilter(category="pull"),
            builderNames=["nix-eval"],
        ),
        # this is triggered from `nix-eval`
        schedulers.Triggerable(
            name="nix-build",
            builderNames=["nix-build"],
        ),
        # allow to manually trigger a nix-build
        schedulers.ForceScheduler(name="force", builderNames=["nix-eval"]),
        # allow to manually update flakes
        schedulers.ForceScheduler(
            name="update-flake",
            builderNames=["nix-update-flake"],
            buttonName="Update flakes",
        ),
        # updates flakes once a week (Saturday 03:00)
        schedulers.NightlyTriggerable(
            name="update-flake-weekly",
            builderNames=["nix-update-flake"],
            hour=3,
            minute=0,
            dayOfWeek=6,
        ),
    ]

    github_api_token = read_secret_file("github-token")
    c["services"] = [
        reporters.GitHubStatusPush(
            token=github_api_token,
            # Since we dynamically create build steps,
            # we use `virtual_builder_name` in the webinterface
            # so that we can distinguish what is being built
            context=Interpolate("buildbot/%(prop:virtual_builder_name)s"),
        ),
        # Notify on irc
        NotifyFailedBuilds("irc://buildbot|[email protected]:6667/#xxx"),
    ]

    # Shape of this file:
    # [ { "name": "<worker-name>", "pass": "******", "cores": "<cpu-cores>" } ]
    worker_config = json.loads(read_secret_file("github-workers"))

    # CREDENTIALS_DIRECTORY is set by systemd; fall back to the CWD for dev runs.
    credentials = os.environ.get("CREDENTIALS_DIRECTORY", ".")
    # Cachix upload is optional: enabled only when its token file is present.
    enable_cachix = os.path.isfile(os.path.join(credentials, "cachix-token"))

    systemd_secrets = secrets.SecretInAFile(dirname=credentials)
    c["secretsProviders"] = [systemd_secrets]
    c["workers"] = []
    worker_names = []
    # Register one logical worker per advertised CPU core ("<name>-<i>").
    for item in worker_config:
        cores = item.get("cores", 0)
        for i in range(cores):
            worker_name = f"{item['name']}-{i}"
            c["workers"].append(worker.Worker(worker_name, item["pass"]))
            worker_names.append(worker_name)
    c["builders"] = [
        # Since all workers run on the same machine, we only assign one of them to do the evaluation.
        # This should prevent excessive memory usage.
        nix_eval_config([worker_names[0]], github_token_secret="github-token"),
        nix_build_config(worker_names, enable_cachix),
        nix_update_flake_config(
            worker_names,
            "TUM-DSE/doctor-cluster-config",
            github_token_secret="github-token",
        ),
    ]

    c["www"] = {
        "port": int(os.environ.get("PORT", "1810")),
        "auth": util.GitHubAuth(
            os.environ.get("GITHUB_OAUTH_ID"), read_secret_file("github-oauth-secret")
        ),
        "authz": util.Authz(
            roleMatchers=[
                util.RolesFromGroups(groupPrefix="")  # so we can match on TUM-DSE
            ],
            allowRules=[
                util.AnyEndpointMatcher(role="TUM-DSE", defaultDeny=False),
                util.AnyControlEndpointMatcher(role="TUM-DSE"),
            ],
        ),
        "plugins": dict(waterfall_view={}, console_view={}, grid_view={}),
        "change_hook_dialects": dict(
            github={
                "secret": read_secret_file("github-webhook-secret"),
                "strict": True,
                "token": github_api_token,
                "github_property_whitelist": "*",
            }
        ),
    }

    c["db"] = {"db_url": os.environ.get("DB_URL", "sqlite:///state.sqlite")}

    c["protocols"] = {"pb": {"port": "tcp:9989:interface=\\:\\:"}}
    c["buildbotURL"] = "https://buildbot.dse.in.tum.de/"

    return c
Esempio n. 11
0
def make_config(worker_name, worker_password, worker_port, git_repo, branch,
                poll_interval, builder_name, project_name, project_url,
                buildbot_url, buildbot_web_port, buildbot_from_email):
    """Build a complete single-worker buildbot master configuration dict.

    A GitPoller watches *branch* of *git_repo* at *poll_interval*; changes
    (and manual "force" requests) trigger the single builder, which checks
    out the source and runs ``make check`` under direnv.  The returned dict
    is suitable for assignment to ``BuildmasterConfig``.
    """
    return {
        'workers': [worker.Worker(worker_name, worker_password)],
        'protocols': {
            'pb': {
                'port': worker_port
            }
        },
        'change_source': [
            changes.GitPoller(
                git_repo,
                workdir='gitpoller-workdir',
                branch=branch,
                pollinterval=poll_interval,
            ),
        ],
        'schedulers': [
            # Build once the branch has been quiet for one poll interval.
            schedulers.SingleBranchScheduler(
                name="all",
                change_filter=util.ChangeFilter(branch=branch),
                treeStableTimer=poll_interval,
                builderNames=[builder_name],
            ),
            # Allow manual builds from the web UI.
            schedulers.ForceScheduler(
                name="force",
                builderNames=[builder_name],
            ),
        ],
        'builders': [
            util.BuilderConfig(
                name=builder_name,
                workernames=[worker_name],
                factory=util.BuildFactory([
                    # check out the source
                    steps.Git(repourl=git_repo, mode='incremental'),
                    # run the tests
                    steps.ShellCommand(command=[
                        "direnv",
                        "allow",
                        ".",
                    ], ),
                    steps.ShellCommand(
                        command=[
                            "direnv",
                            "exec",
                            ".",
                            "make",
                            "check",
                        ],
                        env={
                            'NIX_REMOTE': 'daemon',
                        },
                        # If we have to rebuild our dependencies from scratch,
                        # we can go a long time without receiving output from
                        # the compiler. Default timeout is 20 mins, bump to
                        # 1hr.
                        timeout=60 * 60,
                    ),
                ]),
            ),
        ],
        'status': [],
        'title':
        project_name,
        'titleURL':
        project_url,
        'buildbotURL':
        buildbot_url,
        'www': {
            'port': buildbot_web_port,
            'plugins': {
                'waterfall_view': {},
            },
        },
        'db': {
            'db_url': "sqlite:///state.sqlite",
        },
        'services': [
            reporters.MailNotifier(
                fromaddr=buildbot_from_email,
                # TODO(jml): Currently sending mail for all builds. We should
                # send mail under fewer circumstances once we have a better
                # idea about what we actually want.
                #
                # http://buildbot.readthedocs.io/en/latest/manual/cfg-reporters.html?highlight=github#mailnotifier-arguments
                mode='all',
                # XXX: Temporarily hard-code until we can figure out how to
                # get these automatically from commits.
                extraRecipients=[
                    "*****@*****.**",
                    "*****@*****.**",
                ],
            )
        ],
    }