Example #1
    def __init__(self):
        self.auth = Auth(optionsFromEnvironment(), session=aiohttp_session())
        self.secrets = Secrets(optionsFromEnvironment(),
                               session=aiohttp_session())
        self.hooks = Hooks(optionsFromEnvironment(), session=aiohttp_session())
        self.worker_manager = WorkerManager(optionsFromEnvironment(),
                                            session=aiohttp_session())
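All of these examples build their client options the same way: optionsFromEnvironment() assembles the root URL and credentials from the TASKCLUSTER_ROOT_URL, TASKCLUSTER_CLIENT_ID and TASKCLUSTER_ACCESS_TOKEN environment variables that Example #29 checks explicitly. A minimal, self-contained sketch of that setup (the root URL below is only an assumption, matching the community deployment used elsewhere on this page):

import os
import taskcluster

# Assumed setup, not taken from any single example: export the TASKCLUSTER_*
# variables, then let optionsFromEnvironment() turn them into client options.
os.environ.setdefault("TASKCLUSTER_ROOT_URL",
                      "https://community-tc.services.mozilla.com")

options = taskcluster.optionsFromEnvironment()
print(options["rootUrl"])           # root URL picked up from the environment
queue = taskcluster.Queue(options)  # any service client accepts the same dict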
Example #2
    def __init__(self):
        self.auth = Auth(optionsFromEnvironment(), session=aiohttp_session())
        self.hooks = Hooks(optionsFromEnvironment(), session=aiohttp_session())
        self.awsprovisioner = AwsProvisioner(optionsFromEnvironment(),
                                             session=aiohttp_session())
        self.worker_manager = WorkerManager(optionsFromEnvironment(),
                                            session=aiohttp_session())
Example #3
def ext_pillar(minion_id, _pillar, *_args):
    if not CACHE:
        secrets = taskcluster.Secrets(taskcluster.optionsFromEnvironment())
        CACHE["credentials"] = secrets.get(
            "project/servo/tc-client/worker/macos/1")["secret"]

        url = "https://raw.githubusercontent.com/servo/saltfs/master/admin/files/ssh/%s.pub"
        CACHE["ssh_keys"] = [
            urllib.urlopen(url % name).read() for name in [
                "jdm",
                "manishearth",
                "simonsapin",
            ]
        ]

        CACHE["workers"] = {
            worker: (pool_name, config)
            for pool_name, pool in read_yaml("worker-pools.yml").items()
            if pool["kind"] == "static"
            for worker, config in pool["workers"].items()
        }

    pool_name, config = CACHE["workers"][minion_id]
    disabled = {"disabled": True, None: False}[config]
    return dict(disabled=disabled, worker_type=pool_name, **CACHE)
Example #4
def main(*task_group_ids):
    for task_group_id in task_group_ids:
        print("https://community-tc.services.mozilla.com/tasks/groups/" +
              task_group_id)
        timings = {}

        def handler(result):
            for task in result["tasks"]:
                name = task["task"]["metadata"]["name"]
                for run in task["status"]["runs"]:
                    resolved = run.get("resolved")
                    if not resolved:
                        print("Not resolved yet:", name)
                        continue
                    key = task["task"]["workerType"]
                    if "WPT" in name:
                        key += " WPT"
                    # fromisoformat doesn’t like the "Z" timezone, [:-1] to remove it
                    timings.setdefault(key, []).append(
                        datetime.datetime.fromisoformat(resolved[:-1]) -
                        datetime.datetime.fromisoformat(run["started"][:-1]))

        queue = taskcluster.Queue(taskcluster.optionsFromEnvironment())
        queue.listTaskGroup(task_group_id, paginationHandler=handler)

        r = lambda d: datetime.timedelta(seconds=round(d.total_seconds()))
        for worker_type, t in sorted(timings.items()):
            print("count {}, total {}, max: {}\t{}\t{}".format(
                len(t),
                r(sum(t[1:], start=t[0])),
                r(max(t)),
                worker_type,
                ' '.join(str(r(s)) for s in t),
            ))
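The [:-1] slicing in the handler above works around the fact that datetime.datetime.fromisoformat() rejects the trailing "Z" on Python versions before 3.11. A hedged alternative (the helper name and timestamps below are made up for illustration) is to rewrite the suffix as an explicit UTC offset, which also keeps the parsed values timezone-aware:

import datetime

# Hypothetical helper, not part of the original script: turn the trailing "Z"
# into "+00:00" so fromisoformat() accepts it on Python 3.7-3.10 as well.
def parse_tc_timestamp(value):
    return datetime.datetime.fromisoformat(value.replace("Z", "+00:00"))

duration = (parse_tc_timestamp("2020-01-01T12:35:56.789Z") -
            parse_tc_timestamp("2020-01-01T12:34:56.789Z"))
print(duration)  # 0:01:00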
Example #5
def get_secret(secret):
    # use proxy if configured, otherwise local credentials from env vars
    if "TASKCLUSTER_PROXY_URL" in os.environ:
        secrets_options = {"rootUrl": os.environ["TASKCLUSTER_PROXY_URL"]}
    else:
        secrets_options = taskcluster.optionsFromEnvironment()
    secrets = taskcluster.Secrets(secrets_options)
    return secrets.get(secret)["secret"]
Example #6
async def tcClientOptions():
    """Build Taskcluster client options, supporting proxy and getting root_url
    from the appconfig"""
    if "TASKCLUSTER_PROXY_URL" in os.environ:
        return {"rootUrl": os.environ["TASKCLUSTER_PROXY_URL"]}
    else:
        options = optionsFromEnvironment()
        options["rootUrl"] = await root_url()
        return options
Example #7
def main():
    # Use proxy if configured (within a task), otherwise, use local credentials from env vars
    if 'TASKCLUSTER_PROXY_URL' in os.environ:
        options = {'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']}
    else:
        options = taskcluster.optionsFromEnvironment()
    secrets = taskcluster.Secrets(options)
    secret = secrets.get("project/cia/garbage/foo")
    print(secret["secret"])
Example #8
def auth():
    # tests using this fixture need *some* auth service, but it actually
    # doesn't matter which one
    if "TASKCLUSTER_ROOT_URL" not in os.environ:
        msg = "TASKCLUSTER_ROOT_URL not set"
        if "NO_TEST_SKIP" in os.environ:
            pytest.fail(msg)
        else:
            pytest.skip(msg)
    return taskcluster.Auth(taskcluster.optionsFromEnvironment())
Example #9
def email_release_drivers(
    addresses,
    product,
    version,
    build_number,
    repo,
    revision,
    task_group_id,
):
    # Send an email to the mailing list after the build
    email_buglist_string = create_bugs_url(product,
                                           version,
                                           revision,
                                           repo=repo)

    content = """\
A new build has been started:

Commit: [{revision}]({repo}/rev/{revision})
Task group: [{task_group_id}]({root_url}/tasks/groups/{task_group_id})

{email_buglist_string}
""".format(
        repo=repo,
        revision=revision,
        root_url=os.environ["TASKCLUSTER_ROOT_URL"],
        task_group_id=task_group_id,
        email_buglist_string=email_buglist_string,
    )

    # On r-d, we prefix the subject of the email in order to simplify filtering
    subject_prefix = ""
    if product in {"fennec"}:
        subject_prefix = "[mobile] "
    if product in {"firefox", "devedition"}:
        subject_prefix = "[desktop] "

    subject = "{} Build of {} {} build {}".format(subject_prefix, product,
                                                  version, build_number)

    # use proxy if configured, otherwise local credentials from env vars
    if "TASKCLUSTER_PROXY_URL" in os.environ:
        notify_options = {"rootUrl": os.environ["TASKCLUSTER_PROXY_URL"]}
    else:
        notify_options = optionsFromEnvironment()

    notify = Notify(notify_options)
    for address in addresses:
        notify.email({
            "address": address,
            "subject": subject,
            "content": content,
        })
Example #10
def main():
    with open("/etc/ssl/openssl.cnf") as f:
        config = f.read()
    config += ("[v3_req]\n"
               "basicConstraints = critical,CA:FALSE\n"
               "keyUsage = digitalSignature\n"
               "extendedKeyUsage = critical,codeSigning")
    with tempfile.TemporaryDirectory() as tmp:
        os.chdir(tmp)
        with open("config", "w") as f:
            f.write(config)
        now = datetime.datetime.now()
        run("""
            openssl req
            -x509
            -sha256
            -nodes
            -days 730
            -newkey rsa:4096
            -subj /CN=Allizom
            -extensions v3_req
            -batch

            -config config
            -keyout key.pem
            -out cert.pem
        """)
        run("""
            openssl pkcs12
            -export
            -passout pass:

            -inkey key.pem
            -in cert.pem
            -out servo.pfx
        """)
        with open("servo.pfx", "rb") as f:
            pfx = f.read()

    value = {"pfx": {"base64": base64.b64encode(pfx)}, "created": now}

    tc_options = taskcluster.optionsFromEnvironment()
    secrets = taskcluster.Secrets(tc_options)
    for name in [now.strftime("%Y-%m-%d_%H-%M-%S"), "latest"]:
        payload = {
            "secret": value,
            "expires": datetime.datetime(3000, 1, 1, 0, 0, 0)
        }
        secrets.set("project/servo/windows-codesign-cert/" + name, payload)

    print("https://community-tc.services.mozilla.com/secrets/"
          "project%2Fservo%2Fwindows-codesign-cert%2Flatest")
Example #11
async def fetch_hooks(resources):
    hooks = Hooks(optionsFromEnvironment(), session=aiohttp_session())
    for hookGroupId in (await hooks.listHookGroups())["groups"]:
        idPrefix = "Hook={}/".format(hookGroupId)
        # if no hook with this hookGroupId is managed, skip it
        is_managed = any(m.startswith(idPrefix) for m in resources.managed)
        is_managed = is_managed or resources.is_managed(idPrefix)
        if not is_managed:
            continue
        for hook in (await hooks.listHooks(hookGroupId))["hooks"]:
            hook = Hook.from_api(hook)
            if resources.is_managed(hook.id):
                resources.add(hook)
Example #12
async def fetch_aws_provisioner_workertypes(resources):
    # AWS provisioner only *exists* in this deployment:
    if root_url() != "https://taskcluster.net":
        return

    aws_provisioner = AwsProvisioner(
        optionsFromEnvironment(), session=aiohttp_session()
    )
    for workerTypeId in await aws_provisioner.listWorkerTypes():
        workerType = await aws_provisioner.workerType(workerTypeId)
        awsProvisionerWorkerType = AwsProvisionerWorkerType.from_api(workerType)
        if resources.is_managed(awsProvisionerWorkerType.id):
            resources.add(awsProvisionerWorkerType)
Example #13
async def fetch_clients(resources):
    auth = Auth(optionsFromEnvironment(), session=aiohttp_session())
    query = {}
    while True:
        res = await auth.listClients(query=query)
        for clients in res["clients"]:
            client = Client.from_api(clients)
            if resources.is_managed(client.id):
                resources.add(client)

        if "continuationToken" in res:
            query["continuationToken"] = res["continuationToken"]
        else:
            break
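Example #13 pages through listClients by feeding continuationToken back into the query by hand. The synchronous client used in Examples #4 and #23 exposes a paginationHandler callback for the same purpose, which should apply to listClients as well; a minimal sketch under that assumption:

import taskcluster

# Sketch only: collect client IDs and let the library drive pagination,
# mirroring the paginationHandler usage in Examples #4 and #23.
client_ids = []

def handler(result):
    for client in result["clients"]:
        client_ids.append(client["clientId"])

auth = taskcluster.Auth(taskcluster.optionsFromEnvironment())
auth.listClients(paginationHandler=handler)
print(len(client_ids), "clients")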
Example #14
def main(client_id):
    assert client_id.startswith("project/servo/")

    print("Client ID: `%s`" % client_id)
    print("Creating a new access token will invalidate the current one.")
    if input("Continue? [y/n] ") != "y":
        return 1

    options = taskcluster.optionsFromEnvironment()
    result = taskcluster.Auth(options).resetAccessToken(client_id)

    key = "project/servo/tc-client/" + client_id[len("project/servo/"):]
    secret = {"client_id": client_id, "access_token": result["accessToken"]}
    payload = {"secret": secret, "expires": result["expires"]}
    taskcluster.Secrets(options).set(key, payload)
Example #15
def get_secret(name):
    secret = None
    if "TASK_ID" in os.environ:
        secrets_url = (
            "http://taskcluster/secrets/v1/secret/project/updatebot/" +
            ("3" if OPERATING_MODE == "prod" else "2") + "/" + name)
        res = requests.get(secrets_url)
        res.raise_for_status()
        secret = res.json()
    else:
        secrets = taskcluster.Secrets(taskcluster.optionsFromEnvironment())
        secret = secrets.get("project/updatebot/" + OPERATING_MODE + "/" +
                             name)
    secret = secret["secret"] if "secret" in secret else None
    secret = secret["value"] if "value" in secret else None
    return secret
Example #16
    def get_taskcluster_options(self):
        '''
        Helper to get the Taskcluster setup options
        according to current environment (local or Taskcluster)
        '''
        options = taskcluster.optionsFromEnvironment()
        proxy_url = os.environ.get('TASKCLUSTER_PROXY_URL')

        if proxy_url is not None:
            # Always use proxy url when available
            options['rootUrl'] = proxy_url

        if 'rootUrl' not in options:
            # Always have a value in root url
            options['rootUrl'] = TASKCLUSTER_DEFAULT_URL

        return options
Example #17
def get_taskcluster_options() -> dict:
    """
    Helper to get the Taskcluster setup options
    according to current environment (local or Taskcluster)
    """
    options = taskcluster.optionsFromEnvironment()
    proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")

    if proxy_url is not None:
        # Always use proxy url when available
        options["rootUrl"] = proxy_url

    if "rootUrl" not in options:
        # Always have a value in root url
        options["rootUrl"] = TASKCLUSTER_DEFAULT_URL

    return options
Example #18
def inject_secrets(config):
    """
    INJECT THE SECRETS INTO THE CONFIGURATION
    :param config: CONFIG DATA

    ************************************************************************
    ** ENSURE YOU HAVE AN ENVIRONMENT VARIABLE SET:
    ** TASKCLUSTER_ROOT_URL = https://community-tc.services.mozilla.com
    ************************************************************************
    """
    with Timer("get secrets"):
        options = taskcluster.optionsFromEnvironment()
        secrets = taskcluster.Secrets(options)
        acc = Data()
        for s in listwrap(SECRET_NAMES):
            acc[s] = secrets.get(concat_field(SECRET_PREFIX, s))['secret']
        set_default(config, acc)
Example #19
def get_taskcluster_options():
    """
    Helper to get the Taskcluster setup options
    according to current environment (local or Taskcluster)
    """
    options = taskcluster.optionsFromEnvironment()
    proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")

    if proxy_url is not None:
        # Always use proxy url when available
        options["rootUrl"] = proxy_url

    if "rootUrl" not in options:
        # Always have a value in root url
        options["rootUrl"] = TASKCLUSTER_DEFAULT_URL

    return options
Example #20
def get_taskcluster_options():
    """
    Helper to get the Taskcluster setup options
    according to current environment (local or Taskcluster)
    """
    options = taskcluster.optionsFromEnvironment()
    proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")

    if proxy_url is not None:
        # Always use proxy url when available
        options["rootUrl"] = proxy_url

    if "rootUrl" not in options:
        # Always have a value in root url
        options["rootUrl"] = "https://community-tc.services.mozilla.com"

    return options
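Examples #16, #17, #19 and #20 all wrap the same fallback chain: prefer TASKCLUSTER_PROXY_URL, then whatever optionsFromEnvironment() found, then a hard-coded default root URL. A short, hypothetical usage sketch of that helper (the secret name below is made up):

import taskcluster

# Hypothetical usage: one options dict serves every service client, whether
# the code runs inside a task (proxy URL set) or locally with credentials
# exported in the environment.
options = get_taskcluster_options()
secrets = taskcluster.Secrets(options)
value = secrets.get("project/example/some-secret")["secret"]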
Example #21
    def _trigger_action(self, action, payload):
        tc_firefox_ci_credentials = config.get("taskcluster_firefox_ci", {})
        client_id = tc_firefox_ci_credentials.get("client_id")
        access_token = tc_firefox_ci_credentials.get("access_token")
        assert (
            client_id and access_token
        ), "Missing Taskcluster Firefox CI credentials in mozci config secret"

        options = taskcluster.optionsFromEnvironment()
        options["rootUrl"] = PRODUCTION_TASKCLUSTER_ROOT_URL
        options["credentials"] = {
            "clientId": client_id,
            "accessToken": access_token,
        }
        hooks = taskcluster.Hooks(options)

        result = hooks.triggerHook(action["hookGroupId"], action["hookId"],
                                   payload)
        return result["status"]["taskId"]
Example #22
async def fetch_secrets(resources, with_secrets):
    api = Secrets(optionsFromEnvironment(), session=aiohttp_session())
    query = {}
    while True:
        res = await api.list(query=query)
        for secret_name in res["secrets"]:
            if resources.is_managed("Secret={}".format(secret_name)):
                # only call `get` if we are managing secrets
                if with_secrets:
                    getres = await api.get(secret_name)
                    secret = Secret.from_api(secret_name, getres)
                else:
                    secret = Secret.from_api(secret_name)
                resources.add(secret)

        if "continuationToken" in res:
            query["continuationToken"] = res["continuationToken"]
        else:
            break
Example #23
def main(worker_pool):
    wm = taskcluster.WorkerManager(taskcluster.optionsFromEnvironment())
    workers = []
    stopped = 0

    def handle(result):
        nonlocal stopped
        for w in result["workers"]:
            if w["state"] == "stopped":
                stopped += 1
            else:
                workers.append(w)

    wm.listWorkersForWorkerPool("proj-servo/" + worker_pool,
                                paginationHandler=handle)

    workers.sort(key=lambda w: w["created"])
    print("Created                  ID                  State")
    for w in workers:
        print(w["created"], w["workerId"], w["state"])
    print("… and %s stopped" % stopped)

    if not workers:
        return

    result = input("Remove all? [y/n, or ID] ").strip()
    for w in workers:
        if result == w["workerId"]:
            workers = [w]
            break
    else:
        if result != "y":
            return 1

    for w in workers:
        sys.stdout.write(".")
        sys.stdout.flush()
        wm.removeWorker("proj-servo/" + worker_pool, w["workerGroup"],
                        w["workerId"])
    print()
Example #24
def main(task_id_or_url, *extra_client_args):
    prefix = "https://community-tc.services.mozilla.com/tasks/"
    if task_id_or_url.startswith(prefix):
        task_id = task_id_or_url[len(prefix):]
    else:
        task_id = task_id_or_url

    options = taskcluster.optionsFromEnvironment()
    queue = taskcluster.Queue(options)
    rdp_info = queue.getLatestArtifact(task_id, "project/servo/rdp-info")

    supported_clients = [
        ["xfreerdp", "/v:{host}:{port}", "/u:{username}", "/p:{password}"],
    ]
    for arg_templates in supported_clients:
        args = [template.format(**rdp_info)
                for template in arg_templates] + list(extra_client_args)
        if executable_is_in_path(args[0]):
            return subprocess.run(args).returncode

    print("Couldn’t find a supported RDP client installed.")
    print("Supported: " + ", ".join(args[0] for args in supported_clients))
Example #25
async def fetch_worker_pools(resources):
    worker_manager = WorkerManager(optionsFromEnvironment(),
                                   session=aiohttp_session())
    query = {}
    while True:
        res = await worker_manager.listWorkerPools(query=query)
        for wp in res["workerPools"]:
            workerPool = WorkerPool.from_api(wp)

            # Worker-manager does not allow pools to be deleted; instead, they
            # are given providerId "null-provider", which provides no workers.
            # Once any pre-existing workers are gone, the service will delete
            # the pool.  So, we ignore any null-provider worker pools on the
            # assumption that they will be deleted soon.
            if workerPool.providerId == "null-provider":
                continue

            if resources.is_managed(workerPool.id):
                resources.add(workerPool)

        if "continuationToken" in res:
            query["continuationToken"] = res["continuationToken"]
        else:
            break
Example #26
import json
import os
import slugid
import taskcluster
import urllib.request
from cib import createTask

createTask(
    queue=taskcluster.Queue(taskcluster.optionsFromEnvironment()),
    image='python',
    taskId=slugid.nice(),
    taskName='00 :: create maintenance and image build tasks',
    taskDescription=
    'determine which windows cloud images should be built, where they should be deployed and trigger appropriate build tasks for the same',
    provisioner='relops',
    workerType='decision',
    features={'taskclusterProxy': True},
    env={'GITHUB_HEAD_SHA': os.getenv('TRAVIS_COMMIT')},
    commands=[
        '/bin/bash', '--login', '-c',
        'git clone https://github.com/mozilla-platform-ops/cloud-image-builder.git && cd cloud-image-builder && git reset --hard {} && pip install azure-mgmt-compute boto3 pyyaml slugid taskcluster urllib3 | grep -v "^[[:space:]]*$" && python ci/create-image-build-tasks.py'
        .format(os.getenv('TRAVIS_COMMIT'))
    ],
    scopes=[
        'generic-worker:os-group:relops/win2019/Administrators',
        'generic-worker:run-as-administrator:relops/*',
        'queue:create-task:highest:relops/*',
        'queue:create-task:very-high:relops/*',
        'queue:create-task:high:relops/*', 'queue:create-task:medium:relops/*',
        'queue:create-task:low:relops/*',
        'queue:route:index.project.relops.cloud-image-builder.*',
Example #27
def create_task(task_id, task):
    options = taskcluster.optionsFromEnvironment()
    options["rootUrl"] = PRODUCTION_TASKCLUSTER_ROOT_URL
    queue = taskcluster.Queue(options)
    return queue.createTask(task_id, task)
Example #28
            'network_profile': {
                'network_interfaces': [{
                    'id': networkInterface.id
                }]
            },
            'availability_set': {
                'id': availabilitySet.id
            }
        }).result()
    print('        virtual machine: {} created in resource group: {}'.format(
        virtualMachine.name, resourceGroup.name))


# init taskcluster clients
taskclusterQueueClient = taskcluster.Queue(
    taskcluster.optionsFromEnvironment())
# init azure clients
azureConfig = yaml.safe_load(
    open('{}/.azure.yaml'.format(os.getenv('HOME')), 'r'))
#azureCredentials = ServicePrincipalCredentials(
#    client_id = azureConfig['client_id'],
#    secret = azureConfig['secret'],
#    tenant = azureConfig['tenant'])
azureCredentials = ClientSecretCredential(
    tenant_id=secret['azure_beta']['tenant_id'],
    client_id=secret['azure_beta']['app_id'],
    client_secret=secret['azure_beta']['password'])
azureComputeManagementClient = ComputeManagementClient(
    azureCredentials, secret['azure_beta']['subscription_id'])
azureNetworkManagementClient = NetworkManagementClient(
    azureCredentials, secret['azure_beta']['subscription_id'])
Example #29
                     or resource.name.endswith('-mpd001-3')
                     or resource.name.endswith('-relops')))
    else:
        print('no filter mechanism identified for {}'.format(
            resource.__class__.__name__))
        return False


if 'TASKCLUSTER_PROXY_URL' in os.environ:
    secretsClient = taskcluster.Secrets(
        {'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']})
    secret = secretsClient.get(
        'project/relops/image-builder/dev')['secret']['azure']
    print('secrets fetched using taskcluster proxy')
elif 'TASKCLUSTER_ROOT_URL' in os.environ and 'TASKCLUSTER_CLIENT_ID' in os.environ and 'TASKCLUSTER_ACCESS_TOKEN' in os.environ:
    secretsClient = taskcluster.Secrets(taskcluster.optionsFromEnvironment())
    secret = secretsClient.get(
        'project/relops/image-builder/dev')['secret']['azure']
    print('secrets fetched using taskcluster environment credentials')
elif os.path.isfile('{}/.cloud-image-builder-secrets.yml'.format(
        os.environ['HOME'])):
    secret = yaml.safe_load(
        open('{}/.cloud-image-builder-secrets.yml'.format(os.environ['HOME']),
             'r'))['azure']
    print('secrets obtained from local filesystem')
else:
    print('failed to obtain taskcluster secrets')
    exit(1)

azureCredentials = ServicePrincipalCredentials(client_id=secret['id'],
                                               secret=secret['key'],
Example #30
import os
import slugid
import taskcluster
import yaml
from cib import createTask, diskImageManifestHasChanged, machineImageManifestHasChanged, machineImageExists
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient


runEnvironment = 'travis' if os.getenv('TRAVIS_COMMIT') is not None else 'taskcluster' if os.getenv('TASK_ID') is not None else None
taskclusterOptions = { 'rootUrl': os.environ['TASKCLUSTER_PROXY_URL'] } if runEnvironment == 'taskcluster' else taskcluster.optionsFromEnvironment()

auth = taskcluster.Auth(taskclusterOptions)
queue = taskcluster.Queue(taskclusterOptions)
index = taskcluster.Index(taskclusterOptions)
secrets = taskcluster.Secrets(taskclusterOptions)

secret = secrets.get('project/relops/image-builder/dev')['secret']

platformClient = {
    'azure': ComputeManagementClient(
        ServicePrincipalCredentials(
            client_id = secret['azure']['id'],
            secret = secret['azure']['key'],
            tenant = secret['azure']['account']),
        secret['azure']['subscription'])
}

if runEnvironment == 'travis':
    commitSha = os.getenv('TRAVIS_COMMIT')
    taskGroupId = slugid.nice()
Example #31
import urllib.request
import yaml
from cib import createTask, diskImageManifestHasChanged, machineImageManifestHasChanged, machineImageExists
from azure.identity import ClientSecretCredential
from azure.mgmt.compute import ComputeManagementClient


def extract_pools(config_path):
    return map(lambda p: '{}/{}'.format(p['domain'], p['variant']), yaml.safe_load(open(config_path, 'r'))['manager']['pool'])


taskclusterOptions = { 'rootUrl': os.environ['TASKCLUSTER_PROXY_URL'] }

auth = taskcluster.Auth(taskclusterOptions)
queue = taskcluster.Queue(taskclusterOptions)
index = taskcluster.Index(taskcluster.optionsFromEnvironment())
secrets = taskcluster.Secrets(taskclusterOptions)

secret = secrets.get('project/relops/image-builder/dev')['secret']

azureDeployment = 'azure_gamma'# if 'stage.taskcluster.nonprod' in os.environ['TASKCLUSTER_ROOT_URL'] else 'azure_alpha'
platformClient = {
    'azure': ComputeManagementClient(
        ClientSecretCredential(
            tenant_id=secret[azureDeployment]['tenant_id'],
            client_id=secret[azureDeployment]['app_id'],
            client_secret=secret[azureDeployment]['password']),
        secret[azureDeployment]['subscription_id'])
}

commitSha = os.getenv('GITHUB_HEAD_SHA')