Example #1
def push_artifacts(target, args):
    '''
    Push all artifacts from dependent tasks
    '''
    assert args.task_id is not None, 'Missing task id'

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_docker_auth(), 'Missing Docker authentication'

    # Set up skopeo
    skopeo = Skopeo(
        config.docker['registry'],
        config.docker['username'],
        config.docker['password'],
    )

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load current task description to list its dependencies
    logger.info('Loading task status {}'.format(args.task_id))
    task = queue.task(args.task_id)
    nb_deps = len(task['dependencies'])
    assert nb_deps > 0, 'No task dependencies'

    # Load dependencies artifacts
    artifacts = load_artifacts(task, queue, args.artifact_filter,
                               args.exclude_filter)

    for task_id, artifact_name in artifacts:
        push_artifact(queue, skopeo, task_id, artifact_name)

    logger.info('All found artifacts were pushed.')
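
A minimal sketch of the CLI wiring push_artifacts expects. Only the attribute names (task_id, artifact_filter, exclude_filter) are taken from the example; the flag spellings and the default filter value are assumptions:

import argparse

# Hypothetical flags; the real tool may spell them differently
parser = argparse.ArgumentParser(description="Push Docker artifacts from dependency tasks")
parser.add_argument("--task-id", required=True)
parser.add_argument("--artifact-filter", default="public/**.tar")
parser.add_argument("--exclude-filter", default=None)
args = parser.parse_args()
# args.task_id, args.artifact_filter and args.exclude_filter now carry
# the values that push_artifacts(target, args) reads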
Example #2
def push_artifacts(target, args):
    """
    Push all artifacts from dependent tasks
    """
    assert args.task_id is not None, "Missing task id"

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_docker_auth(), "Missing Docker authentication"

    if args.push_tool == "skopeo":
        push_tool = Skopeo()
    elif args.push_tool == "docker":
        push_tool = Docker()
    else:
        raise ValueError("Not  supported push tool: {}".format(args.push_tool))

    push_tool.login(config.docker["registry"], config.docker["username"],
                    config.docker["password"])

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load dependencies artifacts
    artifacts = load_artifacts(args.task_id, queue, args.artifact_filter,
                               args.exclude_filter)

    for task_id, artifact_name in artifacts:
        push_artifact(queue, push_tool, task_id, artifact_name)

    logger.info("All found artifacts were pushed.")
Example #3
def push_s3(target, args):
    """
    Push files from a remote task to an AWS S3 bucket
    """
    assert args.task_id is not None, "Missing task id"
    assert not args.artifact_folder.endswith(
        "/"), "Artifact folder {} must not end in /".format(
            args.artifact_folder)

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_aws_auth(), "Missing AWS authentication"

    # Configure boto3 client
    s3 = boto3.client(
        "s3",
        aws_access_key_id=config.aws["access_key_id"],
        aws_secret_access_key=config.aws["secret_access_key"],
    )

    # Check the bucket is available
    try:
        s3.head_bucket(Bucket=args.bucket)
        logger.info("S3 Bucket {} is available".format(args.bucket))
    except botocore.exceptions.ClientError as e:
        logger.error("Bucket {} unavailable: {}".format(args.bucket, e))
        return

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Download all files from the specified artifact folder.
    # These files are then uploaded to the bucket, stripping the artifact
    # folder from their final path
    artifacts = load_artifacts(args.task_id, queue,
                               "{}/*".format(args.artifact_folder))
    for task_id, artifact_name in artifacts:

        # Download each artifact
        assert artifact_name.startswith(args.artifact_folder)
        local_path = download_artifact(queue, task_id, artifact_name)

        # Detect the MIME type to set a valid content-type for web requests
        content_type, _ = mimetypes.guess_type(local_path)
        if content_type is None:
            # Fall back to a default content type so the upload does not
            # crash when the MIME type cannot be detected (see Example #7)
            content_type = "text/plain"

        # Push that artifact on the S3 bucket, without the artifact folder
        s3_path = artifact_name[len(args.artifact_folder) + 1:]
        with open(local_path, "rb") as body:
            s3.put_object(
                Bucket=args.bucket,
                Key=s3_path,
                Body=body,
                ContentType=content_type,
            )
        logger.info("Uploaded {} as {} on S3".format(s3_path, content_type))
Example #4
def stage_deps(target, args):
    """Pull image dependencies into the `img` store.

    Arguments:
        target (taskboot.target.Target): Target
        args (argparse.Namespace): CLI arguments

    Returns:
        None
    """
    create_cert(CA_KEY, CA_CRT, ca=True)
    create_cert(SRV_KEY, SRV_CRT, ca_key=CA_KEY, ca_cert=CA_CRT)
    img_tool = Img(cache=args.cache)

    # retrieve image archives from dependency tasks to /images
    image_path = Path(mkdtemp(prefix="image-deps-"))
    try:
        config = Configuration(Namespace(secret=None, config=None))
        queue = taskcluster.Queue(config.get_taskcluster_options())

        # load images into the img image store via Docker registry
        with Registry():
            for task_id, artifact_name in load_artifacts(
                    args.task_id, queue, "public/**.tar.zst"):
                img = download_artifact(queue, task_id, artifact_name,
                                        image_path)
                image_name = Path(artifact_name).stem
                check_call([
                    "skopeo",
                    "copy",
                    f"docker-archive:{img}",
                    f"docker://localhost/mozillasecurity/{image_name}:latest",
                ])
                img.unlink()
                img_tool.run(
                    ["pull", f"localhost/mozillasecurity/{image_name}:latest"])
                img_tool.run([
                    "tag",
                    f"localhost/mozillasecurity/{image_name}:latest",
                    f"{args.registry}/mozillasecurity/{image_name}:latest",
                ])
                img_tool.run([
                    "tag",
                    f"localhost/mozillasecurity/{image_name}:latest",
                    (f"{args.registry}/mozillasecurity/"
                     f"{image_name}:{args.git_revision}"),
                ])
    finally:
        rmtree(image_path)

    # workaround https://github.com/genuinetools/img/issues/206
    patch_dockerfile(target.check_path(args.dockerfile),
                     img_tool.list_images())
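
The mkdtemp/try/finally pairing in this example can equivalently be expressed with tempfile.TemporaryDirectory, which removes the directory when the context exits, including on error. A minimal sketch:

import tempfile
from pathlib import Path

# Equivalent cleanup pattern to the try/finally above: the temporary
# directory is deleted automatically when the with-block exits
with tempfile.TemporaryDirectory(prefix="image-deps-") as tmp:
    image_path = Path(tmp)
    # ... download artifacts and load them into the img store here ...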
Example #5
def build_hook(target, args):
    """
    Read a hook definition file and either create or update the hook
    """
    hook_file_path = target.check_path(args.hook_file)

    hook_group_id = args.hook_group_id
    hook_id = args.hook_id

    with open(hook_file_path) as hook_file:
        payload = json.load(hook_file)

    # Load config from file/secret
    config = Configuration(args)

    hooks = taskcluster.Hooks(config.get_taskcluster_options())
    hooks.ping()

    hook_name = "{}/{}".format(hook_group_id, hook_id)
    logger.info("Checking if hook %s exists", hook_name)

    try:
        hooks.hook(hook_group_id, hook_id)
        hook_exists = True
        logger.info("Hook %s exists", hook_name)
    except taskcluster.exceptions.TaskclusterRestFailure:
        hook_exists = False
        logger.info("Hook %s does not exists", hook_name)

    if hook_exists:
        hooks.updateHook(hook_group_id, hook_id, payload)
        logger.info("Hook %s was successfully updated", hook_name)
    else:
        hooks.createHook(hook_group_id, hook_id, payload)
        logger.info("Hook %s was successfully created", hook_name)

    hook_url = taskcluster_urls.ui(
        config.get_root_url(), "hooks/{}/{}".format(hook_group_id, hook_id))
    logger.info("Hook URL for debugging: %r", hook_url)
Example #6
def heroku_release(target, args):
    '''
    Push the Docker image from a dependency task and release it on Heroku
    '''
    assert args.task_id is not None, 'Missing task id'

    # Load config from file/secret
    config = Configuration(args)

    assert 'username' in config.heroku and 'password' in config.heroku, \
        'Missing Heroku authentication'

    # Set up skopeo
    skopeo = Skopeo(
        HEROKU_REGISTRY,
        config.heroku['username'],
        config.heroku['password'],
    )

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load current task description to list its dependencies
    logger.info('Loading task status {}'.format(args.task_id))
    task = queue.task(args.task_id)
    nb_deps = len(task['dependencies'])
    assert nb_deps > 0, 'No task dependencies'

    # Get the list of matching artifacts; there should be exactly one
    matching_artifacts = load_artifacts(task, queue, args.artifact_filter,
                                        args.exclude_filter)

    # Push the Docker image
    if len(matching_artifacts) == 0:
        raise ValueError(f"No artifact found for {args.artifact_filter}")
    elif len(matching_artifacts) > 1:
        raise ValueError(
            f"More than one artifact found for {args.artifact_filter}: {matching_artifacts!r}"
        )
    else:
        task_id, artifact_name = matching_artifacts[0]

        custom_tag_name = f"{HEROKU_REGISTRY}/{args.heroku_app}/{args.heroku_dyno_type}"

        artifact_path = download_artifact(queue, task_id, artifact_name)

        skopeo.push_archive(artifact_path, custom_tag_name)

    # Get the Docker image id
    image_id = docker_id_archive(artifact_path)

    # Trigger a release on Heroku
    logger.info(
        f"Deploying image id {image_id!r} to Heroku app {args.heroku_app!r} dyno {args.heroku_dyno_type!r}"
    )
    update = dict(
        type=args.heroku_dyno_type,
        docker_image=image_id,
    )
    r = requests.patch(
        f'https://api.heroku.com/apps/{args.heroku_app}/formation',
        json=dict(updates=[update]),
        headers={
            'Accept': 'application/vnd.heroku+json; version=3.docker-releases',
            'Authorization': f"Bearer {config.heroku['password']}",
        },
    )
    r.raise_for_status()

    logger.info(
        f'The {args.heroku_app}/{args.heroku_dyno_type} application has been updated'
    )
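
The docker_id_archive helper is not shown here. As an assumption about what such a lookup involves (not its actual implementation): a docker-archive tarball contains a manifest.json whose Config entry names the image configuration blob, from which an image id can be derived:

import json
import tarfile

def image_id_from_archive(path):
    # Hypothetical helper: read manifest.json from a docker-archive
    # and derive the image id from its Config entry ("<hex>.json")
    with tarfile.open(path) as tar:
        manifest = json.load(tar.extractfile("manifest.json"))
    return "sha256:" + manifest[0]["Config"].split(".")[0]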
Example #7
def push_s3(target: Target, args: argparse.Namespace) -> None:
    """
    Push files from a remote task to an AWS S3 bucket
    """
    assert args.task_id is not None, "Missing task id"
    assert not args.artifact_folder.endswith(
        "/"), "Artifact folder {} must not end in /".format(
            args.artifact_folder)

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_aws_auth(), "Missing AWS authentication"

    # Configure boto3 client
    s3 = boto3.client(
        "s3",
        aws_access_key_id=config.aws["access_key_id"],
        aws_secret_access_key=config.aws["secret_access_key"],
    )

    # Check the bucket is available
    try:
        s3.head_bucket(Bucket=args.bucket)
        logger.info("S3 Bucket {} is available".format(args.bucket))
    except botocore.exceptions.ClientError as e:
        logger.error("Bucket {} unavailable: {}".format(args.bucket, e))
        return

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Download all files from the specified artifact folder.
    # These files are then uploaded to the bucket, stripping the artifact
    # folder from their final path
    artifacts = load_artifacts(args.task_id, queue,
                               "{}/*".format(args.artifact_folder))
    for task_id, artifact_name in artifacts:

        # Download each artifact
        assert artifact_name.startswith(args.artifact_folder)
        local_path = download_artifact(queue, task_id, artifact_name)

        # Detect the MIME type to set a valid content-type for web requests
        content_type, _ = mimetypes.guess_type(local_path)
        if content_type is None:
            # Use a default content type to avoid crashes on upload
            # when a file's MIME type is not detected
            content_type = "text/plain"

        # Push that artifact on the S3 bucket, without the artifact folder
        s3_path = artifact_name[len(args.artifact_folder) + 1:]
        with open(local_path, "rb") as body:
            s3.put_object(
                Bucket=args.bucket,
                Key=s3_path,
                Body=body,
                ContentType=content_type,
            )
        logger.info("Uploaded {} as {} on S3".format(s3_path, content_type))

    cloudfront_distribution_id = config.aws.get("cloudfront_distribution_id")
    if cloudfront_distribution_id is not None:
        cloudfront_client = boto3.client(
            "cloudfront",
            aws_access_key_id=config.aws["access_key_id"],
            aws_secret_access_key=config.aws["secret_access_key"],
        )

        cloudfront_client.create_invalidation(
            DistributionId=cloudfront_distribution_id,
            InvalidationBatch={
                "Paths": {
                    "Quantity": 1,
                    "Items": [
                        "/*",
                    ],
                },
                "CallerReference": str(int(datetime.utcnow().timestamp())),
            },
        )

        logger.info("Cloudfront invalidation created")