Example #1
def publish_pypi(target, args):
    """
    Build and publish the target to a PyPI repository
    """
    config = Configuration(args)
    assert config.has_pypi_auth(), "Missing PyPI authentication"

    # Build the project
    setup = target.check_path("setup.py")
    logger.info(f"Building Python project using {setup}")
    sandbox.run_setup(setup, ["clean", "sdist", "bdist_wheel"])

    # Check some files were produced
    dist = target.check_path("dist")
    build = glob.glob(f"{dist}/*")
    assert len(build) > 0, "No built files found"
    logger.info("Will upload {}".format(", ".join(map(os.path.basename,
                                                      build))))

    # Use default repository
    repository = args.repository or DEFAULT_REPOSITORY
    logger.info(f"Will upload to {repository}")

    # Upload it through twine
    upload_settings = Settings(
        username=config.pypi["username"],
        password=config.pypi["password"],
        repository_url=repository,
        verbose=True,
        disable_progress_bar=False,
    )
    twine_upload(upload_settings, build)

    logger.info("PyPI publication finished.")
Example #2
def push_artifacts(target, args):
    '''
    Push all artifacts from dependent tasks
    '''
    assert args.task_id is not None, 'Missing task id'

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_docker_auth(), 'Missing Docker authentication'

    # Setup skopeo
    skopeo = Skopeo(
        config.docker['registry'],
        config.docker['username'],
        config.docker['password'],
    )

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load current task description to list its dependencies
    logger.info('Loading task status {}'.format(args.task_id))
    task = queue.task(args.task_id)
    nb_deps = len(task['dependencies'])
    assert nb_deps > 0, 'No task dependencies'

    # Load dependencies artifacts
    artifacts = load_artifacts(task, queue, args.artifact_filter,
                               args.exclude_filter)

    for task_id, artifact_name in artifacts:
        push_artifact(queue, skopeo, task_id, artifact_name)

    logger.info('All found artifacts were pushed.')
Example #3
def cargo_publish(target: Target, args: argparse.Namespace) -> None:
    """
    Publish a crate on crates.io
    """

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_cargo_auth(), "Missing Cargo authentication"

    # Build the package to publish on crates.io
    subprocess.run(["cargo", "publish", "--no-verify", "--dry-run"],
                   check=True)

    # Publish the crate on crates.io
    # stdout and stderr are captured to avoid leaking the token
    proc = subprocess.run(
        ["cargo", "publish", "--no-verify", "--token", config.cargo["token"]],
        capture_output=True,
        text=True,  # Return stdout and stderr output as strings
    )

    # If an error occurred while publishing the crate, do not fail when a
    # `crate already uploaded` error is found and the option to ignore
    # that kind of error is enabled
    if proc.returncode != 0 and not (args.ignore_published
                                     and "is already uploaded" in proc.stderr):
        raise Exception("Failed to publish the crate on crates.io")
Example #4
def push_artifacts(target, args):
    """
    Push all artifacts from dependent tasks
    """
    assert args.task_id is not None, "Missing task id"

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_docker_auth(), "Missing Docker authentication"

    if args.push_tool == "skopeo":
        push_tool = Skopeo()
    elif args.push_tool == "docker":
        push_tool = Docker()
    else:
        raise ValueError("Unsupported push tool: {}".format(args.push_tool))

    push_tool.login(config.docker["registry"], config.docker["username"],
                    config.docker["password"])

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load dependencies artifacts
    artifacts = load_artifacts(args.task_id, queue, args.artifact_filter,
                               args.exclude_filter)

    for task_id, artifact_name in artifacts:
        push_artifact(queue, push_tool, task_id, artifact_name)

    logger.info("All found artifacts were pushed.")
Example #5
def build_image(target, args):
    """
    Build a docker image and allow save/push
    """
    if args.build_tool == "img":
        build_tool = Img(cache=args.cache)
    elif args.build_tool == "docker":
        build_tool = Docker()
    elif args.build_tool == "dind":
        build_tool = DinD()
    else:
        raise ValueError("Unsupported build tool: {}".format(args.build_tool))

    # Load config from file/secret
    config = Configuration(args)

    # Check the dockerfile is available in target
    dockerfile = target.check_path(args.dockerfile)

    # Check the output is writable
    output = None
    if args.write:
        output = os.path.realpath(args.write)
        assert output.lower().endswith(
            ".tar"), "Destination path must end in .tar"
        # Creating a file needs write + execute access on its directory
        assert os.access(os.path.dirname(output),
                         os.W_OK | os.X_OK), "Destination is not writable"

    # Build the tags
    base_image = args.image or "taskboot-{}".format(uuid.uuid4())
    tags = gen_docker_images(base_image, args.tag, args.registry)
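    # gen_docker_images presumably expands the base image into fully
    # qualified "registry/image:tag" names, one per requested tag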

    if args.push:
        assert config.has_docker_auth(), "Missing Docker authentication"
        registry = config.docker["registry"]

        if registry != args.registry:
            msg = "The credentials are the ones for %r not %r"
            logger.warning(msg, registry, args.registry)

        # Login on docker
        build_tool.login(registry, config.docker["username"],
                         config.docker["password"])

    # Build the image
    build_tool.build(target.dir, dockerfile, tags, args.build_arg)

    # Write the produced image
    if output:
        build_tool.save(tags, output)
        zstd_compress(output)

    # Push the produced image
    if args.push:
        for tag in tags:
            build_tool.push(tag)
Example #6
def push_s3(target, args):
    """
    Push files from a remote task to an AWS S3 bucket
    """
    assert args.task_id is not None, "Missing task id"
    assert not args.artifact_folder.endswith(
        "/"), "Artifact folder {} must not end in /".format(
            args.artifact_folder)

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_aws_auth(), "Missing AWS authentication"

    # Configure boto3 client
    s3 = boto3.client(
        "s3",
        aws_access_key_id=config.aws["access_key_id"],
        aws_secret_access_key=config.aws["secret_access_key"],
    )

    # Check the bucket is available
    try:
        s3.head_bucket(Bucket=args.bucket)
        logger.info("S3 Bucket {} is available".format(args.bucket))
    except botocore.exceptions.ClientError as e:
        logger.error("Bucket {} unavailable: {}".format(args.bucket, e))
        return

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Download all files from the specified artifact folder
    # These files are then uploaded to the bucket, stripping the artifact
    # folder from their final path
    artifacts = load_artifacts(args.task_id, queue,
                               "{}/*".format(args.artifact_folder))
    for task_id, artifact_name in artifacts:

        # Download each artifact
        assert artifact_name.startswith(args.artifact_folder)
        local_path = download_artifact(queue, task_id, artifact_name)

        # Detect the MIME type to set a valid content-type for web requests
        content_type, _ = mimetypes.guess_type(local_path)
        if content_type is None:
            # Use a default content type to avoid crashes on upload
            # when a file's MIME type is not detected
            content_type = "text/plain"

        # Push that artifact on the S3 bucket, without the artifact folder
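        # e.g. artifact folder "public/site" and artifact name
        # "public/site/css/app.css" yield the S3 key "css/app.css"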
        s3_path = artifact_name[len(args.artifact_folder) + 1:]
        with open(local_path, "rb") as body:
            s3.put_object(
                Bucket=args.bucket,
                Key=s3_path,
                Body=body,
                ContentType=content_type,
            )
        logger.info("Uploaded {} as {} on S3".format(s3_path, content_type))
Example #7
def stage_deps(target, args):
    """Pull image dependencies into the `img` store.

    Arguments:
        target (taskboot.target.Target): Target
        args (argparse.Namespace): CLI arguments

    Returns:
        None
    """
    # Generate a throwaway CA and server certificate, presumably used to
    # serve the temporary local registry started below over TLS
    create_cert(CA_KEY, CA_CRT, ca=True)
    create_cert(SRV_KEY, SRV_CRT, ca_key=CA_KEY, ca_cert=CA_CRT)
    img_tool = Img(cache=args.cache)

    # retrieve image archives from dependency tasks into a temporary directory
    image_path = Path(mkdtemp(prefix="image-deps-"))
    try:
        config = Configuration(Namespace(secret=None, config=None))
        queue = taskcluster.Queue(config.get_taskcluster_options())

        # load images into the img image store via Docker registry
        with Registry():
            for task_id, artifact_name in load_artifacts(
                    args.task_id, queue, "public/**.tar.zst"):
                img = download_artifact(queue, task_id, artifact_name,
                                        image_path)
                image_name = Path(artifact_name).stem
                check_call([
                    "skopeo",
                    "copy",
                    f"docker-archive:{img}",
                    f"docker://localhost/mozillasecurity/{image_name}:latest",
                ])
                img.unlink()
                img_tool.run(
                    ["pull", f"localhost/mozillasecurity/{image_name}:latest"])
                img_tool.run([
                    "tag",
                    f"localhost/mozillasecurity/{image_name}:latest",
                    f"{args.registry}/mozillasecurity/{image_name}:latest",
                ])
                img_tool.run([
                    "tag",
                    f"localhost/mozillasecurity/{image_name}:latest",
                    (f"{args.registry}/mozillasecurity/"
                     f"{image_name}:{args.git_revision}"),
                ])
    finally:
        rmtree(image_path)

    # workaround https://github.com/genuinetools/img/issues/206
    patch_dockerfile(target.check_path(args.dockerfile),
                     img_tool.list_images())
Example #8
def build_image(target, args):
    '''
    Build a docker image and allow save/push
    '''
    docker = Docker(cache=args.cache)

    # Load config from file/secret
    config = Configuration(args)

    # Check the dockerfile is available in target
    dockerfile = target.check_path(args.dockerfile)

    # Check the output is writable
    output = None
    if args.write:
        output = os.path.realpath(args.write)
        assert output.lower().endswith(
            '.tar'), 'Destination path must end in .tar'
        # Creating a file needs write + execute access on its directory
        assert os.access(os.path.dirname(output), os.W_OK | os.X_OK), \
            'Destination is not writable'

    # Build the tags
    base_image = args.image or 'taskboot-{}'.format(uuid.uuid4())
    tags = gen_docker_images(base_image, args.tag, args.registry)

    if args.push:
        assert config.has_docker_auth(), 'Missing Docker authentication'
        registry = config.docker['registry']

        if registry != args.registry:
            msg = "The credentials are the ones for %r not %r"
            logger.warning(msg, registry, args.registry)

        # Login on docker
        docker.login(
            registry,
            config.docker['username'],
            config.docker['password'],
        )

    # Build the image
    docker.build(target.dir, dockerfile, tags, args.build_arg)

    # Write the produced image
    if output:
        for tag in tags:
            docker.save(tag, output)

    # Push the produced image
    if args.push:
        for tag in tags:
            docker.push(tag)
Example #9
def heroku_release(target, args):
    """
    Push Docker images from dependent tasks and trigger a release on Heroku
    """
    assert args.task_id is not None, "Missing task id"

    # Load config from file/secret
    config = Configuration(args)

    assert ("username" in config.heroku
            and "password" in config.heroku), "Missing Heroku authentication"

    # Setup skopeo
    skopeo = Skopeo()
    skopeo.login(HEROKU_REGISTRY, config.heroku["username"],
                 config.heroku["password"])

    updates_payload = []

    for heroku_dyno_name, _, artifact_path in load_named_artifacts(
            config, args.task_id, args.artifacts):

        # Push the Docker image
        custom_tag_name = f"{HEROKU_REGISTRY}/{args.heroku_app}/{heroku_dyno_name}"

        artifact_path, ext = os.path.splitext(artifact_path)
        assert ext == ".zst"
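        # zstd_decompress is assumed to write the decompressed archive to
        # the bare path stripped above, which skopeo then pushes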
        zstd_decompress(artifact_path)

        skopeo.push_archive(artifact_path, custom_tag_name)

        # Get the Docker image id
        image_id = docker_id_archive(artifact_path)

        updates_payload.append({
            "type": heroku_dyno_name,
            "docker_image": image_id
        })

    # Trigger a release on Heroku
    logger.info(
        "Deploying update for dyno types: %r",
        list(sorted(x["type"] for x in updates_payload)),
    )

    updates_payload = {"updates": updates_payload}
    logger.debug("Using payload: %r", updates_payload)

    r = requests.patch(
        f"https://api.heroku.com/apps/{args.heroku_app}/formation",
        json=updates_payload,
        headers={
            "Accept": "application/vnd.heroku+json; version=3.docker-releases",
            "Authorization": f"Bearer {config.heroku['password']}",
        },
    )
    logger.debug("Heroku deployment answer: %s", r.text)
    r.raise_for_status()

    logger.info(f"The {args.heroku_app} application has been updated")
Example #10
def git_push(target: Target, args: argparse.Namespace) -> None:
    """
    Push commits to a repository
    """

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_git_auth(), "Missing Git authentication"

    # Set the remote repository URL; it embeds the token, so it must not
    # be logged
    repo_link = "https://{}:{}@{}.git".format(args.user, config.git["token"],
                                              args.repository)
    subprocess.run(["git", "remote", "set-url", "origin", repo_link],
                   check=True)

    # Push on repository
    if args.force_push:
        command = ["git", "push", "-f", "origin", args.branch]
    else:
        command = ["git", "push", "origin", args.branch]

    subprocess.run(command, check=True)
Example #11
def build_hook(target, args):
    """
    Read a hook definition file and either create or update the hook
    """
    hook_file_path = target.check_path(args.hook_file)

    hook_group_id = args.hook_group_id
    hook_id = args.hook_id

    with open(hook_file_path) as hook_file:
        payload = json.load(hook_file)

    # Load config from file/secret
    config = Configuration(args)

    hooks = taskcluster.Hooks(config.get_taskcluster_options())
    hooks.ping()
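    # ping checks the Hooks service is up before any changes are made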

    hook_name = "{}/{}".format(hook_group_id, hook_id)
    logger.info("Checking if hook %s exists", hook_name)

    try:
        hooks.hook(hook_group_id, hook_id)
        hook_exists = True
        logger.info("Hook %s exists", hook_name)
    except taskcluster.exceptions.TaskclusterRestFailure:
        hook_exists = False
        logger.info("Hook %s does not exist", hook_name)

    if hook_exists:
        hooks.updateHook(hook_group_id, hook_id, payload)
        logger.info("Hook %s was successfully updated", hook_name)
    else:
        hooks.createHook(hook_group_id, hook_id, payload)
        logger.info("Hook %s was successfully created", hook_name)

    hook_url = taskcluster_urls.ui(
        config.get_root_url(), "hooks/{}/{}".format(hook_group_id, hook_id))
    logger.info("Hook URL for debugging: %r", hook_url)
Example #12
def cargo_publish(target: Target, args: argparse.Namespace) -> None:
    """
    Publish a crate on crates.io
    """

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_cargo_auth(), "Missing Cargo authentication"

    # Build the package to publish on crates.io
    subprocess.run(["cargo", "publish", "--no-verify", "--dry-run"],
                   check=True)

    # Publish the crate on crates.io
    # stdout and stderr are captured to avoid leaking the token
    proc = subprocess.run(
        ["cargo", "publish", "--no-verify", "--token", config.cargo["token"]],
        capture_output=True,
    )

    if proc.returncode != 0:
        raise Exception("Failed to publish the crate on crates.io")
Example #13
def retrieve_artifacts(target: Target, args: argparse.Namespace) -> None:
    """
    Retrieve all artifacts from a task
    """
    assert args.task_id is not None, "Missing task id"

    # Load config from file/secret
    config = Configuration(args)

    # Rewrite each artifact path into the load_named_artifacts format:
    # name:artifact-path, where name is the artifact file's stem
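    # e.g. "public/report.html" becomes "report:public/report.html"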
    artifacts = [
        str(pathlib.Path(artifact).stem) + ":" + artifact
        for artifact in args.artifacts
    ]

    # Load dependencies artifacts
    for _, artifact_name, artifact_path in load_named_artifacts(
            config, args.task_id, artifacts, args.output_path):
        logger.info(f"{artifact_name} has been downloaded to {artifact_path}")

    logger.info("All found artifacts were downloaded.")
Example #14
def github_release(target: Target, args: argparse.Namespace) -> None:
    """
    Create a Github release and upload assets from dependent tasks
    """
    assert args.task_id is not None, "Missing task id"

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_git_auth(), "Missing Github authentication"

    # Check if local or dependent task assets are used
    if args.local_asset is None:
        # Check the assets before any Github change is applied
        assets = list(load_named_artifacts(config, args.task_id, args.asset))
    else:
        # Create a list of tuples structured in this way
        # (name, artifact_name, artifact_path)
        assets = [(
            str(pathlib.Path(artifact_path).stem),
            artifact_path,
            pathlib.Path(artifact_path),
        ) for artifact_path in args.local_asset]

    # Setup GitHub API client and load repository
    github = Github(config.git["token"])
    try:
        repository = github.get_repo(args.repository)
        logger.info(
            f"Loaded Github repository {repository.full_name} #{repository.id}"
        )
    except UnknownObjectException:
        raise Exception(f"Repository {args.repository} is not available")

    # Check that the tag exists; it must be created manually by the user
    # Usually this task is triggered by a github tag event
    logger.debug(f"Checking git tag {args.version}")
    try:
        tag = repository.get_git_ref(f"tags/{args.version}")
        logger.info(f"Found existing tag {args.version}")
    except UnknownObjectException:
        raise Exception(f"Tag {args.version} does not exist on {repository}")

    # Check if requested release exists
    logger.debug(f"Checking requested release {args.version}")
    try:
        release = repository.get_release(args.version)
        logger.info(f"Found existing release {args.version}")
    except UnknownObjectException:
        # Create new release
        logger.info(f"Creating new release {args.version}")
        release = repository.create_git_release(
            tag=args.version,
            name=args.version,
            message=build_release_notes(repository, tag),
            target_commitish=tag.object.sha,
        )

    # Upload every named asset
    for asset_name, _, artifact_path in assets:
        logger.info(f"Uploading asset {asset_name} using {artifact_path}")
        release.upload_asset(name=asset_name,
                             path=artifact_path,
                             label=asset_name)

    logger.info(f"Release available as {release.html_url}")
Example #15
def heroku_release(target, args):
    '''
    Push a Docker image from a dependent task and release it on Heroku
    '''
    assert args.task_id is not None, 'Missing task id'

    # Load config from file/secret
    config = Configuration(args)

    assert 'username' in config.heroku and 'password' in config.heroku, \
        'Missing Heroku authentication'

    # Setup skopeo
    skopeo = Skopeo(
        HEROKU_REGISTRY,
        config.heroku['username'],
        config.heroku['password'],
    )

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load current task description to list its dependencies
    logger.info('Loading task status {}'.format(args.task_id))
    task = queue.task(args.task_id)
    nb_deps = len(task['dependencies'])
    assert nb_deps > 0, 'No task dependencies'

    # Get the list of matching artifacts; we expect exactly one
    matching_artifacts = load_artifacts(task, queue, args.artifact_filter,
                                        args.exclude_filter)

    # Push the Docker image
    if len(matching_artifacts) == 0:
        raise ValueError(f"No artifact found for {args.artifact_filter}")
    elif len(matching_artifacts) > 1:
        raise ValueError(
            f"More than one artifact found for {args.artifact_filter}: {matching_artifacts!r}"
        )
    else:
        task_id, artifact_name = matching_artifacts[0]

        custom_tag_name = f"{HEROKU_REGISTRY}/{args.heroku_app}/{args.heroku_dyno_type}"

        artifact_path = download_artifact(queue, task_id, artifact_name)

        skopeo.push_archive(artifact_path, custom_tag_name)

    # Get the Docker image id
    image_id = docker_id_archive(artifact_path)

    # Trigger a release on Heroku
    logger.info(
        f"Deploying image id {image_id!r} to Heroku app {args.heroku_app!r} dyno {args.heroku_dyno_type!r}"
    )
    update = dict(
        type=args.heroku_dyno_type,
        docker_image=image_id,
    )
    r = requests.patch(
        f'https://api.heroku.com/apps/{args.heroku_app}/formation',
        json=dict(updates=[update]),
        headers={
            'Accept': 'application/vnd.heroku+json; version=3.docker-releases',
            'Authorization': f"Bearer {config.heroku['password']}",
        },
    )
    r.raise_for_status()

    logger.info(
        f'The {args.heroku_app}/{args.heroku_dyno_type} application has been updated'
    )
Example #16
def push_s3(target: Target, args: argparse.Namespace) -> None:
    """
    Push files from a remote task to an AWS S3 bucket
    """
    assert args.task_id is not None, "Missing task id"
    assert not args.artifact_folder.endswith(
        "/"), "Artifact folder {} must not end in /".format(
            args.artifact_folder)

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_aws_auth(), "Missing AWS authentication"

    # Configure boto3 client
    s3 = boto3.client(
        "s3",
        aws_access_key_id=config.aws["access_key_id"],
        aws_secret_access_key=config.aws["secret_access_key"],
    )

    # Check the bucket is available
    try:
        s3.head_bucket(Bucket=args.bucket)
        logger.info("S3 Bucket {} is available".format(args.bucket))
    except botocore.exceptions.ClientError as e:
        logger.error("Bucket {} unavailable: {}".format(args.bucket, e))
        return

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Download all files from the specified artifact folder
    # These files are then uploaded to the bucket, stripping the artifact
    # folder from their final path
    artifacts = load_artifacts(args.task_id, queue,
                               "{}/*".format(args.artifact_folder))
    for task_id, artifact_name in artifacts:

        # Download each artifact
        assert artifact_name.startswith(args.artifact_folder)
        local_path = download_artifact(queue, task_id, artifact_name)

        # Detect the MIME type to set a valid content-type for web requests
        content_type, _ = mimetypes.guess_type(local_path)
        if content_type is None:
            # Use a default content type to avoid crashes on upload
            # when a file's MIME type is not detected
            content_type = "text/plain"

        # Push that artifact on the S3 bucket, without the artifact folder
        s3_path = artifact_name[len(args.artifact_folder) + 1:]
        with open(local_path, "rb") as body:
            s3.put_object(
                Bucket=args.bucket,
                Key=s3_path,
                Body=body,
                ContentType=content_type,
            )
        logger.info("Uploaded {} as {} on S3".format(s3_path, content_type))

    cloudfront_distribution_id = config.aws.get("cloudfront_distribution_id")
    if cloudfront_distribution_id is not None:
        cloudfront_client = boto3.client(
            "cloudfront",
            aws_access_key_id=config.aws["access_key_id"],
            aws_secret_access_key=config.aws["secret_access_key"],
        )

        cloudfront_client.create_invalidation(
            DistributionId=cloudfront_distribution_id,
            InvalidationBatch={
                "Paths": {
                    "Quantity": 1,
                    "Items": [
                        "/*",
                    ],
                },
                "CallerReference": str(int(datetime.utcnow().timestamp())),
            },
        )

        logger.info("Cloudfront invalidation created")
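All of the examples above share one entry-point signature: a target (a wrapper
around the source checkout) and an argparse namespace carrying the CLI flags.
Below is a minimal, self-contained sketch of how such commands could be
dispatched; the subcommand name, flags, and stub handler are illustrative
assumptions, not taskboot's actual interface.

import argparse


def publish_pypi(target, args):
    # Illustrative stub standing in for the real publish_pypi in Example #1;
    # the actual function builds the project and uploads it through twine
    print("would publish to", args.repository or "the default repository")


def main():
    parser = argparse.ArgumentParser(description="Release automation commands")
    parser.add_argument("--config", help="Path to a configuration file")
    parser.add_argument("--secret", help="Taskcluster secret with credentials")

    subparsers = parser.add_subparsers(dest="command", required=True)

    # Each subcommand registers its handler on args.func, keeping dispatch
    # uniform across commands
    pypi = subparsers.add_parser("pypi", help="Build and publish on PyPI")
    pypi.add_argument("--repository", default=None)
    pypi.set_defaults(func=publish_pypi)

    args = parser.parse_args()

    # A real CLI would build the target from a repository checkout here;
    # passing None keeps this sketch self-contained
    args.func(None, args)


if __name__ == "__main__":
    main()

Running `python cli.py pypi --repository https://test.pypi.org/legacy/` would
then route the parsed namespace to the handler, mirroring how each function
above receives its args.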