Example #1
def push_artifacts(target, args):
    """
    Push all artifacts from dependent tasks
    """
    assert args.task_id is not None, "Missing task id"

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_docker_auth(), "Missing Docker authentication"

    if args.push_tool == "skopeo":
        push_tool = Skopeo()
    elif args.push_tool == "docker":
        push_tool = Docker()
    else:
        raise ValueError("Unsupported push tool: {}".format(args.push_tool))

    push_tool.login(config.docker["registry"], config.docker["username"],
                    config.docker["password"])

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Load dependencies artifacts
    artifacts = load_artifacts(args.task_id, queue, args.artifact_filter,
                               args.exclude_filter)

    for task_id, artifact_name in artifacts:
        push_artifact(queue, push_tool, task_id, artifact_name)

    logger.info("All found artifacts were pushed.")
Example #2
def push_s3(target, args):
    """
    Push files from a remote task to an AWS S3 bucket
    """
    assert args.task_id is not None, "Missing task id"
    assert not args.artifact_folder.endswith(
        "/"), "Artifact folder {} must not end in /".format(
            args.artifact_folder)

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_aws_auth(), "Missing AWS authentication"

    # Configure boto3 client
    s3 = boto3.client(
        "s3",
        aws_access_key_id=config.aws["access_key_id"],
        aws_secret_access_key=config.aws["secret_access_key"],
    )

    # Check the bucket is available
    try:
        s3.head_bucket(Bucket=args.bucket)
        logger.info("S3 Bucket {} is available".format(args.bucket))
    except botocore.exceptions.ClientError as e:
        logger.error("Bucket {} unavailable: {}".format(args.bucket, e))
        return

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Download all files from the specified artifact folder
    # These files are then uploaded to the bucket, stripping the artifact folder
    # from their final path
    artifacts = load_artifacts(args.task_id, queue,
                               "{}/*".format(args.artifact_folder))
    for task_id, artifact_name in artifacts:

        # Download each artifact
        assert artifact_name.startswith(args.artifact_folder)
        local_path = download_artifact(queue, task_id, artifact_name)

        # Detect the MIME type to set a valid content-type for web requests
        content_type, _ = mimetypes.guess_type(local_path)

        # Push that artifact to the S3 bucket, without the artifact folder prefix
        s3_path = artifact_name[len(args.artifact_folder) + 1:]
        with open(local_path, "rb") as body:
            s3.put_object(
                Bucket=args.bucket,
                Key=s3_path,
                Body=body,
                ContentType=content_type,
            )
        logger.info("Uploaded {} as {} on S3".format(s3_path, content_type))
Example #3
def stage_deps(target, args):
    """Pull image dependencies into the `img` store.

    Arguments:
        target (taskboot.target.Target): Target
        args (argparse.Namespace): CLI arguments

    Returns:
        None
    """
    create_cert(CA_KEY, CA_CRT, ca=True)
    create_cert(SRV_KEY, SRV_CRT, ca_key=CA_KEY, ca_cert=CA_CRT)
    img_tool = Img(cache=args.cache)

    # retrieve image archives from dependency tasks to /images
    image_path = Path(mkdtemp(prefix="image-deps-"))
    try:
        config = Configuration(Namespace(secret=None, config=None))
        queue = taskcluster.Queue(config.get_taskcluster_options())

        # load images into the img image store via Docker registry
        with Registry():
            for task_id, artifact_name in load_artifacts(
                    args.task_id, queue, "public/**.tar.zst"):
                img = download_artifact(queue, task_id, artifact_name,
                                        image_path)
                image_name = Path(artifact_name).stem
                check_call([
                    "skopeo",
                    "copy",
                    f"docker-archive:{img}",
                    f"docker://localhost/mozillasecurity/{image_name}:latest",
                ])
                img.unlink()
                img_tool.run(
                    ["pull", f"localhost/mozillasecurity/{image_name}:latest"])
                img_tool.run([
                    "tag",
                    f"localhost/mozillasecurity/{image_name}:latest",
                    f"{args.registry}/mozillasecurity/{image_name}:latest",
                ])
                img_tool.run([
                    "tag",
                    f"localhost/mozillasecurity/{image_name}:latest",
                    (f"{args.registry}/mozillasecurity/"
                     f"{image_name}:{args.git_revision}"),
                ])
    finally:
        rmtree(image_path)

    # workaround https://github.com/genuinetools/img/issues/206
    patch_dockerfile(target.check_path(args.dockerfile),
                     img_tool.list_images())
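
A worked example of the naming and tagging scheme in the loop above, with hypothetical values for the artifact name and CLI arguments; note that Path(...).stem only drops the final .zst suffix:

from pathlib import Path

artifact_name = "public/grizzly.tar.zst"   # hypothetical artifact name
registry = "docker.io"                     # hypothetical args.registry
git_revision = "abc123"                    # hypothetical args.git_revision

image_name = Path(artifact_name).stem      # "grizzly.tar"
tags = [
    f"localhost/mozillasecurity/{image_name}:latest",
    f"{registry}/mozillasecurity/{image_name}:latest",
    f"{registry}/mozillasecurity/{image_name}:{git_revision}",
]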
Example #4
def push_s3(target: Target, args: argparse.Namespace) -> None:
    """
    Push files from a remote task to an AWS S3 bucket
    """
    assert args.task_id is not None, "Missing task id"
    assert not args.artifact_folder.endswith(
        "/"), "Artifact folder {} must not end in /".format(
            args.artifact_folder)

    # Load config from file/secret
    config = Configuration(args)
    assert config.has_aws_auth(), "Missing AWS authentication"

    # Configure boto3 client
    s3 = boto3.client(
        "s3",
        aws_access_key_id=config.aws["access_key_id"],
        aws_secret_access_key=config.aws["secret_access_key"],
    )

    # Check the bucket is available
    try:
        s3.head_bucket(Bucket=args.bucket)
        logger.info("S3 Bucket {} is available".format(args.bucket))
    except botocore.exceptions.ClientError as e:
        logger.error("Bucket {} unavailable: {}".format(args.bucket, e))
        return

    # Load queue service
    queue = taskcluster.Queue(config.get_taskcluster_options())

    # Download all files from the specified artifact folder
    # These files are then uploaded to the bucket, stripping the artifact folder
    # from their final path
    artifacts = load_artifacts(args.task_id, queue,
                               "{}/*".format(args.artifact_folder))
    for task_id, artifact_name in artifacts:

        # Download each artifact
        assert artifact_name.startswith(args.artifact_folder)
        local_path = download_artifact(queue, task_id, artifact_name)

        # Detect the MIME type to set a valid content-type for web requests
        content_type, _ = mimetypes.guess_type(local_path)
        if content_type is None:
            # Use a default content type to avoid crashes on upload
            # when a file's MIME type is not detected
            content_type = "text/plain"

        # Push that artifact to the S3 bucket, without the artifact folder prefix
        s3_path = artifact_name[len(args.artifact_folder) + 1:]
        with open(local_path, "rb") as body:
            s3.put_object(
                Bucket=args.bucket,
                Key=s3_path,
                Body=body,
                ContentType=content_type,
            )
        logger.info("Uploaded {} as {} on S3".format(s3_path, content_type))

    cloudfront_distribution_id = config.aws.get("cloudfront_distribution_id")
    if cloudfront_distribution_id is not None:
        cloudfront_client = boto3.client(
            "cloudfront",
            aws_access_key_id=config.aws["access_key_id"],
            aws_secret_access_key=config.aws["secret_access_key"],
        )

        cloudfront_client.create_invalidation(
            DistributionId=cloudfront_distribution_id,
            InvalidationBatch={
                "Paths": {
                    "Quantity": 1,
                    "Items": [
                        "/*",
                    ],
                },
                "CallerReference": str(int(datetime.utcnow().timestamp())),
            },
        )

        logger.info("Cloudfront invalidation created")