Example #1
def build_context(name, outputFile, args=None):
    """Build a context.tar for image with specified name."""
    if not name:
        raise ValueError("must provide a Docker image name")
    if not outputFile:
        raise ValueError("must provide a outputFile")

    image_dir = docker.image_path(name)
    if not os.path.isdir(image_dir):
        raise Exception("image directory does not exist: %s" % image_dir)

    docker.create_context_tar(".", image_dir, outputFile, args)
Example #2
    def test_create_context_missing_extra(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include does/not/exist\n')

            with self.assertRaisesRegexp(Exception, 'path does not exist'):
                docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
        finally:
            shutil.rmtree(tmp)
Example #3
def build_context(name, outputFile):
    """Build a context.tar for image with specified name.
    """
    if not name:
        raise ValueError('must provide a Docker image name')
    if not outputFile:
        raise ValueError('must provide an outputFile')

    image_dir = os.path.join(docker.IMAGE_DIR, name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    docker.create_context_tar(GECKO, image_dir, outputFile, "")
Example #4
    def test_create_context_outside_topsrcdir(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, "test-image")
            os.mkdir(d)

            with open(os.path.join(d, "Dockerfile"), "wb") as fh:
                fh.write(b"# %include foo/../../../etc/shadow\n")

            with self.assertRaisesRegexp(Exception, "path outside topsrcdir"):
                docker.create_context_tar(tmp, d, os.path.join(tmp, "tar"), "test", {})
        finally:
            shutil.rmtree(tmp)
Example #5
    def test_create_context_missing_extra(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, "test-image")
            os.mkdir(d)

            with open(os.path.join(d, "Dockerfile"), "wb") as fh:
                fh.write(b"# %include does/not/exist\n")

            with self.assertRaisesRegexp(Exception, "path does not exist"):
                docker.create_context_tar(tmp, d, os.path.join(tmp, "tar"), "test", {})
        finally:
            shutil.rmtree(tmp)
Example #6
def build_context(name, outputFile):
    """Build a context.tar for image with specified name.
    """
    if not name:
        raise ValueError('must provide a Docker image name')
    if not outputFile:
        raise ValueError('must provide an outputFile')

    image_dir = os.path.join(IMAGE_DIR, name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    docker.create_context_tar(GECKO, image_dir, outputFile, "")
Example #7
    def test_create_context_outside_topsrcdir(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include foo/../../../etc/shadow\n')

            with self.assertRaisesRegexp(Exception, 'path outside topsrcdir'):
                docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
        finally:
            shutil.rmtree(tmp)
Example #8
    def test_create_context_outside_topsrcdir(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include foo/../../../etc/shadow\n')

            with self.assertRaisesRegexp(Exception, 'path outside topsrcdir'):
                docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
        finally:
            shutil.rmtree(tmp)
Example #9
    def test_create_context_missing_extra(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include does/not/exist\n')

            with self.assertRaisesRegexp(Exception, 'path does not exist'):
                docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
        finally:
            shutil.rmtree(tmp)
Example #10
    def test_create_context_absolute_path(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, "test-image")
            os.mkdir(d)

            # Absolute paths in %include syntax are not allowed.
            with open(os.path.join(d, "Dockerfile"), "wb") as fh:
                fh.write(b"# %include /etc/shadow\n")

            with self.assertRaisesRegexp(Exception, "cannot be absolute"):
                docker.create_context_tar(tmp, d, os.path.join(tmp, "tar"), "test", {})
        finally:
            shutil.rmtree(tmp)
Example #11
    def test_create_context_absolute_path(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            # Absolute paths in %include syntax are not allowed.
            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include /etc/shadow\n')

            with self.assertRaisesRegexp(Exception, 'cannot be absolute'):
                docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
        finally:
            shutil.rmtree(tmp)
Example #12
    def test_create_context_absolute_path(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            # Absolute paths in %include syntax are not allowed.
            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include /etc/shadow\n')

            with self.assertRaisesRegexp(Exception, 'cannot be absolute'):
                docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
        finally:
            shutil.rmtree(tmp)
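Taken together, the tests above pin down how `# %include` comments in a Dockerfile are validated: absolute paths are rejected ('cannot be absolute'), paths that escape the source tree are rejected ('path outside topsrcdir'), and paths that do not exist raise ('path does not exist'). The sketch below illustrates that validation; extract_includes is a hypothetical helper written for this page, not the in-tree parser behind create_context_tar.

import os


def extract_includes(dockerfile, topsrcdir):
    """Return topsrcdir-relative paths named by '# %include' lines,
    raising for the cases exercised by the tests above."""
    topsrcdir = os.path.normpath(topsrcdir)
    includes = []
    with open(dockerfile, 'rb') as fh:
        for line in fh:
            line = line.decode('utf-8').strip()
            if not line.startswith('# %include '):
                continue
            p = line[len('# %include '):].strip()
            if os.path.isabs(p):
                raise Exception('extra include path cannot be absolute: %s' % p)
            fullpath = os.path.normpath(os.path.join(topsrcdir, p))
            if not fullpath.startswith(topsrcdir + os.sep):
                raise Exception('extra include path outside topsrcdir: %s' % p)
            if not os.path.exists(fullpath):
                raise Exception('extra include path does not exist: %s' % p)
            includes.append(p)
    return includes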
Example #13
    def test_create_context_topsrcdir_files(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra/file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            with open(os.path.join(extra, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(extra, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'test_image', {})
            self.assertEqual(
                h,
                '49dc3827530cd344d7bcc52e1fdd4aefc632568cf442cffd3dd9633a58f271bf'
            )

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'Dockerfile',
                    'topsrcdir/extra/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #14
    def test_create_context_tar_basic(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test_image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'a'):
                pass
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            with open(os.path.join(d, 'extra'), 'a'):
                pass
            os.chmod(os.path.join(d, 'extra'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image', {})
            self.assertEqual(
                h,
                'eae3ad00936085eb3e5958912f79fb06ee8e14a91f7157c5f38625f7ddacb9c7'
            )

            # Files are added at the top level of the archive, without an image-name prefix.
            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'Dockerfile',
                    'extra',
                ])
        finally:
            shutil.rmtree(tmp)
Example #15
    def test_create_context_topsrcdir_files(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra/file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            with open(os.path.join(extra, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(extra, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'test_image')
            self.assertEqual(h, 'e7f14044b8ec1ba42e251d4b293af212ad08b30ec8ab6613abbdbe73c3c2b61f')

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'Dockerfile',
                    'topsrcdir/extra/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #16
    def test_create_context_tar_basic(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test_image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'a'):
                pass
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            with open(os.path.join(d, 'extra'), 'a'):
                pass
            os.chmod(os.path.join(d, 'extra'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image')
            self.assertEqual(
                h,
                '2a6d7f1627eba60daf85402418e041d728827d309143c6bc1c6bb3035bde6717'
            )

            # File prefix should be "my_image"
            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'my_image/Dockerfile',
                    'my_image/extra',
                ])
        finally:
            shutil.rmtree(tmp)
Example #17
    def test_create_context_topsrcdir_files(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra/file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            with open(os.path.join(extra, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(extra, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'test_image')
            self.assertEqual(
                h,
                '20faeb7c134f21187b142b5fadba94ae58865dc929c6c293d8cbc0a087269338'
            )

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'test_image/Dockerfile',
                    'test_image/topsrcdir/extra/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #18
    def test_create_context_tar_basic(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test_image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'a'):
                pass
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            with open(os.path.join(d, 'extra'), 'a'):
                pass
            os.chmod(os.path.join(d, 'extra'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image')
            self.assertEqual(h, '2a6d7f1627eba60daf85402418e041d728827d309143c6bc1c6bb3035bde6717')

            # File prefix should be "my_image"
            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'my_image/Dockerfile',
                    'my_image/extra',
                ])
        finally:
            shutil.rmtree(tmp)
Example #19
    def test_create_context_topsrcdir_files(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, "test-image")
            os.mkdir(d)
            with open(os.path.join(d, "Dockerfile"), "wb") as fh:
                fh.write(b"# %include extra/file0\n")
            os.chmod(os.path.join(d, "Dockerfile"), MODE_STANDARD)

            extra = os.path.join(tmp, "extra")
            os.mkdir(extra)
            with open(os.path.join(extra, "file0"), "a"):
                pass
            os.chmod(os.path.join(extra, "file0"), MODE_STANDARD)

            tp = os.path.join(tmp, "tar")
            h = docker.create_context_tar(tmp, d, tp, "test_image", {})
            self.assertEqual(
                h,
                "49dc3827530cd344d7bcc52e1fdd4aefc632568cf442cffd3dd9633a58f271bf"
            )

            with tarfile.open(tp, "r:gz") as tf:
                self.assertEqual(
                    tf.getnames(),
                    [
                        "Dockerfile",
                        "topsrcdir/extra/file0",
                    ],
                )
        finally:
            shutil.rmtree(tmp)
Example #20
    def test_create_context_tar_basic(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, "test_image")
            os.mkdir(d)
            with open(os.path.join(d, "Dockerfile"), "a"):
                pass
            os.chmod(os.path.join(d, "Dockerfile"), MODE_STANDARD)

            with open(os.path.join(d, "extra"), "a"):
                pass
            os.chmod(os.path.join(d, "extra"), MODE_STANDARD)

            tp = os.path.join(tmp, "tar")
            h = docker.create_context_tar(tmp, d, tp, "my_image", {})
            self.assertEqual(
                h,
                "eae3ad00936085eb3e5958912f79fb06ee8e14a91f7157c5f38625f7ddacb9c7"
            )

            # Files are added at the top level of the archive, without an image-name prefix.
            with tarfile.open(tp, "r:gz") as tf:
                self.assertEqual(
                    tf.getnames(),
                    [
                        "Dockerfile",
                        "extra",
                    ],
                )
        finally:
            shutil.rmtree(tmp)
Example #21
    def test_create_context_topsrcdir_files(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra/file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            with open(os.path.join(extra, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(extra, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'test_image')
            self.assertEqual(h, '20faeb7c134f21187b142b5fadba94ae58865dc929c6c293d8cbc0a087269338')

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'test_image/Dockerfile',
                    'test_image/topsrcdir/extra/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #22
    def test_create_context_tar_basic(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test_image')
            os.mkdir(d)
            with open(os.path.join(d, 'Dockerfile'), 'a'):
                pass
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            with open(os.path.join(d, 'extra'), 'a'):
                pass
            os.chmod(os.path.join(d, 'extra'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image')
            self.assertEqual(h, '6c1cc23357625f64f775a08eace7bbc3877dd08d2f3546e0f2e308bac8491865')

            # Files are added at the top level of the archive, without an image-name prefix.
            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'Dockerfile',
                    'extra',
                ])
        finally:
            shutil.rmtree(tmp)
Example #23
def build_image(name):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError('must provide a Docker image name')

    image_dir = os.path.join(IMAGE_DIR, name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    tag = docker.docker_image(name, default_version='latest')

    docker_bin = which.which('docker')

    # Verify that Docker is working.
    try:
        subprocess.check_output([docker_bin, '--version'])
    except subprocess.CalledProcessError:
        raise Exception('Docker server is unresponsive. Run `docker ps` and '
                        'check that Docker is running')

    # We obtain a context archive and build from that. Going through the
    # archive creation is important: it normalizes things like file owners
    # and mtimes to increase the chances that image generation is
    # deterministic.
    fd, context_path = tempfile.mkstemp()
    os.close(fd)
    try:
        docker.create_context_tar(GECKO, image_dir, context_path, name)
        docker.build_from_context(docker_bin, context_path, name, tag)
    finally:
        os.unlink(context_path)

    print('Successfully built %s and tagged with %s' % (name, tag))

    if tag.endswith(':latest'):
        print('*' * 50)
        print('WARNING: no VERSION file found in image directory.')
        print('Image is not suitable for deploying/pushing.')
        print('Create an image suitable for deploying/pushing by creating')
        print('a VERSION file in the image directory.')
        print('*' * 50)
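The comment in build_image explains why the build goes through a context archive at all: owners and mtimes are normalized so that the archive, and therefore the image and its hash, is reproducible. Below is a minimal sketch of that idea using only the standard library; deterministic_context_tar is a hypothetical name, not the create_context_tar implementation, which additionally handles %include directives and build args.

import gzip
import hashlib
import os
import tarfile


def deterministic_context_tar(src_dir, out_path):
    def normalize(info):
        # Strip everything that varies between machines and runs; file mode
        # bits are kept, which is why the tests chmod inputs to MODE_STANDARD.
        info.uid = info.gid = 0
        info.uname = info.gname = ''
        info.mtime = 0
        return info

    # Pin the gzip header timestamp too, otherwise the compressed bytes
    # change on every run even when the tar contents do not.
    with gzip.GzipFile(out_path, 'wb', mtime=0) as gz:
        with tarfile.open(fileobj=gz, mode='w') as tf:
            for root, dirs, files in os.walk(src_dir):
                dirs.sort()
                for name in sorted(files):
                    path = os.path.join(root, name)
                    tf.add(path, arcname=os.path.relpath(path, src_dir),
                           filter=normalize)

    # Hash the finished archive, mirroring the hex digests the tests compare.
    with open(out_path, 'rb') as fh:
        return hashlib.sha256(fh.read()).hexdigest()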
Example #24
def build_image(name):
    """Build a Docker image of specified name.

    Output from image building process will be printed to stdout.
    """
    if not name:
        raise ValueError('must provide a Docker image name')

    image_dir = os.path.join(docker.IMAGE_DIR, name)
    if not os.path.isdir(image_dir):
        raise Exception('image directory does not exist: %s' % image_dir)

    tag = docker.docker_image(name, by_tag=True)

    docker_bin = which.which('docker')

    # Verify that Docker is working.
    try:
        subprocess.check_output([docker_bin, '--version'])
    except subprocess.CalledProcessError:
        raise Exception('Docker server is unresponsive. Run `docker ps` and '
                        'check that Docker is running')

    # We obtain a context archive and build from that. Going through the
    # archive creation is important: it normalizes things like file owners
    # and mtimes to increase the chances that image generation is
    # deterministic.
    fd, context_path = tempfile.mkstemp()
    os.close(fd)
    try:
        docker.create_context_tar(GECKO, image_dir, context_path, name)
        docker.build_from_context(docker_bin, context_path, name, tag)
    finally:
        os.unlink(context_path)

    print('Successfully built %s and tagged with %s' % (name, tag))

    if tag.endswith(':latest'):
        print('*' * 50)
        print('WARNING: no VERSION file found in image directory.')
        print('Image is not suitable for deploying/pushing.')
        print('Create an image suitable for deploying/pushing by creating')
        print('a VERSION file in the image directory.')
        print('*' * 50)
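Both build_image variants warn when the resolved tag ends in ':latest', which happens when the image directory has no VERSION file. A hedged sketch of that name-to-tag resolution follows; image_tag is a hypothetical helper, and the real docker.docker_image may resolve tags differently.

import os


def image_tag(image_dir, name, default_version='latest'):
    # Hypothetical: read a VERSION file beside the Dockerfile and fall back
    # to 'latest' when it is missing, which is the case that triggers the
    # warning printed by build_image above.
    version = default_version
    version_file = os.path.join(image_dir, 'VERSION')
    if os.path.isfile(version_file):
        with open(version_file) as fh:
            version = fh.read().strip()
    return '%s:%s' % (name, version)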
Example #25
    def test_create_context_extra_directory(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, "test-image")
            os.mkdir(d)

            with open(os.path.join(d, "Dockerfile"), "wb") as fh:
                fh.write(b"# %include extra\n")
                fh.write(b"# %include file0\n")
            os.chmod(os.path.join(d, "Dockerfile"), MODE_STANDARD)

            extra = os.path.join(tmp, "extra")
            os.mkdir(extra)
            for i in range(3):
                p = os.path.join(extra, "file%d" % i)
                with open(p, "wb") as fh:
                    fh.write(b"file%d" % i)
                os.chmod(p, MODE_STANDARD)

            with open(os.path.join(tmp, "file0"), "a"):
                pass
            os.chmod(os.path.join(tmp, "file0"), MODE_STANDARD)

            tp = os.path.join(tmp, "tar")
            h = docker.create_context_tar(tmp, d, tp, "my_image", {})

            self.assertEqual(
                h,
                "a392f23cd6606ae43116390a4d0113354cff1e688a41d46f48b0fb25e90baa13"
            )

            with tarfile.open(tp, "r:gz") as tf:
                self.assertEqual(
                    tf.getnames(),
                    [
                        "Dockerfile",
                        "topsrcdir/extra/file0",
                        "topsrcdir/extra/file1",
                        "topsrcdir/extra/file2",
                        "topsrcdir/file0",
                    ],
                )
        finally:
            shutil.rmtree(tmp)
Example #26
    def test_create_context_extra_directory(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra\n')
                fh.write(b'# %include file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            for i in range(3):
                p = os.path.join(extra, 'file%d' % i)
                with open(p, 'wb') as fh:
                    fh.write(b'file%d' % i)
                os.chmod(p, MODE_STANDARD)

            with open(os.path.join(tmp, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(tmp, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image')

            self.assertEqual(
                h,
                'e5440513ab46ae4c1d056269e1c6715d5da7d4bd673719d360411e35e5b87205'
            )

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'my_image/Dockerfile',
                    'my_image/topsrcdir/extra/file0',
                    'my_image/topsrcdir/extra/file1',
                    'my_image/topsrcdir/extra/file2',
                    'my_image/topsrcdir/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #27
    def test_create_context_extra_directory(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra\n')
                fh.write(b'# %include file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            for i in range(3):
                p = os.path.join(extra, 'file%d' % i)
                with open(p, 'wb') as fh:
                    fh.write(b'file%d' % i)
                os.chmod(p, MODE_STANDARD)

            with open(os.path.join(tmp, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(tmp, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image', {})

            self.assertEqual(
                h,
                'a392f23cd6606ae43116390a4d0113354cff1e688a41d46f48b0fb25e90baa13'
            )

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'Dockerfile',
                    'topsrcdir/extra/file0',
                    'topsrcdir/extra/file1',
                    'topsrcdir/extra/file2',
                    'topsrcdir/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #28
    def test_create_context_extra_directory(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra\n')
                fh.write(b'# %include file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            for i in range(3):
                p = os.path.join(extra, 'file%d' % i)
                with open(p, 'wb') as fh:
                    fh.write(b'file%d' % i)
                os.chmod(p, MODE_STANDARD)

            with open(os.path.join(tmp, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(tmp, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image')

            self.assertEqual(h, 'e5440513ab46ae4c1d056269e1c6715d5da7d4bd673719d360411e35e5b87205')

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'my_image/Dockerfile',
                    'my_image/topsrcdir/extra/file0',
                    'my_image/topsrcdir/extra/file1',
                    'my_image/topsrcdir/extra/file2',
                    'my_image/topsrcdir/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #29
    def test_create_context_extra_directory(self):
        tmp = tempfile.mkdtemp()
        try:
            d = os.path.join(tmp, 'test-image')
            os.mkdir(d)

            with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
                fh.write(b'# %include extra\n')
                fh.write(b'# %include file0\n')
            os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)

            extra = os.path.join(tmp, 'extra')
            os.mkdir(extra)
            for i in range(3):
                p = os.path.join(extra, 'file%d' % i)
                with open(p, 'wb') as fh:
                    fh.write(b'file%d' % i)
                os.chmod(p, MODE_STANDARD)

            with open(os.path.join(tmp, 'file0'), 'a'):
                pass
            os.chmod(os.path.join(tmp, 'file0'), MODE_STANDARD)

            tp = os.path.join(tmp, 'tar')
            h = docker.create_context_tar(tmp, d, tp, 'my_image')

            self.assertEqual(h, 'd2a3363b15d0eb547a6c81a72ddf3980e2f6e6360c29b4fb6818102896f43180')

            with tarfile.open(tp, 'r:gz') as tf:
                self.assertEqual(tf.getnames(), [
                    'Dockerfile',
                    'topsrcdir/extra/file0',
                    'topsrcdir/extra/file1',
                    'topsrcdir/extra/file2',
                    'topsrcdir/file0',
                ])
        finally:
            shutil.rmtree(tmp)
Example #30
def fill_template(config, tasks):
    available_packages = set()
    for task in config.kind_dependencies_tasks:
        if task.kind != "packages":
            continue
        name = task.label.replace("packages-", "")
        available_packages.add(name)

    context_hashes = {}

    tasks = list(tasks)

    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop("name")
        job_symbol = task.pop("symbol", None)
        args = task.pop("args", {})
        definition = task.pop("definition", image_name)
        packages = task.pop("packages", [])
        parent = task.pop("parent", None)

        for p in packages:
            if p not in available_packages:
                raise Exception(
                    "Missing package job for {}-{}: {}".format(
                        config.kind, image_name, p
                    )
                )

        if not taskgraph.fast:
            context_path = os.path.join("taskcluster", "docker", definition)
            topsrcdir = os.path.dirname(config.graph_config.taskcluster_yml)
            if config.write_artifacts:
                context_file = os.path.join(CONTEXTS_DIR, f"{image_name}.tar.gz")
                logger.info(f"Writing {context_file} for docker image {image_name}")
                context_hash = create_context_tar(
                    topsrcdir,
                    context_path,
                    context_file,
                    args,
                )
            else:
                context_hash = generate_context_hash(topsrcdir, context_path, args)
        else:
            if config.write_artifacts:
                raise Exception("Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = "0" * 40
        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]
        context_hashes[image_name] = context_hash

        description = "Build the docker image {} for use by dependent tasks".format(
            image_name
        )

        args["DOCKER_IMAGE_PACKAGES"] = " ".join(f"<{p}>" for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = "3" if int(config.params["level"]) == 1 else "10"

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            "label": "build-docker-image-" + image_name,
            "description": description,
            "attributes": {
                "image_name": image_name,
                "artifact_prefix": "public",
            },
            "expires-after": "28 days" if config.params.is_try() else "1 year",
            "scopes": [],
            "run-on-projects": [],
            "worker-type": "images",
            "worker": {
                "implementation": "docker-worker",
                "os": "linux",
                "artifacts": [
                    {
                        "type": "file",
                        "path": "/workspace/image.tar.zst",
                        "name": "public/image.tar.zst",
                    }
                ],
                "env": {
                    "CONTEXT_TASK_ID": {"task-reference": "<decision>"},
                    "CONTEXT_PATH": "public/docker-contexts/{}.tar.gz".format(
                        image_name
                    ),
                    "HASH": context_hash,
                    "PROJECT": config.params["project"],
                    "IMAGE_NAME": image_name,
                    "DOCKER_IMAGE_ZSTD_LEVEL": zstd_level,
                    "DOCKER_BUILD_ARGS": {
                        "task-reference": json.dumps(args),
                    },
                    "VCS_BASE_REPOSITORY": config.params["base_repository"],
                    "VCS_HEAD_REPOSITORY": config.params["head_repository"],
                    "VCS_HEAD_REV": config.params["head_rev"],
                    "VCS_REPOSITORY_TYPE": config.params["repository_type"],
                },
                "chain-of-trust": True,
                "max-run-time": 7200,
            },
        }
        if "index" in task:
            taskdesc["index"] = task["index"]
        if job_symbol:
            taskdesc["treeherder"] = {
                "symbol": job_symbol,
                "platform": "taskcluster-images/opt",
                "kind": "other",
                "tier": 1,
            }

        worker = taskdesc["worker"]

        worker["docker-image"] = IMAGE_BUILDER_IMAGE
        digest_data.append(f"image-builder-image:{IMAGE_BUILDER_IMAGE}")

        if packages:
            deps = taskdesc.setdefault("dependencies", {})
            for p in sorted(packages):
                deps[p] = f"packages-{p}"

        if parent:
            deps = taskdesc.setdefault("dependencies", {})
            deps["parent"] = f"build-docker-image-{parent}"
            worker["env"]["PARENT_TASK_ID"] = {
                "task-reference": "<parent>",
            }

        if task.get("cache", True) and not taskgraph.fast:
            taskdesc["cache"] = {
                "type": "docker-images.v2",
                "name": image_name,
                "digest-data": digest_data,
            }

        yield taskdesc
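In fill_template, digest_data collects the context hash, the JSON-serialized build args, and the image-builder image, and feeds them into the task's docker-images.v2 cache entry, so a change to any of them invalidates the cached image. The snippet below is only an illustration of folding such a list into a single digest; it is not taskgraph's actual cache-digest code.

import hashlib
import json


def fold_digest_data(digest_data):
    # Illustration only: hash the entries in order, so changing the context,
    # the build args, or the image-builder image changes the digest.
    h = hashlib.sha256()
    for entry in digest_data:
        h.update(entry.encode('utf-8'))
    return h.hexdigest()


print(fold_digest_data(['0' * 40, json.dumps({'ARCH': 'x86_64'}, sort_keys=True)]))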
Example #31
    def load_tasks(cls, kind, path, config, params, loaded_tasks):
        pushdate = time.strftime('%Y%m%d%H%M%S',
                                 time.gmtime(params['pushdate']))

        parameters = {
            'pushlog_id': params.get('pushlog_id', 0),
            'pushdate': pushdate,
            'pushtime': pushdate[8:],
            'year': pushdate[0:4],
            'month': pushdate[4:6],
            'day': pushdate[6:8],
            'project': params['project'],
            'docker_image': docker_image,
            'base_repository': params['base_repository'] or params['head_repository'],
            'head_repository': params['head_repository'],
            'head_ref': params['head_ref'] or params['head_rev'],
            'head_rev': params['head_rev'],
            'owner': params['owner'],
            'level': params['level'],
            'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'.format(
                repo=params['head_repository'], rev=params['head_rev']),
        }

        tasks = []
        templates = Templates(path)
        for image_name in config['images']:
            context_path = os.path.join('testing', 'docker', image_name)

            image_parameters = dict(parameters)
            image_parameters['context_path'] = context_path
            image_parameters['artifact_path'] = 'public/image.tar'
            image_parameters['image_name'] = image_name

            image_artifact_path = \
                "public/docker_image_contexts/{}/context.tar.gz".format(image_name)
            if os.environ.get('TASK_ID'):
                # We put image context tarballs in a different artifacts folder
                # on the Gecko decision task in order to have longer expiration
                # dates for smaller artifacts.
                destination = os.path.join(
                    os.environ['HOME'],
                    "docker_image_contexts/{}/context.tar.gz".format(
                        image_name))
                image_parameters['context_url'] = ARTIFACT_URL.format(
                    os.environ['TASK_ID'], image_artifact_path)

                destination = os.path.abspath(destination)
                if not os.path.exists(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))

                context_hash = create_context_tar(GECKO, context_path,
                                                  destination, image_name)
            else:
                # skip context generation since this isn't a decision task
                # TODO: generate context tarballs using subdirectory clones in
                # the image-building task so we don't have to worry about this.
                image_parameters[
                    'context_url'] = 'file:///tmp/' + image_artifact_path
                context_hash = generate_context_hash(GECKO, context_path,
                                                     image_name)

            image_parameters['context_hash'] = context_hash

            image_task = templates.load('image.yml', image_parameters)

            attributes = {'image_name': image_name}

            # As an optimization, if the context hash exists for mozilla-central, that image
            # task ID will be used. The reasoning is that everything eventually lands on
            # mozilla-central, so if most tasks use this as a common image for a given
            # context hash, a worker within Taskcluster does not need to keep a separate
            # copy of the same image per branch.
            index_paths = [
                'docker.images.v1.{}.{}.hash.{}'.format(
                    project, image_name, context_hash)
                for project in ['mozilla-central', params['project']]
            ]

            tasks.append(
                cls(kind,
                    'build-docker-image-' + image_name,
                    task=image_task['task'],
                    attributes=attributes,
                    index_paths=index_paths))

        return tasks
Example #32
def fill_template(config, tasks):
    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop('name')
        job_symbol = task.pop('symbol')
        args = task.pop('args', {})
        packages = task.pop('packages', [])
        parent = task.pop('parent', None)

        for p in packages:
            if "packages-{}".format(p) not in config.kind_dependencies_tasks:
                raise Exception('Missing package job for {}-{}: {}'.format(
                    config.kind, image_name, p))

        if not taskgraph.fast:
            context_path = mozpath.relpath(image_path(image_name), GECKO)
            if config.write_artifacts:
                context_file = os.path.join(CONTEXTS_DIR,
                                            '{}.tar.gz'.format(image_name))
                logger.info("Writing {} for docker image {}".format(
                    context_file, image_name))
                context_hash = create_context_tar(GECKO, context_path,
                                                  context_file, image_name,
                                                  args)
            else:
                context_hash = generate_context_hash(GECKO, context_path,
                                                     image_name, args)
        else:
            if config.write_artifacts:
                raise Exception(
                    "Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = '0' * 40
        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]

        description = 'Build the docker image {} for use by dependent tasks'.format(
            image_name)

        args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
                                                 for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = '3' if int(config.params['level']) == 1 else '10'

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            'label': '{}-{}'.format(config.kind, image_name),
            'description': description,
            'attributes': {
                'image_name': image_name,
                'artifact_prefix': 'public',
            },
            'expires-after': '28 days' if config.params.is_try() else '1 year',
            'scopes': [],
            'treeherder': {
                'symbol': job_symbol,
                'platform': 'taskcluster-images/opt',
                'kind': 'other',
                'tier': 1,
            },
            'run-on-projects': [],
            'worker-type': 'images',
            'worker': {
                'implementation': 'docker-worker',
                'os': 'linux',
                'artifacts': [{
                    'type': 'file',
                    'path': '/workspace/image.tar.zst',
                    'name': 'public/image.tar.zst',
                }],
                'env': {
                    'CONTEXT_TASK_ID': {'task-reference': "<decision>"},
                    'CONTEXT_PATH': "public/docker-contexts/{}.tar.gz".format(image_name),
                    'HASH': context_hash,
                    'PROJECT': config.params['project'],
                    'IMAGE_NAME': image_name,
                    'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
                    'DOCKER_BUILD_ARGS': {
                        'task-reference': six.ensure_text(json.dumps(args))
                    },
                    'GECKO_BASE_REPOSITORY': config.params['base_repository'],
                    'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
                    'GECKO_HEAD_REV': config.params['head_rev'],
                },
                'chain-of-trust': True,
                'max-run-time': 7200,
                # FIXME: We aren't currently propagating the exit code
            },
        }
        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ['funsize-update-generator']:
            taskdesc['worker']['retry-exit-status'] = [-1]

        worker = taskdesc['worker']

        if image_name == 'image_builder':
            worker['docker-image'] = IMAGE_BUILDER_IMAGE
            digest_data.append(
                "image-builder-image:{}".format(IMAGE_BUILDER_IMAGE))
        else:
            worker['docker-image'] = {'in-tree': 'image_builder'}
            deps = taskdesc.setdefault('dependencies', {})
            deps['docker-image'] = '{}-image_builder'.format(config.kind)

        if packages:
            deps = taskdesc.setdefault('dependencies', {})
            for p in sorted(packages):
                deps[p] = 'packages-{}'.format(p)

        if parent:
            deps = taskdesc.setdefault('dependencies', {})
            deps['parent'] = '{}-{}'.format(config.kind, parent)
            worker['env']['PARENT_TASK_ID'] = {
                'task-reference': '<parent>',
            }
        if 'index' in task:
            taskdesc['index'] = task['index']

        if task.get('cache', True) and not taskgraph.fast:
            taskdesc['cache'] = {
                'type': 'docker-images.v2',
                'name': image_name,
                'digest-data': digest_data,
            }

        yield taskdesc
Example #33
    def load_tasks(cls, kind, path, config, params, loaded_tasks):
        # TODO: make this match the pushdate (get it from a parameter rather than vcs)
        pushdate = time.strftime('%Y%m%d%H%M%S', time.gmtime())

        parameters = {
            'pushlog_id': params.get('pushlog_id', 0),
            'pushdate': pushdate,
            'pushtime': pushdate[8:],
            'year': pushdate[0:4],
            'month': pushdate[4:6],
            'day': pushdate[6:8],
            'project': params['project'],
            'docker_image': docker_image,
            'base_repository': params['base_repository'] or params['head_repository'],
            'head_repository': params['head_repository'],
            'head_ref': params['head_ref'] or params['head_rev'],
            'head_rev': params['head_rev'],
            'owner': params['owner'],
            'level': params['level'],
            'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
                      .format(repo=params['head_repository'], rev=params['head_rev']),
        }

        tasks = []
        templates = Templates(path)
        for image_name in config['images']:
            context_path = os.path.join('testing', 'docker', image_name)

            image_parameters = dict(parameters)
            image_parameters['context_path'] = context_path
            image_parameters['artifact_path'] = 'public/image.tar'
            image_parameters['image_name'] = image_name

            image_artifact_path = \
                "public/decision_task/image_contexts/{}/context.tar.gz".format(image_name)
            if os.environ.get('TASK_ID'):
                destination = os.path.join(
                    os.environ['HOME'],
                    "artifacts/decision_task/image_contexts/{}/context.tar.gz".format(image_name))
                image_parameters['context_url'] = ARTIFACT_URL.format(
                    os.environ['TASK_ID'], image_artifact_path)

                destination = os.path.abspath(destination)
                if not os.path.exists(os.path.dirname(destination)):
                    os.makedirs(os.path.dirname(destination))

                context_hash = create_context_tar(GECKO, context_path,
                                                  destination, image_name)
            else:
                # skip context generation since this isn't a decision task
                # TODO: generate context tarballs using subdirectory clones in
                # the image-building task so we don't have to worry about this.
                image_parameters['context_url'] = 'file:///tmp/' + image_artifact_path
                context_hash = generate_context_hash(GECKO, context_path, image_name)

            image_parameters['context_hash'] = context_hash

            image_task = templates.load('image.yml', image_parameters)

            attributes = {'image_name': image_name}

            # As an optimization, if the context hash exists for mozilla-central, that image
            # task ID will be used. The reasoning is that everything eventually lands on
            # mozilla-central, so if most tasks use this as a common image for a given
            # context hash, a worker within Taskcluster does not need to keep a separate
            # copy of the same image per branch.
            index_paths = ['docker.images.v1.{}.{}.hash.{}'.format(
                                project, image_name, context_hash)
                           for project in ['mozilla-central', params['project']]]

            tasks.append(cls(kind, 'build-docker-image-' + image_name,
                             task=image_task['task'], attributes=attributes,
                             index_paths=index_paths))

        return tasks
Example #34
def fill_template(config, tasks):
    if not taskgraph.fast and config.write_artifacts:
        if not os.path.isdir(CONTEXTS_DIR):
            os.makedirs(CONTEXTS_DIR)

    for task in tasks:
        image_name = task.pop("name")
        job_symbol = task.pop("symbol")
        args = task.pop("args", {})
        packages = task.pop("packages", [])
        parent = task.pop("parent", None)

        for p in packages:
            if "packages-{}".format(p) not in config.kind_dependencies_tasks:
                raise Exception("Missing package job for {}-{}: {}".format(
                    config.kind, image_name, p))

        if not taskgraph.fast:
            context_path = mozpath.relpath(image_path(image_name), GECKO)
            if config.write_artifacts:
                context_file = os.path.join(CONTEXTS_DIR,
                                            "{}.tar.gz".format(image_name))
                logger.info("Writing {} for docker image {}".format(
                    context_file, image_name))
                context_hash = create_context_tar(GECKO, context_path,
                                                  context_file, image_name,
                                                  args)
            else:
                context_hash = generate_context_hash(GECKO, context_path,
                                                     image_name, args)
        else:
            if config.write_artifacts:
                raise Exception(
                    "Can't write artifacts if `taskgraph.fast` is set.")
            context_hash = "0" * 40
        digest_data = [context_hash]
        digest_data += [json.dumps(args, sort_keys=True)]

        description = "Build the docker image {} for use by dependent tasks".format(
            image_name)

        args["DOCKER_IMAGE_PACKAGES"] = " ".join("<{}>".format(p)
                                                 for p in packages)

        # Adjust the zstandard compression level based on the execution level.
        # We use faster compression for level 1 because we care more about
        # end-to-end times. We use slower/better compression for other levels
        # because images are read more often and it is worth the trade-off to
        # burn more CPU once to reduce image size.
        zstd_level = "3" if int(config.params["level"]) == 1 else "10"

        # include some information that is useful in reconstructing this task
        # from JSON
        taskdesc = {
            "label": "{}-{}".format(config.kind, image_name),
            "description": description,
            "attributes": {
                "image_name": image_name,
                "artifact_prefix": "public",
            },
            "expires-after": "1 year",
            "scopes": [],
            "treeherder": {
                "symbol": job_symbol,
                "platform": "taskcluster-images/opt",
                "kind": "other",
                "tier": 1,
            },
            "run-on-projects": [],
            "worker-type": "images",
            "worker": {
                "implementation":
                "docker-worker",
                "os":
                "linux",
                "artifacts": [{
                    "type": "file",
                    "path": "/workspace/image.tar.zst",
                    "name": "public/image.tar.zst",
                }],
                "env": {
                    "CONTEXT_TASK_ID": {
                        "task-reference": "<decision>"
                    },
                    "CONTEXT_PATH":
                    "public/docker-contexts/{}.tar.gz".format(image_name),
                    "HASH":
                    context_hash,
                    "PROJECT":
                    config.params["project"],
                    "IMAGE_NAME":
                    image_name,
                    "DOCKER_IMAGE_ZSTD_LEVEL":
                    zstd_level,
                    "DOCKER_BUILD_ARGS": {
                        "task-reference": six.ensure_text(json.dumps(args))
                    },
                    "GECKO_BASE_REPOSITORY":
                    config.params["base_repository"],
                    "GECKO_HEAD_REPOSITORY":
                    config.params["head_repository"],
                    "GECKO_HEAD_REV":
                    config.params["head_rev"],
                },
                "chain-of-trust":
                True,
                "max-run-time":
                7200,
                # FIXME: We aren't currently propagating the exit code
            },
        }
        # Retry for 'funsize-update-generator' if exit status code is -1
        if image_name in ["funsize-update-generator"]:
            taskdesc["worker"]["retry-exit-status"] = [-1]

        worker = taskdesc["worker"]

        if image_name == "image_builder":
            worker["docker-image"] = IMAGE_BUILDER_IMAGE
            digest_data.append(
                "image-builder-image:{}".format(IMAGE_BUILDER_IMAGE))
        else:
            worker["docker-image"] = {"in-tree": "image_builder"}
            deps = taskdesc.setdefault("dependencies", {})
            deps["docker-image"] = "{}-image_builder".format(config.kind)

        if packages:
            deps = taskdesc.setdefault("dependencies", {})
            for p in sorted(packages):
                deps[p] = "packages-{}".format(p)

        if parent:
            deps = taskdesc.setdefault("dependencies", {})
            deps["parent"] = "{}-{}".format(config.kind, parent)
            worker["env"]["PARENT_TASK_ID"] = {
                "task-reference": "<parent>",
            }
        if "index" in task:
            taskdesc["index"] = task["index"]

        if task.get("cache", True) and not taskgraph.fast:
            taskdesc["cache"] = {
                "type": "docker-images.v2",
                "name": image_name,
                "digest-data": digest_data,
            }

        yield taskdesc