def testAddingDirectoriesForFileManually(self):
  """Duplicate and trailing-slash directory entries collapse to one entry."""
  with archive.TarFileWriter(self.tempfile) as writer:
    writer.add_file("d", tarfile.DIRTYPE)
    writer.add_file("d/f")
    writer.add_file("a", tarfile.DIRTYPE)
    writer.add_file("a/b", tarfile.DIRTYPE)
    writer.add_file("a/b", tarfile.DIRTYPE)
    writer.add_file("a/b/", tarfile.DIRTYPE)
    writer.add_file("a/b/c/f")
    writer.add_file("x/y/f")
    writer.add_file("x", tarfile.DIRTYPE)
  # Regular files in this test all end in "/f"; everything else is a
  # directory and must carry the default 0o755 mode.
  expected = []
  for name in (".", "./d", "./d/f", "./a", "./a/b", "./a/b/c",
               "./a/b/c/f", "./x", "./x/y", "./x/y/f"):
    if name.endswith("/f"):
      expected.append({"name": name})
    else:
      expected.append({"name": name, "mode": 0o755})
  self.assertTarFileContent(self.tempfile, expected)
def testChangingRootDirectory(self):
  """root_directory replaces "." as the prefix of every archived path."""
  with archive.TarFileWriter(self.tempfile, root_directory="root") as writer:
    writer.add_file("d", tarfile.DIRTYPE)
    writer.add_file("d/f")
    writer.add_file("a", tarfile.DIRTYPE)
    writer.add_file("a/b", tarfile.DIRTYPE)
    writer.add_file("a/b", tarfile.DIRTYPE)
    writer.add_file("a/b/", tarfile.DIRTYPE)
    writer.add_file("a/b/c/f")
    writer.add_file("x/y/f")
    writer.add_file("x", tarfile.DIRTYPE)
  # Regular files in this test all end in "/f"; everything else is a
  # directory and must carry the default 0o755 mode.
  expected = []
  for name in ("root", "root/d", "root/d/f", "root/a", "root/a/b",
               "root/a/b/c", "root/a/b/c/f", "root/x", "root/x/y",
               "root/x/y/f"):
    if name.endswith("/f"):
      expected.append({"name": name})
    else:
      expected.append({"name": name, "mode": 0o755})
  self.assertTarFileContent(self.tempfile, expected)
def assertSimpleFileContent(self, names):
  """Archive each name as a file whose content is the name, then verify.

  Sizes and payloads are compared as UTF-8 bytes, matching what the tar
  file actually stores.
  """
  with archive.TarFileWriter(self.tempfile) as writer:
    for name in names:
      writer.add_file(name, content=name)
  expected = [{"name": "."}]
  for name in names:
    payload = name.encode("utf-8")
    expected.append({"name": name, "size": len(payload), "data": payload})
  self.assertTarFileContent(self.tempfile, expected)
def create_image(output, layers, repositories=None):
  """Creates a Docker image from a list of layers.

  Args:
    output: the name of the docker image file to create.
    layers: the layers (tar files) to join to the image.
    repositories: the repositories two-level dictionary, which is keyed by
      repo names at the top-level, and tag names at the second level
      pointing to layer ids.
  """
  # The context manager guarantees the archive is finalized on exit.
  tar = archive.TarFileWriter(output)
  with tar:
    _create_image(tar, layers, repositories)
def testDottedFiles(self):
  """Relative names gain a "./" prefix; absolute paths are kept as-is."""
  names = ["a", "/b", "./c", "./.d", "..e", ".f"]
  with archive.TarFileWriter(self.tempfile) as writer:
    for name in names:
      writer.add_file(name)
  expected = [
      {"name": "."},
      {"name": "./a"},
      {"name": "/b"},
      {"name": "./c"},
      {"name": "./.d"},
      {"name": "./..e"},
      {"name": "./.f"},
  ]
  self.assertTarFileContent(self.tempfile, expected)
def create_image(output, identifier, base=None, layer=None, metadata=None,
                 name=None, repository=None):
  """Creates a Docker image.

  Args:
    output: the name of the docker image file to create.
    identifier: the identifier of the top layer for this image.
    base: a base layer (optional) to merge to current layer.
    layer: the layer content (a tar file).
    metadata: the json metadata file for the top layer.
    name: symbolic name for this docker image.
    repository: repository name for this docker image.
  """
  # NOTE(review): the writer is never explicitly closed here; presumably the
  # caller (or interpreter shutdown) finalizes the archive — verify.
  tar = archive.TarFileWriter(output)
  # Write our id to 'top' as we are now the topmost layer.
  tar.add_file('top', content=identifier)
  # Each layer is encoded as a directory in the larger tarball of the form:
  # {id}\
  #   layer.tar
  #   VERSION
  #   json
  # Create the directory for us to now fill in.
  tar.add_file(identifier + '/', tarfile.DIRTYPE)
  # VERSION generally seems to contain 1.0, not entirely sure
  # what the point of this is.
  tar.add_file(identifier + '/VERSION', content=DATA_FORMAT_VERSION)
  # Add the layer file
  tar.add_file(identifier + '/layer.tar', file_content=layer)
  # Now the json metadata
  tar.add_file(identifier + '/json', file_content=metadata)
  # Merge the base if any
  if base:
    tar.add_tar(base, name_filter=_base_name_filter)
  # In addition to N layers of the form described above, there is
  # a single file at the top of the image called repositories.
  # This file contains a JSON blob of the form:
  # {
  #   'repo':{
  #     'tag-name': 'top-most layer hex',
  #     ...
  #   },
  #   ...
  # }
  if repository:
    tar.add_file('repositories', content='\n'.join([
        '{',
        ' "%s": {' % repository,
        ' "%s": "%s"' % (name, identifier),
        ' }',
        '}']))
def create_image(output, identifier, layers, config, tags=None, base=None):
  """Creates a container image.

  Args:
    output: the name of the docker image file to create.
    identifier: the identifier for this image (sha256 of the metadata).
    layers: the layer content (a sha256 and a tar file).
    config: the configuration file for the image.
    tags: tags that apply to this image.
    base: a base layer (optional) to build on top of.
  """
  # Fix: use the writer as a context manager so the archive is properly
  # finalized (end-of-archive blocks written) even if adding a file raises;
  # the original never closed the writer.
  with archive.TarFileWriter(output) as tar:
    # Add the image config referenced by the Config section in the manifest.
    # The name can be anything, but docker uses the format below.
    config_file_name = identifier + '.json'
    tar.add_file(config_file_name, file_content=config)

    layer_file_names = []
    for layer in layers:
      # Layers can be called anything, so just name them by their sha256.
      layer_file_name = identifier + '/' + layer['name'] + '.tar'
      layer_file_names.append(layer_file_name)
      tar.add_file(layer_file_name, file_content=layer['layer'])

    base_layer_file_names = []
    parent = None
    if base:
      latest_item = utils.GetLatestManifestFromTar(base)
      if latest_item:
        base_layer_file_names = latest_item.get('Layers', [])
        config_file = latest_item['Config']
        # The parent image id is the basename of its config file.
        parent_search = re.search(r'^(.+)\.json$', config_file)
        if parent_search:
          parent = parent_search.group(1)

    manifest_item = {
        'Config': config_file_name,
        'Layers': base_layer_file_names + layer_file_names,
        'RepoTags': tags or []
    }
    if parent:
      manifest_item['Parent'] = 'sha256:' + parent

    manifest = [manifest_item]
    manifest_content = json.dumps(manifest, sort_keys=True)
    tar.add_file('manifest.json', content=manifest_content)
def testAddingDirectoriesForFileSeparately(self):
  """add_dir contents and later add_file entries share parent directories."""

  def make_dir_with_file(basename):
    # Create <TEST_TMPDIR>/<basename>/dir_file on disk as a fixture.
    path = os.path.join(os.environ["TEST_TMPDIR"], basename)
    os.makedirs(path)
    with open(os.path.join(path, "dir_file"), "w"):
      pass
    return path

  d_dir = make_dir_with_file("d_dir")
  a_dir = make_dir_with_file("a_dir")

  with archive.TarFileWriter(self.tempfile) as writer:
    writer.add_dir("d", d_dir)
    writer.add_file("d/f")
    writer.add_dir("a", a_dir)
    writer.add_file("a/b/f")

  expected = [
      {"name": ".", "mode": 0o755},
      {"name": "./d", "mode": 0o755},
      {"name": "./d/dir_file"},
      {"name": "./d/f"},
      {"name": "./a", "mode": 0o755},
      {"name": "./a/dir_file"},
      {"name": "./a/b", "mode": 0o755},
      {"name": "./a/b/f"},
  ]
  self.assertTarFileContent(self.tempfile, expected)
def testAddingDirectoriesForFile(self):
  """Intermediate directories are synthesized for a nested file path."""
  with archive.TarFileWriter(self.tempfile) as writer:
    writer.add_file("d/f")
  expected = [
      {"name": ".", "mode": 0o755},
      {"name": "./d", "mode": 0o755},
      {"name": "./d/f"},
  ]
  self.assertTarFileContent(self.tempfile, expected)
def testMergeTar(self):
  """add_tar merges entries from plain and compressed source archives."""
  expected = [
      {"name": "./a", "data": b"a"},
      {"name": "./ab", "data": b"ab"},
  ]
  for suffix in ("", ".gz", ".bz2", ".xz"):
    source = os.path.join(testenv.TESTDATA_PATH, "tar_test.tar" + suffix)
    with archive.TarFileWriter(self.tempfile) as writer:
      # "./b" is filtered out of the merge on purpose.
      writer.add_tar(source, name_filter=lambda name: name != "./b")
    self.assertTarFileContent(self.tempfile, expected)
def assemble_image(output, images):
  """Creates a container image from a list of partial images.

  Merges all manifests from each image and combines the image tars.

  Args:
    output: the name of the container image file to create.
    images: the images (tar files) to join together.
  """
  manifest = []
  # Fix: use the writer as a context manager so the output archive is
  # properly finalized even if adding an image fails; the original never
  # closed the writer.
  with archive.TarFileWriter(output) as tar:
    for image in images:
      tar.add_tar(image, name_filter=_image_filter)
      manifest += GetManifestFromTar(image)
    manifest_content = json.dumps(manifest, sort_keys=True)
    tar.add_file('manifest.json', content=manifest_content)
def create_image(output, layers, identifier=None, name=None, repository=None):
  """Creates a Docker image from a list of layers.

  Args:
    output: the name of the docker image file to create.
    layers: the layers (tar files) to join to the image.
    identifier: the identifier of the top layer for this image.
    name: symbolic name for this docker image.
    repository: repository name for this docker image.
  """
  manifest = []

  # NOTE(review): the writer is never explicitly closed here; presumably the
  # caller (or interpreter shutdown) finalizes the archive — verify.
  tar = archive.TarFileWriter(output)
  # Merge every input layer and accumulate their manifest entries.
  for layer in layers:
    tar.add_tar(layer, name_filter=_layer_filter)
    manifest += utils.GetManifestFromTar(layer)

  manifest_content = json.dumps(manifest, sort_keys=True)
  tar.add_file('manifest.json', content=manifest_content)

  # In addition to N layers of the form described above, there might be
  # a single file at the top of the image called repositories.
  # This file contains a JSON blob of the form:
  # {
  #   'repo':{
  #     'tag-name': 'top-most layer hex',
  #     ...
  #   },
  #   ...
  # }
  if identifier:
    # If the identifier is not provided, then the resulted layer will be
    # created without a 'top' file. Docker doesn't needs that file nor
    # the repository to load the image and for intermediate layer,
    # docker_build store the name of the layer in a separate artifact so
    # this 'top' file is not needed.
    tar.add_file('top', content=identifier)
    if repository and name:
      tar.add_file(
          'repositories',
          content='\n'.join([
              '{',
              ' "%s": {' % repository,
              ' "%s": "%s"' % (name, identifier),
              ' }',
              '}'
          ]))
def testAddDir(self):
  """add_dir recursively archives an on-disk tree with the given file mode."""
  # For some strange reason, ending slash is stripped by the test.
  expected = [
      {"name": ".", "mode": 0o755},
      {"name": "./a", "mode": 0o755},
      {"name": "./a/b", "data": b"ab", "mode": 0o644},
      {"name": "./a/c", "mode": 0o755},
      {"name": "./a/c/d", "data": b"acd", "mode": 0o644},
  ]
  tempdir = os.path.join(os.environ["TEST_TMPDIR"], "test_dir")
  # Materialize on disk exactly the regular files described by `expected`;
  # directories are created implicitly via makedirs.
  for entry in expected:
    if "data" not in entry:
      continue
    path = os.path.join(tempdir, entry["name"][2:])
    os.makedirs(os.path.dirname(path))
    with open(path, "wb") as out:
      out.write(entry["data"])
  with archive.TarFileWriter(self.tempfile) as writer:
    writer.add_dir("./", tempdir, mode=0o644)
  self.assertTarFileContent(self.tempfile, expected)
def testMergeTarRelocated(self):
  """add_tar with root= relocates merged entries under that directory."""
  expected = [
      {"name": ".", "mode": 0o755},
      {"name": "./foo", "mode": 0o755},
      {"name": "./foo/a", "data": b"a"},
      {"name": "./foo/ab", "data": b"ab"},
  ]
  source = os.path.join(testenv.TESTDATA_PATH, "tar_test.tar")
  with archive.TarFileWriter(self.tempfile) as writer:
    # "./b" is filtered out of the merge on purpose.
    writer.add_tar(source, name_filter=lambda name: name != "./b",
                   root="/foo")
  self.assertTarFileContent(self.tempfile, expected)
def testEmptyTarFile(self):
  """A writer that adds nothing still produces a valid, empty archive."""
  with archive.TarFileWriter(self.tempfile):
    pass
  self.assertTarFileContent(self.tempfile, [])
def testPortableMtime(self):
  """default_mtime="portable" maps to 946684800 (2000-01-01T00:00:00Z)."""
  with archive.TarFileWriter(self.tempfile,
                             default_mtime="portable") as writer:
    self.assertEqual(writer.default_mtime, 946684800)
def testDefaultMtimeProvided(self):
  """An explicit numeric default_mtime is stored as given."""
  with archive.TarFileWriter(self.tempfile, default_mtime=1234) as writer:
    self.assertEqual(writer.default_mtime, 1234)
def testDefaultMtimeNotProvided(self):
  """When no default_mtime is given, the writer defaults to 0."""
  with archive.TarFileWriter(self.tempfile) as writer:
    self.assertEqual(writer.default_mtime, 0)
def __enter__(self):
  # Open the output archive lazily on context entry; the matching __exit__
  # is expected to close self.tarfile.
  # NOTE(review): relies on self.output and self.compression having been set
  # by __init__ (not visible in this chunk) — confirm.
  self.tarfile = archive.TarFileWriter(self.output, self.compression)
  return self
def create_image(output, identifier, layers, config, tags=None, base=None,
                 legacy_base=None, metadata_id=None, metadata=None, name=None,
                 repository=None):
  """Creates a Docker image.

  Args:
    output: the name of the docker image file to create.
    identifier: the identifier for this image (sha256 of the metadata).
    layers: the layer content (a sha256 and a tar file).
    config: the configuration file for the image.
    tags: tags that apply to this image.
    base: a base layer (optional) to build on top of.
    legacy_base: a base layer (optional) to build on top of.
    metadata_id: the identifier of the top layer for this image.
    metadata: the json metadata file for the top layer.
    name: symbolic name for this docker image.
    repository: repository name for this docker image.
  """
  # NOTE(review): the writer is never explicitly closed here; presumably the
  # caller (or interpreter shutdown) finalizes the archive — verify.
  tar = archive.TarFileWriter(output)

  # add the image config referenced by the Config section in the manifest
  # the name can be anything but docker uses the format below
  config_file_name = identifier + '.json'
  tar.add_file(config_file_name, file_content=config)

  layer_file_names = []

  # When metadata_id is given, the image is written in the legacy per-layer
  # directory layout; otherwise layers are emitted flat and named by sha256.
  if metadata_id:
    # Write our id to 'top' as we are now the topmost layer.
    tar.add_file('top', content=metadata_id)

    # Each layer is encoded as a directory in the larger tarball of the form:
    # {id}\
    #   layer.tar
    #   VERSION
    #   json
    # Create the directory for us to now fill in.
    tar.add_file(metadata_id + '/', tarfile.DIRTYPE)

    # VERSION generally seems to contain 1.0, not entirely sure
    # what the point of this is.
    tar.add_file(metadata_id + '/VERSION', content=DATA_FORMAT_VERSION)

    # Add the layer file.  Only the first entry of `layers` is used in the
    # legacy path.
    layer_file_name = metadata_id + '/layer.tar'
    layer_file_names.append(layer_file_name)
    tar.add_file(layer_file_name, file_content=layers[0]['layer'])

    # Now the json metadata
    tar.add_file(metadata_id + '/json', file_content=metadata)

    # Merge the base if any
    if legacy_base:
      tar.add_tar(legacy_base, name_filter=_base_name_filter)
  else:
    for layer in layers:
      # layers can be called anything, so just name them by their sha256
      layer_file_name = identifier + '/' + layer['name'] + '.tar'
      layer_file_names.append(layer_file_name)
      tar.add_file(layer_file_name, file_content=layer['layer'])

  # If a base image is provided, inherit its layer list and derive the
  # parent id from the basename of its config file.
  base_layer_file_names = []
  parent = None
  if base:
    latest_item = utils.GetLatestManifestFromTar(base)
    if latest_item:
      base_layer_file_names = latest_item.get('Layers', [])
      config_file = latest_item['Config']
      parent_search = re.search('^(.+)\\.json$', config_file)
      if parent_search:
        parent = parent_search.group(1)

  manifest_item = {
      'Config': config_file_name,
      'Layers': base_layer_file_names + layer_file_names,
      'RepoTags': tags or []
  }
  if parent:
    manifest_item['Parent'] = 'sha256:' + parent

  manifest = [manifest_item]

  manifest_content = json.dumps(manifest, sort_keys=True)
  tar.add_file('manifest.json', content=manifest_content)

  # In addition to N layers of the form described above, there is
  # a single file at the top of the image called repositories.
  # This file contains a JSON blob of the form:
  # {
  #   'repo':{
  #     'tag-name': 'top-most layer hex',
  #     ...
  #   },
  #   ...
  # }
  if repository:
    tar.add_file('repositories', content='\n'.join([
        '{',
        ' "%s": {' % repository,
        ' "%s": "%s"' % (name, identifier),
        ' }',
        '}']))
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple cross-platform helper to create a timestamped tar file."""
import datetime
import sys
import tarfile

from tools.build_defs.pkg import archive

if __name__ == '__main__':
  # Fix: strftime('%s') is a non-portable glibc extension (it fails on
  # Windows, contradicting "cross-platform" above); timestamp() yields the
  # same epoch seconds everywhere.
  mtime = int(datetime.datetime.now().timestamp())
  with archive.TarFileWriter(sys.argv[1]) as f:
    f.add_file('./', tarfile.DIRTYPE, uname='root', gname='root', mtime=mtime)
    f.add_file('./usr/', tarfile.DIRTYPE, uname='root', gname='root',
               mtime=mtime)
    f.add_file('./usr/bin/', tarfile.DIRTYPE, uname='root', gname='root',
               mtime=mtime)
def testPreserveTarMtimesFalse(self):
  """With preserve_tar_mtimes=False, merged entries get zeroed mtimes."""
  with archive.TarFileWriter(self.tempfile,
                             preserve_tar_mtimes=False) as writer:
    writer.add_tar(os.path.join(testenv.TESTDATA_PATH, "tar_test.tar"))
    for member in writer.tar:
      self.assertEqual(member.mtime, 0)
def __enter__(self):
  # Open the output archive on context entry, rooting all entries under
  # self.root_directory; the matching __exit__ is expected to close
  # self.tarfile.
  # NOTE(review): relies on self.output, self.compression and
  # self.root_directory having been set by __init__ (not visible here).
  self.tarfile = archive.TarFileWriter(self.output, self.compression,
                                       self.root_directory)
  return self
def assertSimpleFileContent(self, names):
  """Archives each name as a file whose content is the name, then verifies.

  Fix: expected sizes and payloads are computed from the UTF-8 encoding of
  the name.  The previous version used len(n) (a character count) and the
  str itself, which is wrong for non-ASCII names and does not match the
  bytes read back from the tar file.
  """
  with archive.TarFileWriter(self.tempfile) as f:
    for n in names:
      f.add_file(n, content=n)
  content = [{
      "name": n,
      "size": len(n.encode("utf-8")),
      "data": n.encode("utf-8")
  } for n in names]
  self.assertTarFileContent(self.tempfile, content)