def test_normalizeimage(self):
    """Round-trip normalizeimage/denormalizeimage, both copying and in place."""
    # Raw image records shaped like the Docker API's /images/json payload.
    images = [
        {
            'Created': '2015-05-01T12:34:56.789012345Z',
            'Id': '0123456789ABCDEFfedcba98765432100123456789abcdefFEDCBA9876543210',
            'Parent': 'FEDCBA98765432100123456789abcdeffedcba98765432100123456789ABCDEF',
            'RepoTags': ['<none>:<none>'],
            'Size': 0,
            'VirtualSize': 0,
        },
        {
            'Created': 1431216000,
            'Id': 'fedcba9876543210FEDCBA98765432100123456789abcdef0123456789ABCDEF',
            'ParentId': '0123456789abcdef0123456789ABCDEFfedcba9876543210FEDCBA9876543210',
            'RepoTags': ['foo:bar', 'baz:latest'],
            'Size': 0,
            'VirtualSize': 0,
        },
    ]
    # Normalized keys expected to be layered on top of each raw record
    # (IDs lower-cased, timestamps parsed, '<none>' tags dropped).
    normalized_images = [
        {
            ':created_dt': datetime(2015, 5, 1, 12, 34, 56, 789012, TZ_UTC),
            ':id': '0123456789abcdeffedcba98765432100123456789abcdeffedcba9876543210',
            ':parent_id': 'fedcba98765432100123456789abcdeffedcba98765432100123456789abcdef',
            ':short_id': '0123456789ab',
            ':repo_tags': [],
        },
        {
            ':created_dt': datetime(2015, 5, 10, 0, 0, 0, 0, TZ_UTC),
            ':id': 'fedcba9876543210fedcba98765432100123456789abcdef0123456789abcdef',
            ':parent_id': '0123456789abcdef0123456789abcdeffedcba9876543210fedcba9876543210',
            ':short_id': 'fedcba987654',
            ':repo_tags': ['foo:bar', 'baz', 'baz:latest'],
        },
    ]

    for raw, expected in zip(images, normalized_images):
        # The normalized record keeps every raw key as well.
        expected.update(raw)

        normalized = normalizeimage(raw, copy=True)
        self.assertIsNot(normalized, raw)

        denormalized = denormalizeimage(normalized, copy=True)
        self.assertIsNot(denormalized, normalized)

        self.assertNotEqual(normalized, raw)
        self.assertEqual(normalized, expected)
        self.assertNotEqual(denormalized, expected)
        self.assertEqual(denormalized, raw)

    # Without copy=True, both calls mutate and return the very same object.
    image = deepcopy(images[0])
    self.assertIs(normalizeimage(image), image)
    self.assertIs(denormalizeimage(image), image)
def test_normalizeimage(self):
    """Verify normalizeimage/denormalizeimage invert each other for copies
    and operate in place when copy is not requested."""
    # Two API-style image records: one with an ISO-8601 'Created' string and
    # a 'Parent' key, one with an epoch 'Created' and a 'ParentId' key.
    src_images = [
        {
            'Created': '2015-05-01T12:34:56.789012345Z',
            'Id': '0123456789ABCDEFfedcba98765432100123456789abcdefFEDCBA9876543210',
            'Parent': 'FEDCBA98765432100123456789abcdeffedcba98765432100123456789ABCDEF',
            'RepoTags': ['<none>:<none>'],
            'Size': 0,
            'VirtualSize': 0,
        },
        {
            'Created': 1431216000,
            'Id': 'fedcba9876543210FEDCBA98765432100123456789abcdef0123456789ABCDEF',
            'ParentId': '0123456789abcdef0123456789ABCDEFfedcba9876543210FEDCBA9876543210',
            'RepoTags': ['foo:bar', 'baz:latest'],
            'Size': 0,
            'VirtualSize': 0,
        },
    ]
    # Extra ':'-prefixed keys that normalization should add to each record.
    extra_keys = [
        {
            ':created_dt': datetime(2015, 5, 1, 12, 34, 56, 789012, TZ_UTC),
            ':id': '0123456789abcdeffedcba98765432100123456789abcdeffedcba9876543210',
            ':parent_id': 'fedcba98765432100123456789abcdeffedcba98765432100123456789abcdef',
            ':short_id': '0123456789ab',
            ':repo_tags': [],
        },
        {
            ':created_dt': datetime(2015, 5, 10, 0, 0, 0, 0, TZ_UTC),
            ':id': 'fedcba9876543210fedcba98765432100123456789abcdef0123456789abcdef',
            ':parent_id': '0123456789abcdef0123456789abcdeffedcba9876543210fedcba9876543210',
            ':short_id': 'fedcba987654',
            ':repo_tags': ['foo:bar', 'baz', 'baz:latest'],
        },
    ]

    for src, want in zip(src_images, extra_keys):
        want.update(src)  # normalized form is raw keys plus the extras

        got = normalizeimage(src, copy=True)
        self.assertIsNot(got, src)
        back = denormalizeimage(got, copy=True)
        self.assertIsNot(back, got)

        self.assertNotEqual(got, src)
        self.assertEqual(got, want)
        self.assertNotEqual(back, want)
        self.assertEqual(back, src)

    # In-place mode: the argument itself is returned from both directions.
    in_place = deepcopy(src_images[0])
    self.assertIs(normalizeimage(in_place), in_place)
    self.assertIs(denormalizeimage(in_place), in_place)
def get_image(self, image):
    """Return a seekable BytesIO holding a tar archive of *image* plus all
    of its ancestor layers, mimicking the Docker export format.

    Raises APIError when *image* is falsy, like the real client would on a
    bad request.
    """
    if not image:
        raise APIError(
            HTTPError('500 Server Error'), None,
            explanation='Usage: image_export IMAGE [IMAGE...]')

    # Walk the parent chain starting from the requested layer; each
    # normalized record exposes ':parent_id' for the next hop.
    layers = []
    next_layer_id = image
    while next_layer_id:
        layer = normalizeimage(self._findlayer(next_layer_id), copy=True)
        layers.append(layer)
        next_layer_id = layer[':parent_id']

    image_file = BytesIO()
    mtime = time()  # one shared timestamp for every archive member

    with tarfile_open(mode='w', fileobj=image_file) as image_tar_file:
        for layer in layers:
            # Directory entry named by the full layer ID.
            dir_entry = TarInfo(layer[':id'])
            dir_entry.mtime = mtime
            dir_entry.mode = 0o755
            dir_entry.type = DIRTYPE
            image_tar_file.addfile(dir_entry)

            # The layer payload lives on disk under data/<short_id>/layer.tar.
            src_path = ospath_join(
                self._my_dir, 'data', layer[':short_id'], 'layer.tar')

            with open(src_path, 'rb') as src_file:
                dst_path = '{}/layer.tar'.format(layer[':id'])
                member = image_tar_file.gettarinfo(src_path, dst_path)
                # Scrub host-specific metadata so archives are reproducible.
                member.mtime = mtime
                member.mode = 0o644
                member.uid = member.gid = 0
                member.uname = member.gname = ''
                image_tar_file.addfile(member, fileobj=src_file)

    image_file.seek(0)

    return image_file
def get_image(self, image):
    """Build and return an in-memory tar stream for *image* and every
    ancestor layer, rewound to position 0.

    A falsy *image* raises APIError, mirroring the real Docker client.
    """
    if not image:
        raise APIError(
            HTTPError('500 Server Error'), None,
            explanation='Usage: image_export IMAGE [IMAGE...]')

    # Collect the chain of normalized layer records, child first.
    chain = []
    layer_id = image
    while layer_id:
        record = normalizeimage(self._findlayer(layer_id), copy=True)
        chain.append(record)
        layer_id = record[':parent_id']

    buf = BytesIO()
    now = time()

    with tarfile_open(mode='w', fileobj=buf) as archive:
        for record in chain:
            # Each layer contributes a directory named by its full ID...
            directory = TarInfo(record[':id'])
            directory.mtime = now
            directory.mode = 0o755
            directory.type = DIRTYPE
            archive.addfile(directory)

            # ...and a layer.tar copied from the on-disk fixture data.
            fixture = ospath_join(
                self._my_dir, 'data', record[':short_id'], 'layer.tar')

            with open(fixture, 'rb') as payload:
                arcname = '{}/layer.tar'.format(record[':id'])
                info = archive.gettarinfo(fixture, arcname)
                # Normalize ownership/permissions/timestamps for determinism.
                info.mtime = now
                info.mode = 0o644
                info.uid = info.gid = 0
                info.uname = info.gname = ''
                archive.addfile(info, fileobj=payload)

    buf.seek(0)

    return buf
def __init__(self, always_raise=None):
    """Synthesize a deterministic set of fake image layers and index them
    by ID, short ID, and repo tag.

    :param always_raise: optional exception the faux client should raise
        from its API methods (stored, not raised here).
    """
    super().__init__()
    self._always_raise = always_raise
    # Directory of this source file; fixture data lives beside it.
    self._my_dir = dirname(getframeinfo(currentframe()).filename)
    self.layers = []
    self.layers_by_id = {}
    self.layers_by_tag = {}

    # Layer IDs are derived from (path, depth) pairs; both counts must fit
    # in one byte so the '{:02x}{:02x}' signature stays two hex pairs.
    num_paths = len(FauxDockerClient.SHORT_IDS_BY_PATH)  # should not exceed 0x100
    path_depth = len(FauxDockerClient.SHORT_IDS_BY_PATH[0])  # should not exceed 0x100
    max_path_idx = path_depth - 1

    for path_no in range(num_paths):
        for level in range(path_depth):
            sig = '{:02x}{:02x}'.format(path_no, level)
            # Double-SHA256 of the repeated signature gives a stable 64-hex
            # digest; splice the signature into both ends so the short ID
            # and full ID are recognizable.
            digest = sha256(sha256(unhexlify(sig * 16)).digest()).hexdigest()
            short_id = digest[:8] + sig
            full_id = short_id + digest[12:60] + sig

            parent_id = '' if level == 0 else self.layers[-1]['Id']

            fixture_path = ospath_join(
                self._my_dir, 'data', full_id[:12], 'layer.tar')
            try:
                # This isn't quite right, but it doesn't matter for
                # our purposes
                size = stat(fixture_path).st_size
            except OSError:
                size = 0

            # Previous layer's raw Size, read before appending this one.
            prev_size = 0 if level == 0 else self.layers[-1]['Size']
            self.layers.append({
                'Created': '2015-04-{:02d}T{:02d}:00:{:02d}.000000000Z'.format(
                    level + 10, level, path_no),
                'Id': full_id,
                'Parent': parent_id,
                'Size': size,
                'RepoTags': ['<none>:<none>'],
                'VirtualSize': size if level == 0 else size + prev_size,
            })

            assert short_id == FauxDockerClient.SHORT_IDS_BY_PATH[path_no][
                max_path_idx - level]

    # Tag the tip of the first path and the final layer overall.
    self.layers[max_path_idx]['RepoTags'] = ['getto:dachoppa']
    self.layers[-1]['RepoTags'] = ['greatest:hits', 'greatest:latest']

    for layer in self.layers:
        normalizeimage(layer)
        self.layers_by_id[layer[':id']] = layer
        self.layers_by_id[layer[':short_id']] = layer
        for repo_tag in layer[':repo_tags']:
            self.layers_by_tag[repo_tag] = layer

    # Sanity checks: every index key is a lower-cased prefix of its ID, and
    # the tag index resolves to the expected layers.
    for key, val in iteritems(self.layers_by_id):
        assert key == val['Id'][:len(key)].lower()
    assert self.layers_by_tag['getto:dachoppa'] == self.layers[max_path_idx]
    assert self.layers_by_tag['getto:dachoppa'][
        'Id'][:12] == FauxDockerClient.SHORT_IDS_BY_PATH[0][0]
    assert self.layers_by_tag['greatest'] == self.layers[-1]
    assert self.layers_by_tag['greatest:latest'] == self.layers[-1]
    assert self.layers_by_tag['greatest:hits'] == self.layers[-1]
    assert self.layers_by_tag['greatest:hits'][
        'Id'][:12] == FauxDockerClient.SHORT_IDS_BY_PATH[1][0]

    self.layers.sort(key=imagekey, reverse=True)
def __init__(self, always_raise=None):
    """Create the faux client's deterministic layer fixtures.

    Builds every (path, depth) layer, applies the well-known repo tags, and
    populates the by-ID and by-tag lookup tables used by the fake API.

    :param always_raise: optional exception instance remembered for later
        use by the faux API methods.
    """
    super().__init__()
    self._always_raise = always_raise
    # Locate this file so fixture layer.tar data can be found next to it.
    self._my_dir = dirname(getframeinfo(currentframe()).filename)
    self.layers = []
    self.layers_by_id = {}
    self.layers_by_tag = {}

    num_paths = len(FauxDockerClient.SHORT_IDS_BY_PATH)  # should not exceed 0x100
    path_depth = len(FauxDockerClient.SHORT_IDS_BY_PATH[0])  # should not exceed 0x100
    max_path_idx = path_depth - 1

    for j in range(num_paths):
        for i in range(path_depth):
            # Two-byte signature embedded at both ends of the layer ID so
            # IDs are predictable from (j, i).
            layer_sig = '{:02x}{:02x}'.format(j, i)
            raw_hex = sha256(
                sha256(unhexlify(layer_sig * 16)).digest()).hexdigest()
            short_id = raw_hex[:8] + layer_sig
            layer_id = short_id + raw_hex[12:60] + layer_sig

            last_layer_id = self.layers[-1]['Id'] if i else ''

            layer_tar_src_path = ospath_join(
                self._my_dir, 'data', layer_id[:12], 'layer.tar')
            try:
                # This isn't quite right, but it doesn't matter for
                # our purposes
                layer_size = stat(layer_tar_src_path).st_size
            except OSError:
                layer_size = 0

            created = '2015-04-{:02d}T{:02d}:00:{:02d}.000000000Z'.format(
                i + 10, i, j)
            if i == 0:
                virtual_size = layer_size
            else:
                # NOTE: accumulates the previous layer's raw Size (not its
                # VirtualSize); read before this layer is appended.
                virtual_size = layer_size + self.layers[-1]['Size']
            self.layers.append({
                'Created': created,
                'Id': layer_id,
                'Parent': last_layer_id,
                'Size': layer_size,
                'RepoTags': ['<none>:<none>'],
                'VirtualSize': virtual_size,
            })

            assert short_id == \
                FauxDockerClient.SHORT_IDS_BY_PATH[j][max_path_idx - i]

    self.layers[max_path_idx]['RepoTags'] = ['getto:dachoppa']
    self.layers[-1]['RepoTags'] = ['greatest:hits', 'greatest:latest']

    # Normalize each layer in place and index it three ways.
    for layer in self.layers:
        normalizeimage(layer)
        self.layers_by_id[layer[':id']] = layer
        self.layers_by_id[layer[':short_id']] = layer
        for repo_tag in layer[':repo_tags']:
            self.layers_by_tag[repo_tag] = layer

    # Internal consistency checks over the freshly built indexes.
    for k, v in iteritems(self.layers_by_id):
        assert k == v['Id'][:len(k)].lower()
    assert self.layers_by_tag['getto:dachoppa'] == \
        self.layers[max_path_idx]
    assert self.layers_by_tag['getto:dachoppa']['Id'][:12] == \
        FauxDockerClient.SHORT_IDS_BY_PATH[0][0]
    assert self.layers_by_tag['greatest'] == self.layers[-1]
    assert self.layers_by_tag['greatest:latest'] == self.layers[-1]
    assert self.layers_by_tag['greatest:hits'] == self.layers[-1]
    assert self.layers_by_tag['greatest:hits']['Id'][:12] == \
        FauxDockerClient.SHORT_IDS_BY_PATH[1][0]

    self.layers.sort(key=imagekey, reverse=True)