def sort_annotations(self, all_annotations):
    """
    Group worker-build digests by registry hostname and platform.

    :param all_annotations: dict mapping platform name to that worker
        build's annotations; each annotation carries a 'digests' list of
        dicts with 'registry', 'version' and 'repository' keys
    :return: dict of dicts; <result>[registry][platform] is the single
        digest selected for that registry/platform combination
    :raises RuntimeError: if no worker builds are present, or if any
        configured registry is missing digests for some platform
    """
    all_platforms = set(all_annotations)
    if not all_platforms:
        # Fail loudly instead of silently returning empty per-registry
        # maps; matches the behaviour of the orchestrator variant of
        # this method.
        raise RuntimeError("No worker builds found, cannot group them")

    sorted_digests = {}
    for plat, annotation in all_annotations.items():
        for digest in annotation['digests']:
            hostname = registry_hostname(digest['registry'])
            media_type = get_manifest_media_type(digest['version'])
            if not self.valid_media_type(media_type):
                continue

            platforms = sorted_digests.setdefault(hostname, {})
            repos = platforms.setdefault(plat, [])
            repos.append(digest)

    sources = {}
    for registry in self.registries:
        hostname = registry_hostname(registry)
        platforms = sorted_digests.get(hostname, {})
        if set(platforms) != all_platforms:
            raise RuntimeError("Missing platforms for registry {}: found {}, expected {}"
                               .format(registry, sorted(platforms), sorted(all_platforms)))

        # Pick a deterministic digest per platform: the one from the
        # alphabetically-first repository.
        selected_digests = {}
        for p, repos in platforms.items():
            selected_digests[p] = sorted(repos, key=lambda d: d['repository'])[0]

        sources[registry] = selected_digests

    return sources
def sort_annotations(self):
    """
    Return a map of maps to look up a single "worker digest" that has
    information about where to find an image manifest for each
    registry/architecture combination:

      worker_digest = <result>[registry][architecture]

    :raises RuntimeError: if there are no worker builds, or if a
        configured registry is missing digests for some platform
    """
    all_annotations = self.workflow.build_result.annotations['worker-builds']
    all_platforms = set(all_annotations)
    if len(all_platforms) == 0:
        raise RuntimeError("No worker builds found, cannot group them")

    # hostname -> platform -> [digest, ...]
    grouped = {}
    for platform, annotation in all_annotations.items():
        for digest in annotation['digests']:
            host = registry_hostname(digest['registry'])
            media_type = get_manifest_media_type(digest['version'])
            if media_type not in self.manifest_media_types:
                continue
            grouped.setdefault(host, {}).setdefault(platform, []).append(digest)

    sources = {}
    for registry, registry_conf in self.registries.items():
        if registry_conf.get('version') == 'v1':
            # v1 registries are skipped entirely
            continue

        platform_digests = grouped.get(registry_hostname(registry), {})
        if set(platform_digests) != all_platforms:
            raise RuntimeError(
                "Missing platforms for registry {}: found {}, expected {}".
                format(registry, sorted(platform_digests), sorted(all_platforms)))

        # For each platform keep the digest from the alphabetically-first
        # repository (deterministic choice).
        sources[registry] = {
            platform: min(repos, key=lambda d: d['repository'])
            for platform, repos in platform_digests.items()
        }

    return sources
def handle_worker_digests(self, session, worker_digests, deleted_digests):
    """
    Delete the worker-build digests recorded for this session's registry.

    :param session: RegistrySession for the registry being processed
    :param worker_digests: dict mapping registry hostname to digest dicts
    :param deleted_digests: set of digests already removed; updated in place
    :return: True if this registry had worker digests to handle,
        False otherwise
    """
    registry_noschema = registry_hostname(session.registry)
    if registry_noschema not in worker_digests:
        return False

    # Remove manifest list first to avoid broken lists in case an error occurs
    self.delete_manifest_lists(session, registry_noschema, deleted_digests)

    for digest in worker_digests[registry_noschema]:
        if digest['digest'] in deleted_digests:
            # Manifest schema version 2 uses the same digest
            # for all tags
            self.log.info('digest already deleted %s', digest['digest'])
            return True

        url = self.make_url(digest['repository'], digest['digest'])
        manifest = self.make_manifest(registry_noschema, digest['repository'],
                                      digest['digest'])
        if self.request_delete(session, url, manifest):
            deleted_digests.add(digest['digest'])

    return True
def handle_worker_digests(self, session, worker_digests, deleted_digests):
    """
    Remove the worker-build digests that were pushed to this registry.

    :param session: RegistrySession for the registry being processed
    :param worker_digests: dict of registry hostname -> list of digest dicts
    :param deleted_digests: set of already-removed digests; mutated in place
    :return: True when worker digests existed for this registry, else False
    """
    hostname = registry_hostname(session.registry)
    if hostname not in worker_digests:
        return False

    # Remove manifest list first to avoid broken lists in case an error occurs
    self.delete_manifest_lists(session, hostname, deleted_digests)

    digests = worker_digests[hostname]
    for entry in digests:
        digest_value = entry['digest']
        if digest_value in deleted_digests:
            # Manifest schema version 2 uses the same digest
            # for all tags
            self.log.info('digest already deleted %s', digest_value)
            return True

        target_url = self.make_url(entry['repository'], digest_value)
        manifest = self.make_manifest(hostname, entry['repository'], digest_value)
        if self.request_delete(session, target_url, manifest):
            deleted_digests.add(digest_value)

    return True
def test_registry_session(tmpdir, registry, insecure, method, responses_method):
    """RegistrySession sends Authorization from .dockercfg and falls back
    from https to http when insecure is set."""
    temp_dir = mkdtemp(dir=str(tmpdir))
    dockercfg_contents = {
        registry_hostname(registry): {
            'username': '******',
            'password': '******'
        }
    }
    with open(os.path.join(temp_dir, '.dockercfg'), 'w+') as dockerconfig:
        dockerconfig.write(json.dumps(dockercfg_contents))

    session = RegistrySession(registry, insecure=insecure, dockercfg_path=temp_dir)

    path = '/v2/test/image/manifests/latest'
    if registry.startswith('http'):
        url = registry + path
    elif insecure:
        # make the https attempt fail so the session retries over http
        responses.add(responses_method, 'https://' + registry + path,
                      body=ConnectionError())
        url = 'http://' + registry + path
    else:
        url = 'https://' + registry + path

    def request_callback(request, all_headers=True):
        # credentials from .dockercfg must be attached to the request
        assert request.headers.get('Authorization') is not None
        return (200, {}, 'A-OK')

    responses.add_callback(responses_method, url, request_callback)

    res = method(session, path)
    assert res.text == 'A-OK'
def test_registry_session(tmpdir, registry, insecure, method, responses_method):
    """Check that requests made through RegistrySession carry credentials
    loaded from the .dockercfg in dockercfg_path."""
    config_dir = mkdtemp(dir=str(tmpdir))
    config_path = os.path.join(config_dir, '.dockercfg')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            json.dumps({
                registry_hostname(registry): {
                    'username': '******',
                    'password': '******'
                }
            }))

    session = RegistrySession(registry, insecure=insecure,
                              dockercfg_path=config_dir)

    path = '/v2/test/image/manifests/latest'
    if registry.startswith('http'):
        url = registry + path
    else:
        https_url = 'https://' + registry + path
        if insecure:
            # https fails with a connection error, forcing http fallback
            responses.add(responses_method, https_url, body=ConnectionError())
            url = 'http://' + registry + path
        else:
            url = https_url

    def request_callback(request, all_headers=True):
        assert request.headers.get('Authorization') is not None
        return (200, {}, 'A-OK')

    responses.add_callback(responses_method, url, request_callback)

    res = method(session, path)
    assert res.text == 'A-OK'
def sort_annotations(self):
    """
    Return a map of maps to look up a single "worker digest" that has
    information about where to find an image manifest for each
    registry/architecture combination:

      worker_digest = <result>[registry][architecture]

    :raises RuntimeError: if no worker builds exist, or a registry lacks
        digests for some platform
    """
    all_annotations = self.workflow.build_result.annotations['worker-builds']
    all_platforms = set(all_annotations)
    if not all_platforms:
        raise RuntimeError("No worker builds found, cannot group them")

    by_hostname = {}  # hostname -> platform -> list of digests
    for plat, annotation in all_annotations.items():
        for digest in annotation['digests']:
            media_type = get_manifest_media_type(digest['version'])
            if media_type not in self.manifest_media_types:
                # unsupported media type for grouping
                continue
            hostname = registry_hostname(digest['registry'])
            per_platform = by_hostname.setdefault(hostname, {})
            per_platform.setdefault(plat, []).append(digest)

    sources = {}
    for registry, registry_conf in self.registries.items():
        if registry_conf.get('version') == 'v1':
            # v1 registries are not considered
            continue

        platforms = by_hostname.get(registry_hostname(registry), {})
        if set(platforms) != all_platforms:
            raise RuntimeError("Missing platforms for registry {}: found {}, expected {}"
                               .format(registry, sorted(platforms), sorted(all_platforms)))

        selected_digests = {}
        for p, repos in platforms.items():
            # deterministic pick: digest from the first repository by name
            selected_digests[p] = sorted(repos, key=lambda d: d['repository'])[0]
        sources[registry] = selected_digests

    return sources
def run(self):
    """
    Delete pushed image manifests from every configured registry.

    :return: set of digests that were deleted
    """
    deleted_digests = set()
    worker_digests = self.get_worker_digests()

    for registry, registry_conf in self.registries.items():
        registry_noschema = registry_hostname(registry)
        push_conf_registry = self.find_registry(registry_noschema, self.workflow)

        # 'insecure' didn't used to be set in the registry config passed to this
        # plugin - it would simply be inherited from the push_conf. To handle
        # orchestrated builds, we need to have it configured for this plugin,
        # but, if not set, check in the push_conf for compat.
        if 'insecure' in registry_conf:
            insecure = registry_conf['insecure']
        elif push_conf_registry:
            insecure = push_conf_registry.insecure
        else:
            insecure = False

        session = RegistrySession(registry, insecure=insecure,
                                  dockercfg_path=registry_conf.get('secret'))

        # orchestrator builds use worker_digests
        orchestrator_delete = self.handle_worker_digests(session, worker_digests,
                                                         deleted_digests)

        if not push_conf_registry:
            # only warn if we're not running in the orchestrator
            if not orchestrator_delete:
                self.log.warning(
                    "requested deleting image from %s but we haven't pushed there",
                    registry_noschema)
            continue

        # worker node and manifests use push_conf_registry
        if self.handle_registry(session, push_conf_registry, deleted_digests):
            # delete these temp registries
            self.workflow.push_conf.remove_docker_registry(push_conf_registry)

    return deleted_digests
def __init__(self, registry):
    """Register mock handlers for the docker v2 registry endpoints."""
    self.hostname = registry_hostname(registry)
    self.repos = {}

    # (HTTP method, URL pattern, handler) for each supported endpoint
    routes = [
        (responses.GET, r'/v2/(.*)/manifests/([^/]+)', self._get_manifest),
        (responses.HEAD, r'/v2/(.*)/manifests/([^/]+)', self._get_manifest),
        (responses.PUT, r'/v2/(.*)/manifests/([^/]+)', self._put_manifest),
        (responses.GET, r'/v2/(.*)/blobs/([^/]+)', self._get_blob),
        (responses.HEAD, r'/v2/(.*)/blobs/([^/]+)', self._get_blob),
        (responses.POST, r'/v2/(.*)/blobs/uploads/\?mount=([^&]+)&from=(.+)',
         self._mount_blob),
    ]
    for http_method, pattern, handler in routes:
        self._add_pattern(http_method, pattern, handler)
def __init__(self, registry):
    """Set up mock responses for manifest, blob and blob-mount endpoints."""
    self.hostname = registry_hostname(registry)
    self.repos = {}

    add = self._add_pattern
    manifest_pattern = r'/v2/(.*)/manifests/([^/]+)'
    blob_pattern = r'/v2/(.*)/blobs/([^/]+)'
    mount_pattern = r'/v2/(.*)/blobs/uploads/\?mount=([^&]+)&from=(.+)'

    # manifest endpoints
    add(responses.GET, manifest_pattern, self._get_manifest)
    add(responses.HEAD, manifest_pattern, self._get_manifest)
    add(responses.PUT, manifest_pattern, self._put_manifest)
    # blob endpoints
    add(responses.GET, blob_pattern, self._get_blob)
    add(responses.HEAD, blob_pattern, self._get_blob)
    # cross-repo blob mount
    add(responses.POST, mount_pattern, self._mount_blob)
def run(self):
    """
    Remove pushed image manifests from each configured registry and
    return the set of deleted digests.
    """
    deleted_digests = set()
    worker_digests = self.get_worker_digests()

    for registry, registry_conf in self.registries.items():
        hostname = registry_hostname(registry)
        push_conf_registry = self.find_registry(hostname, self.workflow)

        try:
            insecure = registry_conf['insecure']
        except KeyError:
            # 'insecure' didn't used to be set in the registry config passed to this
            # plugin - it would simply be inherited from the push_conf. To handle
            # orchestrated builds, we need to have it configured for this plugin,
            # but, if not set, check in the push_conf for compat.
            insecure = push_conf_registry.insecure if push_conf_registry else False

        secret_path = registry_conf.get('secret')
        session = RegistrySession(registry, insecure=insecure,
                                  dockercfg_path=secret_path)

        # orchestrator builds use worker_digests
        orchestrator_delete = self.handle_worker_digests(
            session, worker_digests, deleted_digests)

        if push_conf_registry:
            # worker node and manifests use push_conf_registry
            if self.handle_registry(session, push_conf_registry, deleted_digests):
                # delete these temp registries
                self.workflow.push_conf.remove_docker_registry(push_conf_registry)
        elif not orchestrator_delete:
            # only warn if we're not running in the orchestrator
            self.log.warning(
                "requested deleting image from %s but we haven't pushed there",
                hostname)

    return deleted_digests
def handle_registry(self, session, push_conf_registry, deleted_digests):
    """
    Delete the default digest for every tag pushed to this registry.

    :param session: RegistrySession for the registry
    :param push_conf_registry: push_conf entry carrying the tag -> digests map
    :param deleted_digests: set of already-removed digests; updated in place
    :return: True if any digest was deleted (or was already deleted)
    """
    registry_noschema = registry_hostname(session.registry)
    deleted = False

    for tag, digests in push_conf_registry.digests.items():
        digest = digests.default
        if digest in deleted_digests:
            # Manifest schema version 2 uses the same digest
            # for all tags
            self.log.info('digest already deleted %s', digest)
            deleted = True
            continue

        repo, _, _ = tag.partition(':')
        url = self.make_url(repo, digest)
        manifest = self.make_manifest(registry_noschema, repo, digest)
        if self.request_delete(session, url, manifest):
            deleted_digests.add(digest)
            deleted = True

    return deleted
def handle_registry(self, session, push_conf_registry, deleted_digests):
    """
    Remove the digest behind each pushed tag in this registry.

    :param session: RegistrySession for the registry
    :param push_conf_registry: push_conf entry with per-tag digests
    :param deleted_digests: set of digests removed so far; mutated in place
    :return: True when at least one digest was (or had been) deleted
    """
    host = registry_hostname(session.registry)
    anything_deleted = False

    for tag, digests in push_conf_registry.digests.items():
        default_digest = digests.default
        if default_digest in deleted_digests:
            # Manifest schema version 2 uses the same digest
            # for all tags
            self.log.info('digest already deleted %s', default_digest)
            anything_deleted = True
            continue

        repo = tag.split(':')[0]
        delete_url = self.make_url(repo, default_digest)
        manifest = self.make_manifest(host, repo, default_digest)
        if self.request_delete(session, delete_url, manifest):
            deleted_digests.add(default_digest)
            anything_deleted = True

    return anything_deleted
def get_worker_digests(self):
    """
    If we are being called from an orchestrator build, collect the
    worker node data and recreate the data locally.

    :return: dict mapping registry hostname to a list of digest dicts;
        empty when this is a worker build
    """
    try:
        builds = self.workflow.build_result.annotations['worker-builds']
    except (TypeError, KeyError):
        # This annotation is only set for the orchestrator build.
        # It's not present, so this is a worker build.
        return {}

    worker_digests = {}
    for plat, annotation in builds.items():
        digests = annotation['digests']
        self.log.debug("build %s has digests: %s", plat, digests)
        for digest in digests:
            reg = registry_hostname(digest['registry'])
            worker_digests.setdefault(reg, []).append(digest)

    return worker_digests
def get_worker_digests(self):
    """
    If we are being called from an orchestrator build, collect the
    worker node data and recreate the data locally.

    :return: dict of registry hostname -> list of worker digest dicts,
        or an empty dict for a worker build
    """
    annotations = self.workflow.build_result.annotations
    try:
        builds = annotations['worker-builds']
    except (TypeError, KeyError):
        # This annotation is only set for the orchestrator build.
        # It's not present, so this is a worker build.
        return {}

    by_registry = {}
    for platform, annotation in builds.items():
        platform_digests = annotation['digests']
        self.log.debug("build %s has digests: %s", platform, platform_digests)
        for entry in platform_digests:
            hostname = registry_hostname(entry['registry'])
            bucket = by_registry.setdefault(hostname, [])
            bucket.append(entry)

    return by_registry
def test_registry_hostname(registry, expected):
    """Each parametrized registry reference maps to its expected hostname."""
    hostname = registry_hostname(registry)
    assert hostname == expected
def __init__(self, registry):
    """Minimal mock registry: only the manifest PUT endpoint is handled."""
    self.hostname = registry_hostname(registry)
    self.repos = {}

    manifest_pattern = r'/v2/(.*)/manifests/([^/]+)'
    self._add_pattern(responses.PUT, manifest_pattern, self._put_manifest)
def test_registry_hostname(registry, expected):
    # Parametrized pairs: the computed hostname must equal the expectation.
    assert expected == registry_hostname(registry)