def _get_manifest_list(self, image: ImageName) -> requests.Response:
    """Fetch (and cache) the manifest list for *image*.

    If the image is pinned by digest (``@sha256:``) and the registry does not
    return a manifest list for that reference, rebuild a ``<version>-<release>``
    tag from the image's config labels and retry with that tag.

    :param image: image to look up
    :return: registry response carrying the manifest list (may be falsy when
             the registry has none)
    :raises RuntimeError: when the fallback config fetch fails
    """
    if image in self.manifest_list_cache:
        return self.manifest_list_cache[image]

    # Remember the caller's key: the fallback below retags a *copy* of the
    # image, and we must cache under the reference callers will ask for.
    cache_key = image

    reg_client = self._get_registry_client(image.registry)
    manifest_list = reg_client.get_manifest_list(image)
    if '@sha256:' in str(image) and not manifest_list:
        # we want to adjust the tag only for manifest list fetching
        image = image.copy()
        try:
            config_blob = reg_client.get_config_from_registry(image, image.tag)
        except (HTTPError, RetryError, Timeout) as ex:
            self.log.warning('Unable to fetch config for %s, got error %s',
                             image, ex.response.status_code)
            raise RuntimeError('Unable to fetch config for base image') from ex

        release = config_blob['config']['Labels']['release']
        version = config_blob['config']['Labels']['version']
        docker_tag = "%s-%s" % (version, release)
        image.tag = docker_tag

        manifest_list = reg_client.get_manifest_list(image)

    # BUG FIX: the result used to be stored under the retagged copy, so a
    # repeated call with the original digest-pinned reference always missed
    # the cache and redid the registry round-trips. Store under the
    # original key instead.
    self.manifest_list_cache[cache_key] = manifest_list
    return self.manifest_list_cache[cache_key]
def _store_manifest_digest(self, image: ImageName, use_original_tag: bool) -> None:
    """Store media type and digest for manifest list or v2 schema 2 manifest digest

    The result is written to ``self.workflow.data.parent_images_digests``
    keyed by the image string, as a ``{media_type: 'sha256:...'}`` mapping.

    :param image: image whose manifest digest should be recorded
    :param use_original_tag: when True, restore the base image's original tag
        in the stored key (the working tag may have been replaced, e.g. for
        autorebuild)
    :raises RuntimeError: when neither a manifest list nor a v2 schema 2
        manifest can be fetched for the image
    """
    image_str = image.to_str()
    manifest_list = self._get_manifest_list(image)
    reg_client = self._get_registry_client(image.registry)
    if manifest_list:
        # digest is the sha256 of the manifest list response body itself
        digest_dict = get_checksums(BytesIO(manifest_list.content), ['sha256'])
        media_type = get_manifest_media_type('v2_list')
    else:
        # no manifest list available: fall back to the v2 schema 2 manifest
        digests_dict = reg_client.get_all_manifests(image, versions=('v2',))
        media_type = get_manifest_media_type('v2')
        try:
            manifest_digest_response = digests_dict['v2']
        except KeyError as exc:
            raise RuntimeError(
                'Unable to fetch manifest list or '
                'v2 schema 2 digest for {} (Does image exist?)'.format(image_str)
            ) from exc
        digest_dict = get_checksums(BytesIO(manifest_digest_response.content), ['sha256'])
    manifest_digest = 'sha256:{}'.format(digest_dict['sha256sum'])
    parent_digests = {media_type: manifest_digest}
    if use_original_tag:
        # image tag may have been replaced with a ref for autorebuild; use original tag
        # to simplify fetching parent_images_digests data in other plugins
        image = image.copy()
        base_image_key: ImageName = self.workflow.data.dockerfile_images.base_image_key
        image.tag = base_image_key.tag
        image_str = image.to_str()
    self.workflow.data.parent_images_digests[image_str] = parent_digests
def test_privileged_gitrepo_build(caplog, source_params):
    """Run a privileged git-repo build and check it produced build logs."""
    if MOCK:
        mock_docker()

    base = ImageName(repo="atomic-reactor-test-ssh-image")
    pushed = base.copy()
    pushed.registry = LOCALHOST_REGISTRY

    build_args = {
        "source": source_params,
        "image": pushed.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    }
    manager = PrivilegedBuildManager("buildroot-fedora", build_args)
    build_results = manager.build()

    tasker = DockerTasker()
    tasker.pull_image(pushed, insecure=True)

    # the build must have emitted at least some log output
    assert len(build_results.build_logs) > 0

    # clean up: drop the build container and the pushed image
    tasker.remove_container(build_results.container_id)
    tasker.remove_image(pushed)
class BuildManager(BuilderStateMachine):
    """
    initiates build and waits for it to finish, then it collects data
    """
    def __init__(self, build_image, build_args):
        """
        :param build_image: name of the image the build is performed in
        :param build_args: dict with at least 'image' and 'source'->'uri' keys
        """
        BuilderStateMachine.__init__(self)
        self.build_image = build_image
        self.build_args = build_args
        self.image = build_args['image']
        self.uri = build_args['source']['uri']
        self.temp_dir = None
        self.build_container_id = None
        # build image after build
        self.buildroot_image_id = None
        self.buildroot_image_name = None
        self.dt = DockerTasker()

    def _build(self, build_method):
        """
        build image from provided build_args

        Dumps build_args as JSON into a temp dir, hands that dir to
        *build_method* to start the build container, then streams its logs
        until the container exits. Ctrl-C force-removes the container and
        yields a result with return_code 1. The temp dir is always removed.

        :param build_method: callable(build_image, temp_dir) -> container id
        :return: BuildResults
        """
        logger.info("building image '%s'", self.image)
        self.ensure_not_built()
        self.temp_dir = tempfile.mkdtemp()
        temp_path = os.path.join(self.temp_dir, BUILD_JSON)
        try:
            with open(temp_path, 'w') as build_json:
                json.dump(self.build_args, build_json)
            self.build_container_id = build_method(self.build_image, self.temp_dir)
            try:
                # stream logs so the build output is shown while it runs
                logs_gen = self.dt.logs(self.build_container_id, stream=True)
                wait_for_command(logs_gen)
                return_code = self.dt.wait(self.build_container_id)
            except KeyboardInterrupt:
                logger.info("killing build container on user's request")
                self.dt.remove_container(self.build_container_id, force=True)
                results = BuildResults()
                results.return_code = 1
                return results
            else:
                results = self._load_results(self.build_container_id)
                results.return_code = return_code
                return results
        finally:
            shutil.rmtree(self.temp_dir)

    def _load_results(self, container_id):
        """
        load results from recent build

        NOTE(review): returns None when self.temp_dir is unset — callers
        presumably only invoke this after _build has set it; confirm.

        :param container_id: id of the finished build container
        :return: BuildResults
        """
        if self.temp_dir:
            dt = DockerTasker()
            # results_path = os.path.join(self.temp_dir, RESULTS_JSON)
            # df_path = os.path.join(self.temp_dir, 'Dockerfile')
            # try:
            #     with open(results_path, 'r') as results_fp:
            #         results = json.load(results_fp, cls=BuildResultsJSONDecoder)
            # except (IOError, OSError) as ex:
            #     logger.error("Can't open results: '%s'", ex)
            #     for l in self.dt.logs(self.build_container_id, stream=False):
            #         logger.debug(l.strip())
            #     raise RuntimeError("Can't open results: '%s'" % ex)
            # results.dockerfile = open(df_path, 'r').read()
            results = BuildResults()
            results.build_logs = dt.logs(container_id, stream=False)
            results.container_id = container_id
            return results

    def commit_buildroot(self):
        """
        create image from buildroot

        Commits the build container to an image named
        'buildroot-<image>' tagged with the current timestamp.

        :return: id of the committed buildroot image
        """
        logger.info("committing buildroot")
        self.ensure_is_built()
        commit_message = "docker build of '%s' (%s)" % (self.image, self.uri)
        self.buildroot_image_name = ImageName(
            repo="buildroot-%s" % self.image,
            # save the time when image was built
            tag=datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
        self.buildroot_image_id = self.dt.commit_container(self.build_container_id,
                                                           commit_message)
        return self.buildroot_image_id

    def push_buildroot(self, registry):
        """
        tag the committed buildroot image for *registry* and push it

        Requires commit_buildroot to have run first (it sets
        buildroot_image_name / buildroot_image_id).

        :param registry: registry to push the buildroot image to
        :return: result of DockerTasker.tag_and_push_image
        """
        logger.info("pushing buildroot to registry")
        self.ensure_is_built()
        image_name_with_registry = self.buildroot_image_name.copy()
        image_name_with_registry.registry = registry
        return self.dt.tag_and_push_image(
            self.buildroot_image_id,
            image_name_with_registry)