def test_dummy_source_dockerfile():
    """Verify that DummySource injects a fake Dockerfile.

    The inner and core codebase expect a Dockerfile to be present in the
    source directory, so DummySource must provide one on its own.
    """
    dummy = DummySource(None, None)
    assert dummy.get()
    dockerfile_path = os.path.join(dummy.get(), 'Dockerfile')
    assert os.path.exists(dockerfile_path)
def test_parse_dockerfile_again_after_data_is_loaded(context_dir, build_dir, tmpdir):
    """The workflow must not re-parse the Dockerfile once data is loaded."""
    # Shadow the fixture with a fresh context directory under tmpdir.
    context_dir = ContextDir(Path(tmpdir.join("context_dir")))
    loaded_data = ImageBuildWorkflowData.load_from_dir(context_dir)

    # Passing no source makes the workflow create a DummySource, whose
    # Dockerfile contains "FROM scratch".
    first_workflow = DockerBuildWorkflow(
        context_dir, build_dir, NAMESPACE, PIPELINE_RUN_NAME, loaded_data)
    assert first_workflow.data.dockerfile_images.original_parents == ["scratch"]

    # Persist the workflow data, then reload it for a second workflow.
    loaded_data.save(context_dir)

    git_source = DummySource("git", "https://git.host/")
    parser = DockerfileParser(git_source.source_path)
    parser.content = 'FROM fedora:35\nCMD ["bash", "--version"]'

    loaded_data = ImageBuildWorkflowData.load_from_dir(context_dir)
    # Neither of these must be called when data was already loaded.
    flexmock(DockerBuildWorkflow).should_receive(
        "_parse_dockerfile_images").never()
    flexmock(loaded_data.dockerfile_images).should_receive(
        "set_source_registry").never()

    second_workflow = DockerBuildWorkflow(
        context_dir, build_dir, NAMESPACE, PIPELINE_RUN_NAME, loaded_data,
        source=git_source)
    assert second_workflow.data.dockerfile_images.original_parents == ["scratch"], \
        "The dockerfile_images should not be changed."
def test_no_base_image(context_dir, build_dir):
    """A Dockerfile without any FROM line must make the workflow fail."""
    dummy = DummySource("git", "https://git.host/")
    parser = DockerfileParser(dummy.source_path)
    parser.content = "# no FROM\nADD spam /eggs"

    with pytest.raises(RuntimeError, match="no base image specified"):
        DockerBuildWorkflow(
            context_dir,
            build_dir,
            namespace=NAMESPACE,
            pipeline_run_name=PIPELINE_RUN_NAME,
            source=dummy,
        )
def __init__(self, image, source=None, prebuild_plugins=None, prepublish_plugins=None,
             postbuild_plugins=None, exit_plugins=None, plugin_files=None,
             openshift_build_selflink=None, client_version=None,
             buildstep_plugins=None, **kwargs):
    """
    Initialize all workflow state for one image build.

    :param source: dict, where/how to get source code to put in image
    :param image: str, tag for built image ([registry/]image_name[:tag])
    :param prebuild_plugins: list of dicts, arguments for pre-build plugins
    :param prepublish_plugins: list of dicts, arguments for test-build plugins
    :param postbuild_plugins: list of dicts, arguments for post-build plugins
    :param exit_plugins: list of dicts, arguments for exit plugins
    :param plugin_files: list of str, load plugins also from these files
    :param openshift_build_selflink: str, link to openshift build (if we're
           actually running on openshift) without the actual hostname/IP address
    :param client_version: str, osbs-client version used to render build json
    :param buildstep_plugins: list of dicts, arguments for build-step plugins
    """
    # Scratch directory holding the checked-out (or dummy) source tree.
    tmp_dir = tempfile.mkdtemp()
    if source is None:
        # No source provided: use a placeholder DummySource.
        self.source = DummySource(None, None, tmpdir=tmp_dir)
    else:
        self.source = get_source_instance_for(source, tmpdir=tmp_dir)
    self.image = image

    # Plugin configuration, one list per build phase.
    self.prebuild_plugins_conf = prebuild_plugins
    self.buildstep_plugins_conf = buildstep_plugins
    self.prepublish_plugins_conf = prepublish_plugins
    self.postbuild_plugins_conf = postbuild_plugins
    self.exit_plugins_conf = exit_plugins

    # Per-phase plugin results, populated by the plugin runners.
    self.prebuild_results = {}
    self.buildstep_result = {}
    self.postbuild_results = {}
    self.prepub_results = {}
    self.exit_results = {}
    # Starts out failed; replaced with the buildstep runner's result.
    self.build_result = BuildResult(fail_reason="not built")
    self.plugin_workspace = {}
    self.plugins_timestamps = {}
    self.plugins_durations = {}
    self.plugins_errors = {}
    self.autorebuild_canceled = False
    self.build_canceled = False
    self.plugin_failed = False
    self.plugin_files = plugin_files
    self.fs_watcher = FSWatcher()
    self.kwargs = kwargs
    self.builder = None
    self.built_image_inspect = None
    self.layer_sizes = []
    self.default_image_build_method = CONTAINER_DEFAULT_BUILD_METHOD
    self.storage_transport = DOCKER_STORAGE_TRANSPORT_NAME

    # list of images pulled during the build, to be deleted after the build
    self.pulled_base_images = set()

    # When an image is exported into tarball, it can then be processed by various plugins.
    # Each plugin that transforms the image should save it as a new file and append it to
    # the end of exported_image_sequence. Other plugins should then operate with last
    # member of this structure. Example:
    # [{'path': '/tmp/foo.tar', 'size': 12345678, 'md5sum': '<md5>', 'sha256sum': '<sha256>'}]
    # You can use util.get_exported_image_metadata to create a dict to append to this list.
    self.exported_image_sequence = []

    self.tag_conf = TagConf()
    self.push_conf = PushConf()

    # mapping of downloaded files; DON'T PUT ANYTHING BIG HERE!
    # "path/to/file" -> "content"
    self.files = {}

    self.openshift_build_selflink = openshift_build_selflink

    # List of RPMs that go into the final result, as per utils.rpm.parse_rpm_output
    self.image_components = None

    # List of all yum repos. The provided repourls might be changed (by resolve_composes) when
    # inheritance is enabled. This property holds the updated list of repos, allowing
    # post-build plugins (such as koji_import) to record them.
    self.all_yum_repourls = None

    # info about pre-declared build, build-id and token
    self.reserved_build_id = None
    self.reserved_token = None
    self.triggered_after_koji_task = None
    self.koji_source_nvr = {}
    self.koji_source_source_url = None
    self.koji_source_manifest = None

    # Plugins can store info here using the @annotation, @annotation_map,
    # @label and @label_map decorators from atomic_reactor.metadata
    self.annotations = {}
    self.labels = {}

    if client_version:
        logger.debug("build json was built by osbs-client %s", client_version)

    if kwargs:
        # Unknown keyword arguments are tolerated but only logged.
        logger.warning("unprocessed keyword arguments: %s", kwargs)
class DockerBuildWorkflow(object):
    """
    This class defines a workflow for building images:

    1. pull image from registry
    2. tag it properly if needed
    3. obtain source
    4. build image
    5. tag it
    6. push it to registries
    """

    def __init__(self, image, source=None, prebuild_plugins=None, prepublish_plugins=None,
                 postbuild_plugins=None, exit_plugins=None, plugin_files=None,
                 openshift_build_selflink=None, client_version=None,
                 buildstep_plugins=None, **kwargs):
        """
        Initialize all workflow state for one image build.

        :param source: dict, where/how to get source code to put in image
        :param image: str, tag for built image ([registry/]image_name[:tag])
        :param prebuild_plugins: list of dicts, arguments for pre-build plugins
        :param prepublish_plugins: list of dicts, arguments for test-build plugins
        :param postbuild_plugins: list of dicts, arguments for post-build plugins
        :param exit_plugins: list of dicts, arguments for exit plugins
        :param plugin_files: list of str, load plugins also from these files
        :param openshift_build_selflink: str, link to openshift build (if we're
               actually running on openshift) without the actual hostname/IP address
        :param client_version: str, osbs-client version used to render build json
        :param buildstep_plugins: list of dicts, arguments for build-step plugins
        """
        # Scratch directory holding the checked-out (or dummy) source tree.
        tmp_dir = tempfile.mkdtemp()
        if source is None:
            # No source provided: use a placeholder DummySource.
            self.source = DummySource(None, None, tmpdir=tmp_dir)
        else:
            self.source = get_source_instance_for(source, tmpdir=tmp_dir)
        self.image = image

        # Plugin configuration, one list per build phase.
        self.prebuild_plugins_conf = prebuild_plugins
        self.buildstep_plugins_conf = buildstep_plugins
        self.prepublish_plugins_conf = prepublish_plugins
        self.postbuild_plugins_conf = postbuild_plugins
        self.exit_plugins_conf = exit_plugins

        # Per-phase plugin results, populated by the plugin runners.
        self.prebuild_results = {}
        self.buildstep_result = {}
        self.postbuild_results = {}
        self.prepub_results = {}
        self.exit_results = {}
        # Starts out failed; replaced with the buildstep runner's result.
        self.build_result = BuildResult(fail_reason="not built")
        self.plugin_workspace = {}
        self.plugins_timestamps = {}
        self.plugins_durations = {}
        self.plugins_errors = {}
        self.autorebuild_canceled = False
        self.build_canceled = False
        self.plugin_failed = False
        self.plugin_files = plugin_files
        self.fs_watcher = FSWatcher()
        self.kwargs = kwargs
        self.builder = None
        self.built_image_inspect = None
        self.layer_sizes = []
        self.default_image_build_method = CONTAINER_DEFAULT_BUILD_METHOD
        self.storage_transport = DOCKER_STORAGE_TRANSPORT_NAME

        # list of images pulled during the build, to be deleted after the build
        self.pulled_base_images = set()

        # When an image is exported into tarball, it can then be processed by various plugins.
        # Each plugin that transforms the image should save it as a new file and append it to
        # the end of exported_image_sequence. Other plugins should then operate with last
        # member of this structure. Example:
        # [{'path': '/tmp/foo.tar', 'size': 12345678, 'md5sum': '<md5>', 'sha256sum': '<sha256>'}]
        # You can use util.get_exported_image_metadata to create a dict to append to this list.
        self.exported_image_sequence = []

        self.tag_conf = TagConf()
        self.push_conf = PushConf()

        # mapping of downloaded files; DON'T PUT ANYTHING BIG HERE!
        # "path/to/file" -> "content"
        self.files = {}

        self.openshift_build_selflink = openshift_build_selflink

        # List of RPMs that go into the final result, as per utils.rpm.parse_rpm_output
        self.image_components = None

        # List of all yum repos. The provided repourls might be changed (by resolve_composes) when
        # inheritance is enabled. This property holds the updated list of repos, allowing
        # post-build plugins (such as koji_import) to record them.
        self.all_yum_repourls = None

        # info about pre-declared build, build-id and token
        self.reserved_build_id = None
        self.reserved_token = None
        self.triggered_after_koji_task = None
        self.koji_source_nvr = {}
        self.koji_source_source_url = None
        self.koji_source_manifest = None

        # Plugins can store info here using the @annotation, @annotation_map,
        # @label and @label_map decorators from atomic_reactor.metadata
        self.annotations = {}
        self.labels = {}

        if client_version:
            logger.debug("build json was built by osbs-client %s", client_version)

        if kwargs:
            # Unknown keyword arguments are tolerated but only logged.
            logger.warning("unprocessed keyword arguments: %s", kwargs)

    def get_orchestrate_build_plugin(self):
        """
        Get the orchestrate_build plugin configuration for this workflow
        if present (will be present for orchestrator, not for worker).

        :return: orchestrate_build plugin configuration dict
        :raises: ValueError if the orchestrate_build plugin is not present
        """
        for plugin in self.buildstep_plugins_conf or []:
            if plugin['name'] == PLUGIN_BUILD_ORCHESTRATE_KEY:
                return plugin

        # Not an orchestrator build
        raise ValueError('Not an orchestrator build')

    def is_orchestrator_build(self):
        """
        Check if the plugin configuration for this workflow is for
        an orchestrator build or a worker build.

        :return: True if orchestrator build, False if worker build
        """
        try:
            self.get_orchestrate_build_plugin()
            return True
        except ValueError:
            return False

    @property
    def build_process_failed(self):
        """
        Has any aspect of the build process failed?
        """
        return self.build_result.is_failed() or self.plugin_failed

    def throw_canceled_build_exception(self, *args, **kwargs):
        # Installed as a SIGTERM handler in build_docker_image.
        self.build_canceled = True
        raise BuildCanceledException("Build was canceled")

    def build_docker_image(self):
        """
        build docker image

        Runs the pre-build, buildstep, prepublish, postbuild and finally the
        exit plugins, in that order.

        :return: BuildResult
        """
        exception_being_handled = False
        self.builder = InsideBuilder(self.source, self.image)
        # Make sure exit_runner is defined for finally block
        exit_runner = None
        try:
            self.fs_watcher.start()
            # Cancellation (SIGTERM) turns into BuildCanceledException.
            signal.signal(signal.SIGTERM, self.throw_canceled_build_exception)
            prebuild_runner = PreBuildPluginsRunner(self.builder.tasker, self,
                                                    self.prebuild_plugins_conf,
                                                    plugin_files=self.plugin_files)
            prepublish_runner = PrePublishPluginsRunner(self.builder.tasker, self,
                                                        self.prepublish_plugins_conf,
                                                        plugin_files=self.plugin_files)
            postbuild_runner = PostBuildPluginsRunner(self.builder.tasker, self,
                                                      self.postbuild_plugins_conf,
                                                      plugin_files=self.plugin_files)
            # time to run pre-build plugins, so they can access cloned repo
            logger.info("running pre-build plugins")
            try:
                prebuild_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more prebuild plugins failed: %s", ex)
                raise
            except AutoRebuildCanceledException as ex:
                logger.info(str(ex))
                self.autorebuild_canceled = True
                raise

            # we are delaying initialization, because prebuild plugin reactor_config
            # might change build method
            buildstep_runner = BuildStepPluginsRunner(self.builder.tasker, self,
                                                      self.buildstep_plugins_conf,
                                                      plugin_files=self.plugin_files)

            logger.info("running buildstep plugins")
            try:
                self.build_result = buildstep_runner.run()

                if self.build_result.is_failed():
                    raise PluginFailedException(self.build_result.fail_reason)
            except PluginFailedException as ex:
                self.builder.is_built = False
                logger.error('buildstep plugin failed: %s', ex)
                raise

            self.builder.is_built = True
            if self.build_result.is_image_available():
                self.builder.image_id = self.build_result.image_id

            # run prepublish plugins
            try:
                prepublish_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more prepublish plugins failed: %s", ex)
                raise

            if self.build_result.is_image_available():
                self.built_image_inspect = self.builder.inspect_built_image()
                history = self.builder.tasker.get_image_history(self.builder.image_id)
                diff_ids = self.built_image_inspect[INSPECT_ROOTFS][INSPECT_ROOTFS_LAYERS]

                # diff_ids is ordered oldest first
                # history is ordered newest first
                # We want layer_sizes to be ordered oldest first
                self.layer_sizes = [{"diff_id": diff_id, "size": layer['Size']}
                                    for (diff_id, layer) in zip(diff_ids, reversed(history))]

            try:
                postbuild_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more postbuild plugins failed: %s", ex)
                raise

            return self.build_result
        except Exception as ex:
            logger.debug("caught exception (%s) so running exit plugins",
                         exception_message(ex))
            exception_being_handled = True
            raise
        finally:
            # We need to make sure all exit plugins are executed
            # (SIGTERM is ignored while they run).
            signal.signal(signal.SIGTERM, lambda *args: None)

            exit_runner = ExitPluginsRunner(self.builder.tasker, self,
                                            self.exit_plugins_conf,
                                            keep_going=True,
                                            plugin_files=self.plugin_files)
            try:
                exit_runner.run(keep_going=True)
            except PluginFailedException as ex:
                logger.error("one or more exit plugins failed: %s", ex)

                # raise exception only in case that there is no previous exception being already
                # handled to prevent replacing original exceptions (root cause) with exceptions
                # from exit plugins
                if not exception_being_handled:
                    raise ex
            finally:
                self.source.remove_tmpdir()
                self.fs_watcher.finish()

            signal.signal(signal.SIGTERM, signal.SIG_DFL)
def __init__(
    self,
    context_dir: ContextDir,
    build_dir: RootBuildDir,
    namespace: str,
    pipeline_run_name: str,
    data: Optional[ImageBuildWorkflowData] = None,
    source: Optional[Source] = None,
    plugins: Optional[PluginsDef] = None,
    user_params: Optional[dict] = None,
    reactor_config_path: str = REACTOR_CONFIG_FULL_PATH,
    plugin_files: Optional[List[str]] = None,
    client_version: Optional[str] = None,
):
    """
    Initialize the workflow for one image build task.

    :param context_dir: the directory passed to task --context-dir argument.
    :type context_dir: ContextDir
    :param build_dir: a directory holding all the artifacts to build an image.
    :type build_dir: RootBuildDir
    :param data: preloaded workflow data; a fresh one is created when omitted.
    :type data: ImageBuildWorkflowData
    :param source: where/how to get source code to put in image
    :param namespace: OpenShift namespace of the task
    :param pipeline_run_name: PipelineRun name to reference PipelineRun
    :param plugins: the plugins to be executed in this workflow
    :param user_params: user (and other) params that control various aspects of the build
    :param reactor_config_path: path to atomic-reactor configuration file
    :param plugin_files: load plugins also from these files
    :param client_version: osbs-client version used to render build json
    """
    self.context_dir = context_dir
    self.build_dir = build_dir
    # Fall back to fresh/default objects when the optional ones are omitted.
    self.data = data or ImageBuildWorkflowData()
    self.namespace = namespace
    self.pipeline_run_name = pipeline_run_name
    self.source = source or DummySource(None, None)
    self.plugins = plugins or PluginsDef()
    self.user_params = user_params or self._default_user_params.copy()

    self.plugin_files = plugin_files
    self.fs_watcher = FSWatcher()

    self.storage_transport = DOCKER_STORAGE_TRANSPORT_NAME

    if client_version:
        logger.debug("build json was built by osbs-client %s", client_version)

    # get info about base image from dockerfile
    build_file_path, build_file_dir = self.source.get_build_file_path()

    self.df_dir = build_file_dir
    # Lazily resolved path to the Dockerfile.
    self._df_path: Optional[str] = None

    self.conf = Configuration(config_path=reactor_config_path)

    # If the Dockerfile will be entirely generated from the container.yaml
    # (in the Flatpak case, say), then a plugin needs to create the Dockerfile
    # and set the base image
    if build_file_path.endswith(DOCKERFILE_FILENAME):
        self.reset_dockerfile_images(build_file_path)
def mock_source(source_dir):
    """Create a DummySource that pretends to be a git checkout."""
    dummy = DummySource("git", "https://git.host/app-operator",
                        workdir=str(source_dir))
    # Materialize the fake source tree before handing it out.
    dummy.get()
    return dummy
def dummy_source(source_dir):
    """Return a bare DummySource rooted in *source_dir*."""
    src = DummySource(None, None, workdir=source_dir)
    return src