def test_osbs_copy_artifacts_to_dist_git(mocker, tmpdir, artifact, src, target):
    """
    Parametrized via the 'artifact', 'src' and 'target' fixtures: verifies that
    the OSBS builder's prepare() copies the generated Dockerfile and the
    artifact from the work directory into the dist-git repository checkout.
    """
    os.makedirs(os.path.join(str(tmpdir), 'image'))

    # Intercept file copies and the DistGit interaction so the test only
    # observes the calls made; no real git repository is touched.
    copy_mock = mocker.patch('cekit.builders.osbs.shutil.copy')
    dist_git_class = mocker.patch('cekit.builders.osbs.DistGit')
    dist_git_class.return_value = DistGitMock()
    config.cfg['common'] = {'redhat': True, 'work_dir': str(tmpdir)}

    image_descriptor = {
        'schema_version': 1,
        'from': 'centos:latest',
        'name': 'test/image',
        'version': '1.0',
        'labels': [{'name': 'foo', 'value': 'bar'}, {'name': 'labela', 'value': 'a'}],
        'osbs': {
            'repository': {
                'name': 'repo',
                'branch': 'branch'
            }
        },
        'artifacts': [artifact]
    }

    image = Image(image_descriptor, os.path.dirname(os.path.abspath(str(tmpdir))))

    # TODO Rewrite this
    # This is only to mark that the plain artifact was not available in koji
    # So we need to add it to lookaside cache. This does not hurt non-plain artifacts, so we
    # can add it for all artifacts
    image.get('artifacts')[0]['lookaside'] = True

    builder = create_osbs_build_object(mocker, 'osbs', {})
    builder.target = str(tmpdir)
    builder.prepare(image)

    # DistGit must be constructed against the checkout under <target>/osbs/repo
    dist_git_class.assert_called_once_with(
        os.path.join(str(tmpdir), 'osbs', 'repo'), str(tmpdir), 'repo', 'branch')

    calls = [
        mocker.call('Dockerfile', os.path.join(str(tmpdir), 'osbs/repo/Dockerfile')),
        mocker.call(os.path.join(str(tmpdir), src), os.path.join(str(tmpdir), target))
    ]

    copy_mock.assert_has_calls(calls)
def test_module_processing_fail_when_no_modules_of_specified_name_can_be_found():
    """process_install_list() must raise when a requested module name is unknown."""
    img = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
"""), 'foo')

    registry = ModuleRegistry()
    registry.add_module(Module(yaml.safe_load("""
name: org.test.module.a
version: 1.0
"""), 'path', 'artifact_path'))

    # Module 'b' is requested but never registered.
    requested = [
        Map({'name': 'org.test.module.a', 'version': '1.0'}),
        Map({'name': 'org.test.module.b'}),
    ]
    resolved = OrderedDict()

    with pytest.raises(CekitError) as excinfo:
        img.process_install_list(img, requested, resolved, registry)

    assert "There are no modules with 'org.test.module.b' name available" in str(excinfo.value)
def test_image_overrides_with_content_sets_file_none(mocker):
    """
    Overriding 'content_sets_file' with None must drop both the loaded
    'content_sets' data and the 'content_sets_file' key from packages.
    """
    # Pretend the referenced content-sets file exists on disk...
    with mocker.mock_module.patch('cekit.descriptor.packages.os.path.exists') as exists_mock:
        exists_mock.return_value = True
        # ...and serve its content from a mocked open() instead of the filesystem.
        with mocker.mock_module.patch('cekit.descriptor.packages.open',
                                      mocker.mock_open(read_data='{"arch": ["a", "b"]}')):
            image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
packages:
    install:
        - abc def
    content_sets_file: cs.yaml
"""), 'foo')

            # File content was loaded into 'content_sets'; the file key itself
            # is consumed and not kept on the descriptor.
            assert image.packages.content_sets == {'arch': ['a', 'b']}
            assert 'content_sets_file' not in image.packages

            image.apply_image_overrides(
                [Overrides({'packages': {'content_sets_file': None}}, "a/path")])

            assert 'content_sets' not in image.packages
            assert 'content_sets_file' not in image.packages
def test_image_overrides_with_content_sets():
    """Overriding 'content_sets' replaces the whole mapping, not merges it."""
    img = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
packages:
    install:
        - abc
        - def
    content_sets:
        arch:
            - namea
            - nameb
"""), 'foo')

    assert img.packages.content_sets == {'arch': ['namea', 'nameb']}
    assert 'content_sets_file' not in img.packages

    img.apply_image_overrides(
        [Overrides({'packages': {'content_sets': {'arch': ['new-arch']}}}, "a/path")])

    assert img.packages.content_sets == {'arch': ['new-arch']}
    assert 'content_sets_file' not in img.packages
def __init__(self, descriptor_path, target, builder, overrides, params):
    """
    :param descriptor_path: path to the image descriptor file
    :param target: path to the target (output) directory
    :param builder: builder type name, stored as self._type
    :param overrides: iterable of overrides to apply (may be falsy)
    :param params: extra parameters kept for later use
    """
    self._type = builder

    descriptor = tools.load_descriptor(descriptor_path)

    # if there is a local modules directory and no modules are defined
    # we will inject it for a backward compatibility
    # NOTE(review): the comment above says "no modules are defined", but the
    # condition below requires a 'modules' key to be present and only fills in
    # a missing/empty 'repositories' list — confirm which behavior is intended.
    local_mod_path = os.path.join(
        os.path.abspath(os.path.dirname(descriptor_path)), 'modules')
    if os.path.exists(local_mod_path) and 'modules' in descriptor:
        modules = descriptor.get('modules')
        if not modules.get('repositories'):
            modules['repositories'] = [{'path': local_mod_path, 'name': 'modules'}]

    self.image = Image(descriptor, os.path.dirname(os.path.abspath(descriptor_path)))
    self.target = target
    self._params = params
    self._fetch_repos = False

    # Each override produces a new merged image object.
    if overrides:
        for override in overrides:
            self.image = self.override(override)

    logger.info("Initializing image descriptor...")
def test_image_overrides_packages_repositories_replace():
    """Overriding a repository with the same name replaces its definition entirely."""
    img = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
packages:
    repositories:
        - name: scl
          rpm: centos-release-scl
"""), 'foo')

    assert len(img.packages.repositories) == 1
    assert img.packages.repositories[0].rpm == 'centos-release-scl'
    assert img.packages.repositories[0].name == 'scl'

    replacement = Overrides(
        {'packages': {'repositories': [{"id": "rhel7-extras-rpm", "name": "scl"}]}},
        "a/path")
    img.apply_image_overrides([replacement])

    # Same name, but the old 'rpm' key is gone and 'id' took its place.
    repos = img.packages.repositories
    assert len(repos) == 1
    assert repos[0].name == 'scl'
    assert repos[0].id == 'rhel7-extras-rpm'
    assert 'rpm' not in repos[0]
def test_module_processing_fail_when_module_not_found_for_specific_version():
    """
    Requesting a known module name at a version that is not registered must
    raise CekitError listing the versions that are available.
    """
    image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
"""), 'foo')

    module_a = Module(yaml.safe_load("""
name: org.test.module.a
version: 1.0
"""), 'path', 'artifact_path')

    module_registry = ModuleRegistry()
    module_registry.add_module(module_a)

    resulting_install_list = OrderedDict()
    # Version 1.1 is requested, but only 1.0 is registered.
    to_install_list = [Map({'name': 'org.test.module.a', 'version': '1.1'})]

    with pytest.raises(CekitError) as excinfo:
        image.process_install_list(image, to_install_list, resulting_install_list, module_registry)

    assert "Module 'org.test.module.a' with version '1.1' could not be found, available versions: 1.0" in str(excinfo.value)
def init(self):
    """
    Initializes the image object.

    Loads the image descriptor (single- or multi-stage), applies overrides to
    every image, builds the module registry, resolves modules and defaults,
    then adds the build labels.
    """
    LOGGER.debug("Removing old target directory")
    shutil.rmtree(self.target, ignore_errors=True)
    os.makedirs(os.path.join(self.target, 'image'))

    # Read the main image descriptor and create an Image object from it
    descriptor = tools.load_descriptor(self._descriptor_path)

    # A list descriptor means a multi-stage build: all entries but the last
    # are builder images, the last one is the target image.
    if isinstance(descriptor, list):
        LOGGER.info("Descriptor contains multiple elements, assuming multi-stage image")
        LOGGER.info("Found {} builder image(s) and one target image".format(
            len(descriptor[:-1])))

        # Iterate over images defined in image descriptor and
        # create Image objects out of them
        for image_descriptor in descriptor[:-1]:
            self.builder_images.append(
                Image(image_descriptor,
                      os.path.dirname(os.path.abspath(self._descriptor_path))))

        descriptor = descriptor[-1]

    self.image = Image(descriptor, os.path.dirname(os.path.abspath(self._descriptor_path)))

    # Construct list of all images (builder images + main one)
    self.images = [self.image] + self.builder_images

    for image in self.images:
        # Apply overrides to all image definitions:
        # intermediate (builder) images and target image as well
        # It is required to build the module registry
        image.apply_image_overrides(self._overrides)

    # Load definitions of modules
    # We need to load it after we apply overrides so that any changes to modules
    # will be reflected there as well
    self.build_module_registry()

    for image in self.images:
        # Process included modules
        image.apply_module_overrides(self._module_registry)
        image.process_defaults()

    # Add build labels
    self.add_build_labels()
def test_module_processing_modules_with_multiple_versions(caplog):
    """
    When a module is registered in several versions and the install list does
    not pin one, the newest version is chosen and a debug message is logged.
    """
    caplog.set_level(logging.DEBUG, logger="cekit")

    image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
"""), 'foo')

    module_a = Module(yaml.safe_load("""
name: org.test.module.a
version: 1.0
"""), 'path', 'artifact_path')
    module_b = Module(yaml.safe_load("""
name: org.test.module.b
version: 1.0
"""), 'path', 'artifact_path')
    # Second, newer version of module 'b'.
    module_b_1 = Module(yaml.safe_load("""
name: org.test.module.b
version: 1.1
"""), 'path', 'artifact_path')

    module_registry = ModuleRegistry()
    module_registry.add_module(module_a)
    module_registry.add_module(module_b)
    module_registry.add_module(module_b_1)

    resulting_install_list = OrderedDict()
    # Module 'b' is requested without a version.
    to_install_list = [
        Map({'name': 'org.test.module.a', 'version': '1.0'}),
        Map({'name': 'org.test.module.b'})
    ]

    image.process_install_list(image, to_install_list, resulting_install_list, module_registry)

    assert resulting_install_list == OrderedDict([
        ('org.test.module.a', {'name': 'org.test.module.a', 'version': '1.0'}),
        ('org.test.module.b', {'name': 'org.test.module.b'})
    ])
    assert "Module version not specified for 'org.test.module.b' module, using '1.1' default version" in caplog.text
def test_remove_none_key():
    """remove_none_keys() drops keys whose value is None (yaml '~')."""
    img = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
envs:
    - name: foo
      value: ~
"""), 'foo')

    img.remove_none_keys()

    assert 'envs' in img
    assert 'value' not in img['envs'][0]
def test_remove_none_key():
    """remove_none_keys() drops keys whose value is None (yaml '~')."""
    tools.cfg['common'] = {'work_dir': '/tmp'}

    img = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
envs:
    - name: foo
      value: ~
"""), 'foo')

    img.remove_none_keys()

    assert 'envs' in img
    assert 'value' not in img['envs'][0]
def init(self):
    """
    Initializes the image object.

    Loads the image descriptor (single- or multi-stage), applies overrides,
    adds build labels, builds the module registry and resolves modules for
    the target image.
    """
    LOGGER.debug("Removing old target directory")
    shutil.rmtree(self.target, ignore_errors=True)
    os.makedirs(os.path.join(self.target, 'image'))

    # Read the main image descriptor and create an Image object from it
    descriptor = tools.load_descriptor(self._descriptor_path)

    # A list descriptor means a multi-stage build: all entries but the last
    # are builder images, the last one is the target image.
    if isinstance(descriptor, list):
        LOGGER.info("Descriptor contains multiple elements, assuming multi-stage image")
        LOGGER.info("Found {} builder image(s) and one target image".format(
            len(descriptor[:-1])))

        # Iterate over images defined in image descriptor and
        # create Image objects out of them
        for image_descriptor in descriptor[:-1]:
            self.builder_images.append(
                Image(image_descriptor,
                      os.path.dirname(os.path.abspath(self._descriptor_path))))

        descriptor = descriptor[-1]

    self.image = Image(descriptor, os.path.dirname(os.path.abspath(self._descriptor_path)))

    # apply overrides to the image definition
    self.image.apply_image_overrides(self._overrides)

    # Builder images get the same overrides applied.
    for builder in self.builder_images:
        builder.apply_image_overrides(self._overrides)

    # add build labels
    self.add_build_labels()

    # load the definitions of the modules
    self.build_module_registry()

    # process included modules
    self.image.apply_module_overrides(self._module_registry)
    self.image.process_defaults()
def test_overide_resource_remove_chksum():
    """Overriding an artifact by name replaces its path and drops old checksums."""
    image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
artifacts:
    - name: abs
      path: /tmp/abs
      md5: 'foo'
      sha1: 'foo'
      sha256: 'foo'
"""), 'foo')
    overrides = Overrides(yaml.safe_load("""
artifacts:
    - name: abs
      path: /tmp/over
"""), 'foo')

    overrides.merge(image)

    merged_artifact = overrides['artifacts'][0]
    assert overrides['from'] == 'foo'
    assert merged_artifact['path'] == '/tmp/over'
    # All checksum keys from the original artifact must be gone.
    for checksum in ('md5', 'sha1', 'sha256'):
        assert checksum not in merged_artifact
def test_buildah_builder_run_with_generator(mocker):
    """
    With a generator attached, the buildah builder derives its tags from the
    generator's image (name:version and name:latest) and invokes
    'buildah build-using-dockerfile' with them.
    """
    params = Map({'tags': []})
    # Intercept the subprocess call so no real buildah binary is needed.
    check_call = mocker.patch.object(subprocess, 'check_call')
    builder = create_builder_object(mocker, 'buildah', params)
    builder.generator = DockerGenerator("", "", {})
    builder.generator.image = Image(yaml.safe_load("""
name: foo
version: 1.9
labels:
    - name: test
      value: val1
    - name: label2
      value: val2
envs:
    - name: env1
      value: env1val
"""), 'foo')
    builder.run()

    check_call.assert_called_once_with(['/usr/bin/buildah',
                                        'build-using-dockerfile',
                                        '--squash',
                                        '-t', 'foo:1.9',
                                        '-t', 'foo:latest',
                                        'something/image'])
def test_merging_description_image():
    """Merging a module into an image must not propagate the module's description."""
    base = Image({'name': 'foo', 'version': 1}, None)
    mod = Module({'name': 'mod1', 'description': 'mod_desc'}, None, None)

    assert 'description' not in _merge_descriptors(base, mod)
def test_merging_description_override():
    """An override's description must survive merging with an image."""
    base = Image({'name': 'foo', 'version': 1}, None)
    override = Overrides({'name': 'mod1', 'description': 'mod_desc'}, None)

    assert 'description' in _merge_descriptors(override, base)
def test_image_artifacts(caplog):
    """
    Artifacts without an explicit 'name' get an auto-generated one (taken from
    the URL or path basename) and a warning is logged for each of them.
    """
    image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
labels:
    - name: test
      value: val1
    - name: label2
      value: val2
artifacts:
    - url: https://archive.apache.org/dist/tomcat/tomcat-8/v8.5.24/bin/apache-tomcat-8.5.24.tar.gz
      md5: 080075877a66adf52b7f6d0013fa9730
    - path: /foo/bar
      md5: 080075877a66adf52b7f6d0013fa9730
envs:
    - name: env1
      value: env1val
"""), 'foo')

    assert image['name'] == 'test/foo'
    assert type(image['labels'][0]) == Label
    assert image['labels'][0]['name'] == 'test'
    # Name auto-generated from the URL basename
    assert "No value found for 'name' in '{\"md5\": \"080075877a66adf52b7f6d0013fa9730\", \"url\": \"https://archive.apache.org/dist/tomcat/tomcat-8/v8.5.24/bin/apache-tomcat-8.5.24.tar.gz\"}' artifact; using auto-generated value of 'apache-tomcat-8.5.24.tar.gz'" \
        in caplog.text
    # Name auto-generated from the path basename
    assert "No value found for 'name' in '{\"md5\": \"080075877a66adf52b7f6d0013fa9730\", \"path\": \"/foo/bar\"}' artifact; using auto-generated value of 'bar'" \
        in caplog.text
def init(self):
    """Initialize the image object from the descriptor file."""
    base_dir = os.path.dirname(os.path.abspath(self._descriptor_path))
    self.image = Image(tools.load_descriptor(self._descriptor_path), base_dir)

    # Overrides first, then labels, then module resolution and defaults.
    self.image.apply_image_overrides(self._overrides)
    self.add_build_labels()
    self.build_module_registry()
    self.apply_module_overrides()
    self.image.process_defaults()
def test_module_processing_simple_modules_order_to_install():
    """
    Modules requested in the install list end up in the resulting install
    list in the same order, keyed by module name.
    """
    image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
"""), 'foo')

    module_a = Module(yaml.safe_load("""
name: org.test.module.a
version: 1.0
"""), 'path', 'artifact_path')
    module_b = Module(yaml.safe_load("""
name: org.test.module.b
version: 1.0
"""), 'path', 'artifact_path')

    module_registry = ModuleRegistry()
    module_registry.add_module(module_a)
    module_registry.add_module(module_b)

    resulting_install_list = OrderedDict()
    to_install_list = [
        Map({'name': 'org.test.module.a', 'version': '1.0'}),
        Map({'name': 'org.test.module.b'})
    ]

    image.process_install_list(image, to_install_list, resulting_install_list, module_registry)

    assert resulting_install_list == OrderedDict([
        ('org.test.module.a', {'name': 'org.test.module.a', 'version': '1.0'}),
        ('org.test.module.b', {'name': 'org.test.module.b'})
    ])
def test_image_descriptor_with_execute(caplog):
    """'execute' is not allowed at image level; schema validation must fail."""
    descriptor = yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
execute:
    - script: build.sh
""")

    with pytest.raises(CekitError) as excinfo:
        Image(descriptor, 'foo')

    assert "Cannot validate schema: Image" in excinfo.value.message
def test_image_plain_artifacts(caplog):
    """A plain artifact (no url/path) missing 'name' fails _PlainResource validation."""
    with pytest.raises(CekitError) as excinfo:
        Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
artifacts:
    - target: jolokia.jar
      md5: 080075877a66adf52b7f6d0013fa9730
"""), 'foo')

    message = excinfo.value.message
    assert "Cannot validate schema: _PlainResource" in message
    assert "Cannot find required key 'name'" in caplog.text
def test_large_labels_should_break_lines(tmpdir):
    """
    Label values longer than 128 characters must be split into 128-character
    chunks joined with an escaped newline when build labels are added.
    """
    image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
labels:
    - name: 'the.large.label'
      value: Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec pretium finibus lorem vitae pellentesque. Maecenas tincidunt amet.
"""), 'foo')

    with docker_generator(tmpdir) as generator:
        generator.image = image

        with cekit_config(redhat=True):
            generator.add_build_labels()

    # The value is wrapped at 128 characters with a backslash-newline.
    assert image.labels[0].value == "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec pretium finibus lorem vitae pellentesque. Maecenas tincidunt amet\\\n.\\\n"
def init(self):
    """
    Initializes the image object.

    Recreates the target directory, loads the image descriptor, applies
    overrides, adds build labels, builds the module registry and resolves
    modules and defaults.
    """
    LOGGER.debug("Removing old target directory")
    shutil.rmtree(self.target, ignore_errors=True)
    os.makedirs(os.path.join(self.target, 'image'))

    # Read the main image descriptor and create an Image object from it
    descriptor = tools.load_descriptor(self._descriptor_path)
    self.image = Image(descriptor, os.path.dirname(os.path.abspath(self._descriptor_path)))

    # apply overrides to the image definition
    self.image.apply_image_overrides(self._overrides)

    # add build labels
    self.add_build_labels()

    # load the definitions of the modules
    self.build_module_registry()

    # process included modules
    self.apply_module_overrides()

    self.image.process_defaults()
def test_image_no_name():
    """A descriptor without the mandatory 'name' key must fail validation."""
    descriptor = yaml.safe_load("""
version: 1.9
labels:
    - name: test
      value: val1
    - name: label2
      value: val2
envs:
    - name: env1
      value: env1val
""")

    with pytest.raises(CekitError) as excinfo:
        Image(descriptor, 'foo')

    assert 'Cannot validate schema' in str(excinfo.value)
def test_image():
    """Basic descriptor parsing: name is kept and labels become Label objects."""
    img = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
labels:
    - name: test
      value: val1
    - name: label2
      value: val2
envs:
    - name: env1
      value: env1val
"""), 'foo')

    assert img['name'] == 'test/foo'

    first_label = img['labels'][0]
    assert type(first_label) == Label
    assert first_label['name'] == 'test'
def test_image_modules_git_repo(caplog):
    """
    A git module repository without an explicit 'name' gets one auto-generated
    from the repository URL, and a warning is logged about it.
    """
    image = Image(yaml.safe_load("""
from: foo
name: test/foo
version: 1.9
modules:
    repositories:
        - git:
            url: "https://github.com/company/foobar-project-modules"
            ref: "release-3.1.0"
        - name: "another-module"
          git:
            url: "https://github.com/company/another-git-module"
            ref: "release-1.1"
    install:
        - name: "org.company.project.feature"
"""), 'foo')

    assert image['name'] == 'test/foo'
    # The first repository has no 'name', so it is derived from the URL.
    assert "No value found for 'name' in '{\"git\": {\"ref\": \"release-3.1.0\", \"url\": \"https://github.com/company/foobar-project-modules\"}}' artifact; using auto-generated value of 'foobar-project-modules'" \
        in caplog.text
class Generator(object):
    """
    This class process Image descriptor(self.image) and uses it to generate
    target directory by fetching all dependencies and artifacts

    Args:
      descriptor_path - path to an image descriptor
      target - path to target directory
      overrides - path to overrides file (can be None)
    """

    # OSBS container.yaml flag that enables unpublished pulp repos in composes
    ODCS_HIDDEN_REPOS_FLAG = 'include_unpublished_pulp_repos'

    def __init__(self, descriptor_path, target, overrides):
        """Store paths and pre-load override descriptors (image is loaded in init())."""
        self._descriptor_path = descriptor_path
        self._overrides = []
        self.target = target
        self._fetch_repos = False
        self._module_registry = ModuleRegistry()
        self.image = None
        self.builder_images = []
        self.images = []

        if overrides:
            for override in overrides:
                LOGGER.debug("Loading override '{}'".format(override))
                override_artifact_dir = os.path.dirname(os.path.abspath(override))
                # When the override is not a file on disk (e.g. inline YAML),
                # resolve relative artifacts against the descriptor directory.
                if not os.path.exists(override):
                    override_artifact_dir = os.path.dirname(os.path.abspath(descriptor_path))
                self._overrides.append(Overrides(tools.load_descriptor(
                    override), override_artifact_dir))

        LOGGER.info("Initializing image descriptor...")

    @staticmethod
    def dependencies(params=None):
        """Return the external dependency map required by the generator."""
        deps = {}

        deps['odcs-client'] = {
            'package': 'python3-odcs-client',
            'library': 'odcs',
            'rhel': {
                'package': 'python2-odcs-client'
            }
        }

        # brew/koji tooling is only needed in the Red Hat environment
        if CONFIG.get('common', 'redhat'):
            deps['brew'] = {
                'package': 'brewkoji',
                'executable': '/usr/bin/brew'
            }

        return deps

    def init(self):
        """
        Initializes the image object.
        """
        LOGGER.debug("Removing old target directory")
        shutil.rmtree(self.target, ignore_errors=True)
        os.makedirs(os.path.join(self.target, 'image'))

        # Read the main image descriptor and create an Image object from it
        descriptor = tools.load_descriptor(self._descriptor_path)

        if isinstance(descriptor, list):
            LOGGER.info("Descriptor contains multiple elements, assuming multi-stage image")
            LOGGER.info("Found {} builder image(s) and one target image".format(
                len(descriptor[:-1])))

            # Iterate over images defined in image descriptor and
            # create Image objects out of them
            for image_descriptor in descriptor[:-1]:
                self.builder_images.append(
                    Image(image_descriptor,
                          os.path.dirname(os.path.abspath(self._descriptor_path))))

            descriptor = descriptor[-1]

        self.image = Image(descriptor, os.path.dirname(os.path.abspath(self._descriptor_path)))

        # Construct list of all images (builder images + main one)
        self.images = [self.image] + self.builder_images

        for image in self.images:
            # Apply overrides to all image definitions:
            # intermediate (builder) images and target image as well
            # It is required to build the module registry
            image.apply_image_overrides(self._overrides)

        # Load definitions of modules
        # We need to load it after we apply overrides so that any changes to modules
        # will be reflected there as well
        self.build_module_registry()

        for image in self.images:
            # Process included modules
            image.apply_module_overrides(self._module_registry)
            image.process_defaults()

        # Add build labels
        self.add_build_labels()

    def generate(self, builder):  # pylint: disable=unused-argument
        """Produce the full target directory: modules, artifacts, repos, Dockerfile, help."""
        self.copy_modules()
        self.prepare_artifacts()
        self.prepare_repositories()
        self.image.remove_none_keys()
        self.image.write(os.path.join(self.target, 'image.yaml'))
        self.render_dockerfile()
        self.render_help()

    def add_redhat_overrides(self):
        """Append the Red Hat specific environment/label overrides."""
        self._overrides.append(self.get_redhat_overrides())

    def add_build_labels(self):
        """Add generated labels (cekit version, description, summary) to the image."""
        image_labels = self.image.labels
        # we will persist cekit version in a label here, so we know which version of cekit
        # was used to build the image
        image_labels.append(Label({'name': 'io.cekit.version', 'value': cekit_version}))

        for label in image_labels:
            if len(label.value) > 128:
                # breaks the line each time it reaches 128 characters
                label.value = "\\\n".join(re.findall("(?s).{,128}", label.value))[:]

        # If we define the label in the image descriptor
        # we should *not* override it with value from
        # the root's key
        if self.image.description and not self.image.label('description'):
            image_labels.append(Label({'name': 'description', 'value': self.image.description}))

        # Last - if there is no 'summary' label added to image descriptor
        # we should use the value of the 'description' key and create
        # a 'summary' label with it's content. If there is even that
        # key missing - we should not add anything.
        description = self.image.label('description')

        if not self.image.label('summary') and description:
            image_labels.append(Label({'name': 'summary', 'value': description['value']}))

    def _modules(self):
        """
        Returns list of modules used in all builder images
        as well as the target image.
        """
        modules = []
        for image in self.images:
            if image.modules:
                modules += [image.modules]
        return modules

    def _module_repositories(self):
        """
        Prepares list of all module repositories. This includes
        repositories defined in builder images as well as target image.
        """
        repositories = []

        for module in self._modules():
            for repo in module.repositories:
                # If the repository already exists, skip it
                if repo in repositories:
                    LOGGER.warning((
                        "Module repository '{0}' already added, please check your image configuration, " +
                        "skipping module repository '{0}'").format(repo.name))
                    continue
                repositories.append(repo)

        return repositories

    def build_module_registry(self):
        """Download every module repository and register the modules it contains."""
        base_dir = os.path.join(self.target, 'repo')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        for repo in self._module_repositories():
            LOGGER.debug("Downloading module repository: '{}'".format(repo.name))
            repo.copy(base_dir)
            self.load_repository(os.path.join(base_dir, repo.target))

    def load_repository(self, repo_dir):
        """Walk a repository checkout and register every module.yaml found."""
        for modules_dir, _, files in os.walk(repo_dir):
            if 'module.yaml' in files:
                module_descriptor_path = os.path.abspath(os.path.expanduser(
                    os.path.normcase(os.path.join(modules_dir, 'module.yaml'))))

                module = Module(tools.load_descriptor(module_descriptor_path),
                                modules_dir,
                                os.path.dirname(module_descriptor_path))
                LOGGER.debug("Adding module '{}', path: '{}'".format(module.name, module.path))
                self._module_registry.add_module(module)

    def get_tags(self):
        """Return the default tags: name:version and name:latest."""
        return ["%s:%s" % (self.image['name'], self.image['version']),
                "%s:latest" % self.image['name']]

    def copy_modules(self):
        """Prepare module to be used for Dockerfile generation.
        This means:

        1. Place module to args.target/image/modules/ directory
        """
        modules_to_install = []
        for module in self._modules():
            if module.install:
                modules_to_install += module.install

        target = os.path.join(self.target, 'image', 'modules')
        for module in modules_to_install:
            # Resolve the requested name/version against the registry
            module = self._module_registry.get_module(
                module.name, module.version, suppress_warnings=True)
            LOGGER.debug("Copying module '{}' required by '{}'.".format(
                module.name, self.image.name))

            dest = os.path.join(target, module.name)

            if not os.path.exists(dest):
                LOGGER.debug("Copying module '{}' to: '{}'".format(module.name, dest))
                shutil.copytree(module.path, dest)
            # write out the module with any overrides
            module.write(os.path.join(dest, "module.yaml"))

    def get_redhat_overrides(self):
        """Build an Overrides object carrying the Red Hat JBoss envs and labels."""
        class RedHatOverrides(Overrides):
            def __init__(self, generator):
                super(RedHatOverrides, self).__init__({}, None)
                self._generator = generator

            @property
            def envs(self):
                return [
                    Env({'name': 'JBOSS_IMAGE_NAME',
                         'value': '%s' % self._generator.image['name']}),
                    Env({'name': 'JBOSS_IMAGE_VERSION',
                         'value': '%s' % self._generator.image['version']})
                ]

            @property
            def labels(self):
                labels = [
                    Label({'name': 'name', 'value': '%s' % self._generator.image['name']}),
                    Label({'name': 'version', 'value': '%s' % self._generator.image['version']})
                ]
                return labels

        return RedHatOverrides(self)

    def render_dockerfile(self):
        """Renders Dockerfile to $target/image/Dockerfile"""
        LOGGER.info("Rendering Dockerfile...")

        template_file = os.path.join(os.path.dirname(__file__),
                                     '..', 'templates', 'template.jinja')
        loader = FileSystemLoader(os.path.dirname(template_file))
        env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
        env.globals['helper'] = TemplateHelper(self._module_registry)
        env.globals['image'] = self.image
        env.globals['builders'] = self.builder_images

        template = env.get_template(os.path.basename(template_file))

        dockerfile = os.path.join(self.target, 'image', 'Dockerfile')
        if not os.path.exists(os.path.dirname(dockerfile)):
            os.makedirs(os.path.dirname(dockerfile))

        with open(dockerfile, 'wb') as f:
            f.write(template.render(self.image).encode('utf-8'))
        LOGGER.debug("Dockerfile rendered")

    def render_help(self):
        """
        If requested, renders image help page based on the image descriptor.
        It is generated to the $target/image/help.md file and added later
        to the root of the image (/).
        """
        if not self.image.get('help', {}).get('add', False):
            return

        LOGGER.info("Rendering help.md page...")

        # Set default help template
        help_template_path = os.path.join(os.path.dirname(__file__),
                                          '..', 'templates', 'help.jinja')

        # If custom template is requested, use it
        if self.image.get('help', {}).get('template', ""):
            help_template_path = self.image['help']['template']
            # If the path provided is absolute, use it
            # If it's a relative path, make it relative to the image descriptor
            if not os.path.isabs(help_template_path):
                help_template_path = os.path.join(os.path.dirname(
                    self._descriptor_path), help_template_path)

        help_dirname, help_basename = os.path.split(help_template_path)
        loader = FileSystemLoader(help_dirname)
        env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
        env.globals['helper'] = TemplateHelper(self._module_registry)
        env.globals['image'] = self.image
        help_template = env.get_template(help_basename)

        helpfile = os.path.join(self.target, 'image', 'help.md')
        with open(helpfile, 'wb') as f:
            f.write(help_template.render(self.image).encode('utf-8'))

        LOGGER.debug("help.md rendered")

    def prepare_repositories(self):
        """ Prepare repositories for build time injection. """
        if 'packages' not in self.image:
            return

        # content_sets repositories are exclusive: they replace all others
        if self.image.get('packages').get('content_sets'):
            LOGGER.warning(
                'The image has ContentSets repositories specified, all other repositories are removed!')
            self.image['packages']['repositories'] = []
        repos = self.image.get('packages').get('repositories', [])

        injected_repos = []

        for repo in repos:
            if self._handle_repository(repo):
                injected_repos.append(repo)

        if self.image.get('packages').get('content_sets'):
            url = self._prepare_content_sets(self.image.get('packages').get('content_sets'))
            if url:
                repo = Repository({'name': 'content_sets_odcs', 'url': {'repository': url}})
                injected_repos.append(repo)
                self._fetch_repos = True

        if self._fetch_repos:
            # Download repo files into the image tree so they can be injected
            for repo in injected_repos:
                repo.fetch(os.path.join(self.target, 'image', 'repos'))
            self.image['packages']['repositories_injected'] = injected_repos
        else:
            self.image['packages']['set_url'] = injected_repos

    def _prepare_content_sets(self, content_sets):
        """Request an ODCS pulp compose for the platform's content sets.

        Returns the resulting repofile URL, or False when there is nothing
        to compose. Raises CekitError on any ODCS failure.
        """
        if not content_sets:
            return False

        arch = platform.machine()

        if arch not in content_sets:
            raise CekitError(
                "There are no content_sets defined for platform '{}'!".format(arch))

        repos = ' '.join(content_sets[arch])

        odcs_service_type = "Fedora"
        odcs_url = "https://odcs.fedoraproject.org"

        if CONFIG.get('common', 'redhat'):
            odcs_service_type = "Red Hat"
            odcs_url = "https://odcs.engineering.redhat.com"

        LOGGER.info("Using {} ODCS service to create composes".format(odcs_service_type))

        flags = []

        compose = self.image.get('osbs', {}).get(
            'configuration', {}).get('container', {}).get('compose', {})

        if compose.get(Generator.ODCS_HIDDEN_REPOS_FLAG, False):
            flags.append(Generator.ODCS_HIDDEN_REPOS_FLAG)

        odcs = ODCS(odcs_url, auth_mech=AuthMech.Kerberos)

        LOGGER.debug(
            "Requesting ODCS pulp compose for '{}' repositories with '{}' flags...".format(repos, flags))

        try:
            compose = odcs.new_compose(repos, 'pulp', flags=flags)
        except requests.exceptions.HTTPError as ex:
            if ex.response.status_code == 401:
                LOGGER.error(("You are not authorized to use {} ODCS service. "
                              "Are you sure you have a valid Kerberos session?").format(odcs_service_type))
            raise CekitError("Could not create ODCS compose", ex)

        compose_id = compose.get('id', None)

        if not compose_id:
            raise CekitError(
                "Invalid response from ODCS service: no compose id found: {}".format(compose))

        LOGGER.debug("Waiting for compose {} to finish...".format(compose_id))
        compose = odcs.wait_for_compose(compose_id, timeout=600)
        state = compose.get('state', None)

        if not state:
            raise CekitError(
                "Invalid response from ODCS service: no state found: {}".format(compose))

        # State 2 is "done"
        if state != 2:
            raise CekitError("Cannot create ODCS compose: '{}'".format(compose))

        LOGGER.debug("Compose finished successfully")

        repofile = compose.get('result_repofile', None)

        if not repofile:
            raise CekitError(
                "Invalid response from ODCS service: no state_repofile key found: {}".format(compose))

        return repofile

    def _handle_repository(self, repo):
        """Process and prepares all v2 repositories.

        Args:
          repo a repository to process

        Returns True if repository file is prepared and should be injected"""
        LOGGER.debug(
            "Loading configuration for repository: '{}' from 'repositories'.".format(repo['name']))

        if 'id' in repo:
            LOGGER.warning("Repository '{}' is defined as plain. It must be available "
                           "inside the image as Cekit will not inject it.".format(repo['name']))
            return False

        if 'content_sets' in repo:
            self._fetch_repos = True
            return self._prepare_content_sets(repo)

        elif 'rpm' in repo:
            self._prepare_repository_rpm(repo)
            return False

        elif 'url' in repo:
            return True

        return False

    def _prepare_repository_rpm(self, repo):
        # Subclasses (builder-specific generators) must provide this.
        raise NotImplementedError("RPM repository injection was not implemented!")

    def prepare_artifacts(self):
        # Subclasses (builder-specific generators) must provide this.
        raise NotImplementedError("Artifacts handling is not implemented")
class Generator(object):
    """
    This class processes the image descriptor (self.image) and uses it to
    generate the target directory by fetching all dependencies and artifacts.

    Args:
      descriptor_path - path to an image descriptor
      target - path to target directory
      overrides - path to overrides file (can be None)
    """

    # ODCS flag that makes unpublished pulp repositories visible in composes.
    ODCS_HIDDEN_REPOS_FLAG = 'include_unpublished_pulp_repos'

    def __init__(self, descriptor_path, target, overrides):
        self._descriptor_path = descriptor_path
        self._overrides = []
        self.target = target
        self._fetch_repos = False
        self._module_registry = ModuleRegistry()
        self.image = None

        if overrides:
            for override in overrides:
                # TODO: If the overrides is provided as text, why do we try to get path to it?
                LOGGER.debug("Loading override '%s'" % (override))
                self._overrides.append(Overrides(tools.load_descriptor(
                    override), os.path.dirname(os.path.abspath(override))))

        LOGGER.info("Initializing image descriptor...")

    def init(self):
        """
        Initializes the image object.
        """

        # Read the main image descriptor and create an Image object from it
        descriptor = tools.load_descriptor(self._descriptor_path)
        self.image = Image(descriptor, os.path.dirname(os.path.abspath(self._descriptor_path)))

        # apply overrides to the image definition
        self.image.apply_image_overrides(self._overrides)
        # add build labels
        self.add_build_labels()
        # load the definitions of the modules
        self.build_module_registry()
        # process included modules
        self.apply_module_overrides()
        self.image.process_defaults()

    def generate(self, builder):
        """Run the full generation pipeline for the given builder."""
        self.copy_modules()
        self.prepare_repositories(builder)
        self.image.remove_none_keys()
        self.image.write(os.path.join(self.target, 'image.yaml'))
        self.prepare_artifacts()
        self.render_dockerfile()
        self.render_help()

    def add_tech_preview_overrides(self):
        """Register the tech-preview name override."""
        self._overrides.append(self.get_tech_preview_overrides())

    def add_redhat_overrides(self):
        """Register the Red Hat specific env/label overrides."""
        self._overrides.append(self.get_redhat_overrides())

    def add_build_labels(self):
        """Add build-time labels (cekit version, description, summary)."""
        image_labels = self.image.labels
        # we will persist cekit version in a label here, so we know which
        # version of cekit was used to build the image
        image_labels.append(Label({'name': 'io.cekit.version', 'value': cekit_version}))

        # If we define the label in the image descriptor
        # we should *not* override it with value from
        # the root's key
        if self.image.description and not self.image.label('description'):
            image_labels.append(Label({'name': 'description', 'value': self.image.description}))

        # Last - if there is no 'summary' label added to image descriptor
        # we should use the value of the 'description' key and create
        # a 'summary' label with its content. If there is even that
        # key missing - we should not add anything.
        description = self.image.label('description')

        if not self.image.label('summary') and description:
            image_labels.append(Label({'name': 'summary', 'value': description['value']}))

    def apply_module_overrides(self):
        """Apply overrides contributed by the installed modules."""
        self.image.apply_module_overrides(self._module_registry)

    def build_module_registry(self):
        """Download all module repositories and index their modules."""
        base_dir = os.path.join(self.target, 'repo')

        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        for repo in self.image.modules.repositories:
            LOGGER.debug("Downloading module repository: '%s'" % (repo.name))
            repo.copy(base_dir)
            self.load_repository(os.path.join(base_dir, repo.target_file_name()))

    def load_repository(self, repo_dir):
        """Walk a module repository and register every module.yaml found."""
        for modules_dir, _, files in os.walk(repo_dir):
            if 'module.yaml' in files:
                module_descriptor_path = os.path.abspath(os.path.expanduser(
                    os.path.normcase(os.path.join(modules_dir, 'module.yaml'))))

                module = Module(tools.load_descriptor(module_descriptor_path),
                                modules_dir,
                                os.path.dirname(module_descriptor_path))
                LOGGER.debug("Adding module '%s', path: '%s'" % (module.name, module.path))
                self._module_registry.add_module(module)

    def get_tags(self):
        """Return the image tags: '<name>:<version>' and '<name>:latest'."""
        return ["%s:%s" % (self.image['name'], self.image['version']),
                "%s:latest" % self.image['name']]

    def copy_modules(self):
        """Prepare module to be used for Dockerfile generation.
        This means:

        1. Place module to args.target/image/modules/ directory

        """
        target = os.path.join(self.target, 'image', 'modules')

        for module in self.image.modules.install:
            module = self._module_registry.get_module(module.name, module.version)
            LOGGER.debug("Copying module '%s' required by '%s'."
                         % (module.name, self.image.name))

            dest = os.path.join(target, module.name)

            if not os.path.exists(dest):
                LOGGER.debug("Copying module '%s' to: '%s'" % (module.name, dest))
                shutil.copytree(module.path, dest)
            # write out the module with any overrides
            module.write(os.path.join(dest, "module.yaml"))

    def get_tech_preview_overrides(self):
        """Return an Overrides object that rewrites the image name to its
        tech-preview form ('family-tech-preview/name' or 'name-tech-preview')."""

        class TechPreviewOverrides(Overrides):
            def __init__(self, generator):
                super(TechPreviewOverrides, self).__init__({}, None)
                self._generator = generator

            @property
            def name(self):
                new_name = self._generator.image.name
                if '/' in new_name:
                    family, new_name = new_name.split('/')
                    new_name = "%s-tech-preview/%s" % (family, new_name)
                else:
                    new_name = "%s-tech-preview" % new_name
                return new_name

        return TechPreviewOverrides(self)

    def get_redhat_overrides(self):
        """Return an Overrides object adding JBoss env vars and name/version
        labels derived from the image descriptor."""

        class RedHatOverrides(Overrides):
            def __init__(self, generator):
                super(RedHatOverrides, self).__init__({}, None)
                self._generator = generator

            @property
            def envs(self):
                return [
                    Env({'name': 'JBOSS_IMAGE_NAME',
                         'value': '%s' % self._generator.image['name']}),
                    Env({'name': 'JBOSS_IMAGE_VERSION',
                         'value': '%s' % self._generator.image['version']})
                ]

            @property
            def labels(self):
                labels = [
                    Label({'name': 'name',
                           'value': '%s' % self._generator.image['name']}),
                    Label({'name': 'version',
                           'value': '%s' % self._generator.image['version']})
                ]
                return labels

        return RedHatOverrides(self)

    def render_dockerfile(self):
        """Renders Dockerfile to $target/image/Dockerfile"""
        LOGGER.info("Rendering Dockerfile...")

        template_file = os.path.join(os.path.dirname(__file__),
                                     '..', 'templates', 'template.jinja')
        loader = FileSystemLoader(os.path.dirname(template_file))
        env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
        env.globals['helper'] = TemplateHelper(self._module_registry)
        env.globals['image'] = self.image

        template = env.get_template(os.path.basename(template_file))

        dockerfile = os.path.join(self.target, 'image', 'Dockerfile')

        if not os.path.exists(os.path.dirname(dockerfile)):
            os.makedirs(os.path.dirname(dockerfile))

        with open(dockerfile, 'wb') as f:
            f.write(template.render(self.image).encode('utf-8'))
        LOGGER.debug("Dockerfile rendered")

    def render_help(self):
        """
        If requested, renders image help page based on the image descriptor.
        It is generated to the $target/image/help.md file and added later
        to the root of the image (/).
        """
        if not self.image.get('help', {}).get('add', False):
            return

        LOGGER.info("Rendering help.md page...")

        # Set default help template
        help_template_path = os.path.join(os.path.dirname(__file__),
                                          '..', 'templates', 'help.jinja')

        # If custom template is requested, use it
        if self.image.get('help', {}).get('template', ""):
            help_template_path = self.image['help']['template']

            # If the path provided is absolute, use it
            # If it's a relative path, make it relative to the image descriptor
            if not os.path.isabs(help_template_path):
                help_template_path = os.path.join(os.path.dirname(
                    self._descriptor_path), help_template_path)

        help_dirname, help_basename = os.path.split(help_template_path)
        loader = FileSystemLoader(help_dirname)
        env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
        env.globals['helper'] = TemplateHelper(self._module_registry)
        help_template = env.get_template(help_basename)

        helpfile = os.path.join(self.target, 'image', 'help.md')
        with open(helpfile, 'wb') as f:
            f.write(help_template.render(self.image).encode('utf-8'))
        LOGGER.debug("help.md rendered")

    def prepare_repositories(self, builder):
        """ Prepare repositories for build time injection. """
        if 'packages' not in self.image:
            return

        # Content sets are exclusive: when they are used, any individually
        # defined repositories are discarded.
        if self.image.get('packages').get('content_sets'):
            LOGGER.warning(
                'The image has ContentSets repositories specified, all other repositories are removed!')
            self.image['packages']['repositories'] = []

        repos = self.image.get('packages').get('repositories', [])
        injected_repos = []

        for repo in repos:
            if self._handle_repository(repo):
                injected_repos.append(repo)

        # Request an ODCS compose for the content sets and expose it as an
        # additional (generated) repository definition.
        if self.image.get('packages').get('content_sets'):
            url = self._prepare_content_sets(self.image.get('packages').get('content_sets'))
            if url:
                repo = Repository({'name': 'content_sets_odcs', 'url': {'repository': url}})
                injected_repos.append(repo)
                self._fetch_repos = True

        if self._fetch_repos:
            # Download every repository file into the target directory so it
            # can be copied into the image at build time.
            for repo in injected_repos:
                repo.fetch(os.path.join(self.target, 'image', 'repos'))
            self.image['packages']['repositories_injected'] = injected_repos
        else:
            self.image['packages']['set_url'] = injected_repos

    def _prepare_content_sets(self, content_sets):
        """Create an ODCS pulp compose for the content sets of the current
        architecture by shelling out to the 'odcs' CLI client.

        Args:
          content_sets: mapping of architecture name to a list of pulp repository names

        Returns:
          The URL of the generated repo file, or False when there is nothing
          to compose.

        Raises:
          CekitError: when no content sets exist for the platform, the odcs
          client is missing, or the compose fails.
        """
        if not content_sets:
            return False

        arch = platform.machine()

        if arch not in content_sets:
            raise CekitError("There are no content_sets defined for platform '{}'!".format(arch))

        repos = ' '.join(content_sets[arch])

        try:
            # ideally this will be API for ODCS, but there is no python3 package for ODCS
            cmd = ['/usr/bin/odcs']

            odcs_service_type = "Fedora"

            if CONFIG.get('common', 'redhat'):
                odcs_service_type = "Red Hat"
                cmd.append('--redhat')

            # BUGFIX: message previously read "to created composes"
            LOGGER.info("Using {} ODCS service to create composes".format(odcs_service_type))

            cmd.append('create')

            compose = self.image.get('osbs', {}).get(
                'configuration', {}).get('container', {}).get('compose', {})

            if compose.get(Generator.ODCS_HIDDEN_REPOS_FLAG, False):
                cmd.extend(['--flag', Generator.ODCS_HIDDEN_REPOS_FLAG])

            cmd.extend(['pulp', repos])

            LOGGER.debug("Creating ODCS content set via '%s'" % " ".join(cmd))

            output = subprocess.check_output(cmd).decode()

            # The CLI prints python-ish repr output; strip the first line and
            # the py2 unicode string prefixes so it can be parsed as YAML.
            normalized_output = '\n'.join(output.replace(" u'", " '")
                                          .replace(' u"', ' "')
                                          .split('\n')[1:])

            odcs_result = yaml.safe_load(normalized_output)

            # State 2 is "done"
            if odcs_result['state'] != 2:
                raise CekitError("Cannot create content set: '%s'"
                                 % odcs_result['state_reason'])

            repo_url = odcs_result['result_repofile']
            return repo_url
        except CekitError as ex:
            raise ex
        except OSError as ex:
            raise CekitError("ODCS is not installed, please install 'odcs-client' package")
        except subprocess.CalledProcessError as ex:
            raise CekitError("Cannot create content set: '%s'" % ex.output)
        except Exception as ex:
            raise CekitError('Cannot create content set!', ex)

    def _handle_repository(self, repo):
        """Process and prepares all v2 repositories.

        Args:
          repo a repository to process

        Returns True if repository file is prepared and should be injected"""

        LOGGER.debug(
            "Loading configuration for repository: '{}' from 'repositories'.".format(repo['name']))

        if 'id' in repo:
            # Plain repositories are expected to already exist inside the image.
            LOGGER.warning("Repository '%s' is defined as plain. It must be available "
                           "inside the image as Cekit will not inject it." % repo['name'])
            return False

        if 'content_sets' in repo:
            # NOTE(review): the whole repo dict is passed here, while
            # _prepare_content_sets expects an arch -> repositories mapping;
            # confirm this is intended.
            self._fetch_repos = True
            return self._prepare_content_sets(repo)

        elif 'rpm' in repo:
            self._prepare_repository_rpm(repo)
            return False

        elif 'url' in repo:
            return True

        return False

    def _prepare_repository_rpm(self, repo):
        # RPM-based repository injection is delegated to builder-specific subclasses.
        raise NotImplementedError("RPM repository injection was not implemented!")

    def prepare_artifacts(self):
        # Artifact handling is delegated to builder-specific subclasses.
        raise NotImplementedError("Artifacts handling is not implemented")
class Generator(object):
    """This class processes the image descriptor (self.image) and uses it to
    generate the target directory by fetching all dependencies and artifacts.

    Args:
      descriptor_path - path to an image descriptor
      target - path to target directory
      builder - builder type
      overrides - path to overrides file (can be None)
      params - dictionary of builder specific parameters
    """

    def __new__(cls, descriptor_path, target, builder, overrides, params):
        # Factory dispatch: instantiating Generator directly returns the
        # builder-specific subclass instead.
        if cls is Generator:
            if 'docker' == builder or 'buildah' == builder:
                from cekit.generator.docker import DockerGenerator as GeneratorImpl
                logger.info('Generating files for %s engine.' % builder)
            elif 'osbs' == builder:
                from cekit.generator.osbs import OSBSGenerator as GeneratorImpl
                logger.info('Generating files for OSBS engine.')
            else:
                raise CekitError("Unsupported generator type: '%s'" % builder)
        # NOTE(review): when a subclass is instantiated directly (cls is not
        # Generator) GeneratorImpl is unbound here — presumably subclasses are
        # only ever created through this factory; verify against callers.
        return super(Generator, cls).__new__(GeneratorImpl)

    def __init__(self, descriptor_path, target, builder, overrides, params):
        self._type = builder
        descriptor = tools.load_descriptor(descriptor_path)

        # if there is a local modules directory and no modules are defined
        # we will inject it for a backward compatibility
        local_mod_path = os.path.join(
            os.path.abspath(os.path.dirname(descriptor_path)), 'modules')
        if os.path.exists(local_mod_path) and 'modules' in descriptor:
            modules = descriptor.get('modules')
            if not modules.get('repositories'):
                modules['repositories'] = [{
                    'path': local_mod_path,
                    'name': 'modules'
                }]

        self.image = Image(descriptor, os.path.dirname(os.path.abspath(descriptor_path)))
        self.target = target
        self._params = params
        # Set to True once a repository requires its repo file to be fetched.
        self._fetch_repos = False

        if overrides:
            for override in overrides:
                self.image = self.override(override)

        logger.info("Initializing image descriptor...")

    def generate_tech_preview(self):
        """Appends '--tech-preview' to image name/family name"""
        name = self.image.get('name')
        if '/' in name:
            family, name = name.split('/')
            self.image['name'] = "%s-tech-preview/%s" % (family, name)
        else:
            self.image['name'] = "%s-tech-preview" % name

    def get_tags(self):
        # Tags are '<name>:<version>' and '<name>:latest'.
        return [
            "%s:%s" % (self.image['name'], self.image['version']),
            "%s:latest" % self.image['name']
        ]

    def prepare_modules(self, descriptor=None):
        """Prepare module to be used for Dockerfile generation.
        This means:

        1. Place module to args.target/image/modules/ directory
        2. Fetch its artifacts to target/image/sources directory
        3. Merge modules descriptor with image descriptor

        Arguments:
        descriptor: Module descriptor used to dig required modules,
            if descriptor is not provided image descriptor is used.
        """
        if not descriptor:
            descriptor = self.image

        # Copy the install list so merging below cannot mutate the iterable.
        modules = descriptor.get('modules', {}).get('install', [])[:]

        # Process in reverse so earlier entries take precedence when merged.
        for module in reversed(modules):
            logger.debug("Preparing module '%s' requested by '%s'."
                         % (module['name'], descriptor['name']))
            version = module.get('version', None)

            req_module = copy_module_to_target(
                module['name'], version,
                os.path.join(self.target, 'image', 'modules'))
            # Recurse into the module's own dependencies before merging it in.
            self.prepare_modules(req_module)
            descriptor.merge(req_module)
            logger.debug("Merging '%s' module into '%s'."
                         % (req_module['name'], descriptor['name']))

    def prepare_artifacts(self):
        """Goes through artifacts section of image descriptor
        and fetches all of them
        """
        if 'artifacts' not in self.image:
            logger.debug("No artifacts to fetch")
            return

        logger.info("Handling artifacts...")
        target_dir = os.path.join(self.target, 'image')

        for artifact in self.image['artifacts']:
            artifact.copy(target_dir)

        logger.debug("Artifacts handled")

    def override(self, overrides_path):
        """Load an overrides descriptor and merge the current image into it,
        returning the merged descriptor (overrides win)."""
        logger.info("Using overrides file from '%s'." % overrides_path)
        descriptor = Overrides(
            tools.load_descriptor(overrides_path),
            os.path.dirname(os.path.abspath(overrides_path)))
        descriptor.merge(self.image)
        return descriptor

    def _generate_expose_services(self):
        """Generate the label io.openshift.expose-services based on the port
        definitions."""
        ports = []
        for p in self.image['ports']:
            if p.get('expose', True):
                r = "{}/{}".format(p['value'], p.get('protocol', 'tcp'))
                if 'service' in p:
                    r += ":{}".format(p['service'])
                    ports.append(r)
                else:
                    # attempt to supply a service name by looking up the socket number
                    try:
                        service = socket.getservbyport(
                            p['value'], p.get('protocol', 'tcp'))
                        r += ":{}".format(service)
                        ports.append(r)
                    # Ports with no known service name are silently omitted.
                    except OSError:  # py3
                        pass
                    except socket.error:  # py2
                        pass
        return ",".join(ports)

    def _inject_redhat_defaults(self):
        # Default env vars and labels applied for Red Hat builds, derived
        # from the image name/version.
        envs = [{
            'name': 'JBOSS_IMAGE_NAME',
            'value': '%s' % self.image['name']
        }, {
            'name': 'JBOSS_IMAGE_VERSION',
            'value': '%s' % self.image['version']
        }]

        labels = [{
            'name': 'name',
            'value': '%s' % self.image['name']
        }, {
            'name': 'version',
            'value': '%s' % self.image['version']
        }]

        # do not override this label if it's already set
        if self.image.get('ports', []) and \
                'io.openshift.expose-services' not in [
                    k['name'] for k in self.image['labels']
                ]:
            labels.append({
                'name': 'io.openshift.expose-services',
                'value': self._generate_expose_services()
            })

        redhat_override = {'envs': envs, 'labels': labels}

        descriptor = Overrides(redhat_override, None)
        descriptor.merge(self.image)
        self.image = descriptor

    def render_dockerfile(self):
        """Renders Dockerfile to $target/image/Dockerfile"""
        logger.info("Rendering Dockerfile...")

        if self._params.get('redhat'):
            self._inject_redhat_defaults()

        self.image.process_defaults()

        template_file = os.path.join(os.path.dirname(__file__),
                                     '..', 'templates', 'template.jinja')
        loader = FileSystemLoader(os.path.dirname(template_file))
        env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
        env.globals['helper'] = TemplateHelper()
        env.globals['addhelp'] = self._params.get('addhelp')
        template = env.get_template(os.path.basename(template_file))

        dockerfile = os.path.join(self.target, 'image', 'Dockerfile')
        if not os.path.exists(os.path.dirname(dockerfile)):
            os.makedirs(os.path.dirname(dockerfile))
        with open(dockerfile, 'wb') as f:
            f.write(template.render(self.image).encode('utf-8'))
        logger.debug("Dockerfile rendered")

        # Help page template resolution: image descriptor wins over the
        # builder parameter, which wins over the bundled default.
        if self.image.get('help', {}).get('template', ""):
            help_template_path = self.image['help']['template']
        elif 'help_template' in self._params:
            help_template_path = self._params['help_template']
        else:
            help_template_path = os.path.join(os.path.dirname(__file__),
                                              '..', 'templates', 'help.jinja')

        help_dirname, help_basename = os.path.split(help_template_path)
        loader = FileSystemLoader(help_dirname)
        env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)
        env.globals['helper'] = TemplateHelper()
        help_template = env.get_template(help_basename)

        helpfile = os.path.join(self.target, 'image', 'help.md')
        with open(helpfile, 'wb') as f:
            f.write(help_template.render(self.image).encode('utf-8'))
        logger.debug("help.md rendered")

    def prepare_repositories(self):
        """ Prepare repositories for build time injection. """
        if 'packages' not in self.image:
            return

        repos = self.image.get('packages').get('repositories', [])

        injected_repos = []

        for repo in repos:
            if self._handle_repository(repo):
                injected_repos.append(repo)

        if self._fetch_repos:
            # Download every repository file so it can be copied into the
            # image at build time.
            for repo in injected_repos:
                repo.fetch(os.path.join(self.target, 'image', 'repos'))
            self.image['packages']['repositories_injected'] = injected_repos
        else:
            self.image['packages']['set_url'] = injected_repos

    def _handle_repository(self, repo):
        """Process and prepares all v2 repositories.

        Args:
          repo a repository to process

        Returns True if repository file is prepared and should be injected"""
        logger.debug("Loading configuration for repository: '%s' from '%s'."
                     % (repo['name'], 'repositories-%s' % self._type))

        if 'id' in repo:
            # Plain repositories are expected to already exist inside the image.
            logger.warning(
                "Repository '%s' is defined as plain. It must be available "
                "inside the image as Cekit will not inject it." % repo['name'])
            return False

        if 'odcs' in repo:
            self._fetch_repos = True
            return self._prepare_repository_odcs_pulp(repo)

        elif 'rpm' in repo:
            self._prepare_repository_rpm(repo)
            return False

        elif 'url' in repo:
            return True

        return False

    def _prepare_repository_odcs_pulp(self, repo, **kwargs):
        # ODCS pulp repository injection is delegated to builder-specific subclasses.
        raise NotImplementedError(
            "ODCS pulp repository injection not implemented!")

    def _prepare_repository_rpm(self, repo):
        # RPM-based repository injection is delegated to builder-specific subclasses.
        raise NotImplementedError(
            "RPM repository injection was not implemented!")
def test_image_missing_name():
    # A descriptor lacking the mandatory 'name' key must be rejected
    # with a CekitError when the Image object is constructed.
    descriptor = yaml.safe_load("""
    from: foo
    version: 1.9""")

    with pytest.raises(CekitError):
        Image(descriptor, 'foo')