def validate(self):
    version.parse_to_semver(self.release_version)
    existing_dir(self.repo_dir)

    have_ctf = os.path.exists(self.ctf_path)
    have_cd = os.path.exists(self.component_descriptor_v2_path)

    # boolean xor: exactly one of the two input artefacts must be present
    if not have_ctf ^ have_cd:
        ci.util.fail(
            'exactly one of component-descriptor, or ctf-archive must exist'
        )
    elif have_cd:
        self.component_descriptor_v2 = cm.ComponentDescriptor.from_dict(
            ci.util.parse_yaml_file(self.component_descriptor_v2_path),
        )
    elif have_ctf:
        component_descriptors = list(
            cnudie.util.component_descriptors_from_ctf_archive(
                self.ctf_path,
            ))
        if not component_descriptors:
            ci.util.fail(
                f'No component descriptor found in CTF archive at {self.ctf_path}'
            )
        if len(component_descriptors) > 1:
            ci.util.fail(
                f'More than one component descriptor found in CTF archive at {self.ctf_path}'
            )
        self.component_descriptor_v2 = component_descriptors[0]
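# A minimal standalone sketch (not part of the original code) of the
# exactly-one-of guard used above; both paths are hypothetical:
import os

ctf_path = 'build/ctf.tar'                    # hypothetical path
cd_path = 'build/component-descriptor.yaml'   # hypothetical path

have_ctf = os.path.exists(ctf_path)
have_cd = os.path.exists(cd_path)

# `^` acts as boolean xor on the two bools: the guard only passes
# if exactly one of the two files exists
if not have_ctf ^ have_cd:
    raise ValueError('exactly one of component-descriptor, or ctf-archive must exist')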
def validate(self):
    existing_dir(self.repo_dir)
    version.parse_to_semver(self.release_version)

    if self.release_commit_callback:
        existing_file(self.release_commit_callback)

    existing_file(self.repository_version_file_path)
def validate(self):
    semver.parse(self.release_version)
    existing_dir(self.repo_dir)

    # fail unless a release with the given version (tag) exists
    try:
        self.github_helper.repository.release_from_tag(self.release_version)
    except NotFoundError:
        raise RuntimeError(f'No release with tag {self.release_version} found')
def validate(self):
    version.parse_to_semver(self.release_version)
    existing_dir(self.repo_dir)

    try:
        self.component_descriptor_v2 = cnudie.util.determine_main_component(
            repository_hostname=self.repository_hostname,
            repository_path=self.repository_path,
            component_descriptor_v2_path=self.component_descriptor_v2_path,
            ctf_path=self.ctf_path,
        )
    except ValueError as err:
        ci.util.fail(str(err))
def validate(self):
    existing_dir(self.repo_dir)
    version.parse_to_semver(self.release_version)

    if self.next_version_callback:
        existing_file(self.next_version_callback)

    existing_file(self.repository_version_file_path)

    # perform version ops once to validate args
    _calculate_next_cycle_dev_version(
        release_version=self.release_version,
        version_operation=self.version_operation,
        prerelease_suffix=self.prerelease_suffix,
    )
def export_kubeconfig(
    kubernetes_config_name: str,
    output_file: str,
):
    '''Write the kubeconfig contained in a kubernetes config to a given path.
    '''
    cfg_factory = ctx().cfg_factory()
    kubernetes_cfg = cfg_factory.kubernetes(kubernetes_config_name)

    destination_path = pathlib.Path(output_file).resolve()
    existing_dir(destination_path.parent)

    with destination_path.open(mode='w') as f:
        yaml.dump(kubernetes_cfg.kubeconfig(), f)
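# Hedged usage sketch for export_kubeconfig; the config name and output path
# are hypothetical. Note that existing_dir() is called on the parent of the
# output file, so the target directory must already exist:
export_kubeconfig(
    kubernetes_config_name='my-cluster',        # hypothetical cfg name
    output_file='/tmp/my-cluster.kubeconfig',   # /tmp must already exist
)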
def from_cfg_dir(cfg_dir: str, cfg_types_file='config_types.yaml'):
    cfg_dir = existing_dir(os.path.abspath(cfg_dir))
    cfg_types_dict = parse_yaml_file(os.path.join(cfg_dir, cfg_types_file))
    raw = {}

    raw[ConfigFactory.CFG_TYPES] = cfg_types_dict

    def parse_cfg(cfg_type):
        # assume for now that there is exactly one cfg source (file)
        cfg_sources = list(cfg_type.sources())
        if not len(cfg_sources) == 1:
            raise ValueError(
                'currently, only exactly one cfg file is supported per type'
            )

        cfg_file = cfg_sources[0].file()
        parsed_cfg = parse_yaml_file(os.path.join(cfg_dir, cfg_file))
        return parsed_cfg

    # parse all configurations
    for cfg_type in map(ConfigType, cfg_types_dict.values()):
        cfg_name = cfg_type.cfg_type_name()
        raw[cfg_name] = parse_cfg(cfg_type)

    return ConfigFactory(raw_dict=raw)
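# Hedged usage sketch for from_cfg_dir; the directory path is hypothetical and
# is assumed to contain a config_types.yaml plus the per-type YAML files it
# references. The typed accessor mirrors the cfg_factory.kubernetes(...) call
# seen in export_kubeconfig above:
factory = ConfigFactory.from_cfg_dir(cfg_dir='/path/to/cc-config')
kubernetes_cfg = factory.kubernetes('my-cluster')  # hypothetical cfg name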
def enumerate_local_repo(self, repo_dir: str):
    repo_dir = existing_dir(Path(repo_dir))
    if not repo_dir.joinpath('.git').is_dir():
        raise ValueError(f'not a git root directory: {repo_dir}')

    for path in self.CODEOWNERS_PATHS:
        codeowners_file = repo_dir.joinpath(path)
        if codeowners_file.is_file():
            with open(codeowners_file) as f:
                yield from self._filter_codeowners_entries(f.readlines())
def determine_head_commit_recipients(src_dirs=()):
    '''returns a generator yielding e-mail addresses from the head commit's author and
    committer for all given repository work trees.
    '''
    for src_dir in src_dirs:
        # committer/author from head commit
        repo = git.Repo(existing_dir(src_dir))
        head_commit = repo.commit(repo.head)
        yield head_commit.author.email.lower()
        yield head_commit.committer.email.lower()
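# Hedged usage sketch; the work-tree path is hypothetical and must be an
# initialised git repository. Collecting into a set deduplicates the common
# case where author and committer are the same person:
recipients = set(determine_head_commit_recipients(src_dirs=('/path/to/worktree',)))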
def _cfg_factory_from_dir():
    if Config.CONTEXT.value.config_dir() is None:
        return None

    from ci.util import existing_dir
    cfg_dir = existing_dir(Config.CONTEXT.value.config_dir())

    from model import ConfigFactory
    factory = ConfigFactory.from_cfg_dir(cfg_dir=cfg_dir)
    return factory
def enumerate_codeowners_from_local_repo(
    repo_dir: str,
    paths: typing.Iterable[str] = ('CODEOWNERS', '.github/CODEOWNERS', 'docs/CODEOWNERS'),
) -> typing.Generator[str, None, None]:
    repo_dir = existing_dir(Path(repo_dir))
    if not repo_dir.joinpath('.git').is_dir():
        raise ValueError(f'not a git root directory: {repo_dir}')

    for path in paths:
        codeowners_file = repo_dir.joinpath(path)
        if codeowners_file.is_file():
            with open(codeowners_file) as f:
                yield from filter_codeowners_entries(f.readlines())
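# Hedged usage sketch; the repository path is hypothetical. The generator scans
# the default CODEOWNERS locations and yields the filtered entries:
for entry in enumerate_codeowners_from_local_repo(repo_dir='/path/to/repo'):
    print(entry)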
def _from_cfg_dir(
    cfg_dir: str,
    disable_cfg_element_lookup: bool,
    cfg_types_file='config_types.yaml',
    cfg_src_types=None,
    lookup_cfg_factory=None,
):
    cfg_dir = existing_dir(os.path.abspath(cfg_dir))
    cfg_types_dict = parse_yaml_file(os.path.join(cfg_dir, cfg_types_file))
    raw = {}

    raw[ConfigFactory.CFG_TYPES] = cfg_types_dict

    def retrieve_cfg(cfg_type):
        cfg_dict = {}

        for cfg_src in cfg_type.sources():
            if cfg_src_types and type(cfg_src) not in cfg_src_types:
                continue

            if isinstance(cfg_src, LocalFileCfgSrc):
                parsed_cfg = ConfigFactory._parse_local_file(
                    cfg_dir=cfg_dir,
                    cfg_src=cfg_src,
                )
            elif isinstance(cfg_src, GithubRepoFileSrc):
                if disable_cfg_element_lookup:
                    continue
                parsed_cfg = ConfigFactory._parse_repo_file(
                    cfg_src=cfg_src,
                    lookup_cfg_factory=lookup_cfg_factory,
                )
            else:
                raise NotImplementedError(cfg_src)

            for k, v in parsed_cfg.items():
                if k in cfg_dict and cfg_dict[k] != v:
                    raise ValueError(
                        f'conflicting definition for {k=} in src {cfg_src}'
                    )
                cfg_dict[k] = v

        return cfg_dict

    return ConfigFactory(
        raw_dict=raw,
        retrieve_cfg=retrieve_cfg,
    )
def deploy_or_upgrade_whitesource_api_extension(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    chart_dir: str = False,
    deployment_name: str = False,
    whitesource_cfg_name: str = None,
):
    cfg_factory = ctx().cfg_factory()
    cfg_set = cfg_factory.cfg_set(config_set_name)

    # `False` serves as a sentinel to distinguish "argument not passed" from
    # other falsy values; only explicitly passed arguments are forwarded
    kwargs = {}
    if deployment_name is not False:
        kwargs['deployment_name'] = deployment_name
    if chart_dir is not False:
        kwargs['chart_dir'] = existing_dir(chart_dir)

    whitesource_cfg = cfg_set.whitesource(cfg_name=whitesource_cfg_name)

    wss.deploy_whitesource_api_extension(
        kubernetes_cfg=cfg_set.kubernetes(),
        whitesource_cfg=whitesource_cfg,
        **kwargs,
    )
def update_submodule(
    repo_path: str,
    tree_ish: str,
    submodule_path: str,
    commit_hash: str,
    author: str,
    email: str,
):
    '''Update the submodule of a git-repository to a specific commit.

    Create a new commit, with the passed tree-ish as parent, in the given repository.

    Note that this implementation only supports toplevel submodules. To be removed in a
    future version.

    Parameters
    ------
    repo_path : str
        Path to a directory containing an initialised git-repo with a submodule to update.
    tree_ish : str
        Valid tree-ish to use as base for creating the new commit. Used as parent for the
        commit to be created.
        Example: 'master' for the head of the master-branch.
    submodule_path : str
        Path (relative to the repository root) to the submodule. Must be immediately below
        the root of the repository.
    commit_hash : str
        The hash the submodule should point to in the created commit. This should be a
        valid commit-hash in the submodule's repository.
    author : str
        Will be set as author of the created commit.
    email : str
        Will be set for the author of the created commit.

    Returns
    ------
    str
        The hexadecimal SHA-1 hash of the created commit
    '''
    repo_path = existing_dir(os.path.abspath(repo_path))
    not_empty(submodule_path)
    if '/' in submodule_path:
        fail('This implementation only supports toplevel submodules: {s}'.format(s=submodule_path))
    not_empty(tree_ish)
    not_empty(commit_hash)
    not_empty(author)
    not_empty(email)

    repo = git.Repo(repo_path)
    _ensure_submodule_exists(repo, submodule_path)

    # Create mk-tree-parseable string-representation from given tree-ish.
    tree = repo.tree(tree_ish)
    tree_representation = _serialise_and_update_submodule(tree, submodule_path, commit_hash)

    # Pass the patched tree to git mk-tree using GitPython. We cannot do this in GitPython
    # directly as it does not support arbitrary tree manipulation.
    # We must keep a reference to auto_interrupt as it closes all streams to the subprocess
    # on finalisation
    auto_interrupt = repo.git.mktree(istream=subprocess.PIPE, as_process=True)
    process = auto_interrupt.proc
    stdout, _ = process.communicate(input=tree_representation.encode())

    # returned string is byte-encoded and newline-terminated
    new_sha = stdout.decode('utf-8').strip()

    # Create a new commit in the repo's object database from the newly created tree.
    actor = git.Actor(author, email)
    parent_commit = repo.commit(tree_ish)
    commit = git.Commit.create_from_tree(
        repo=repo,
        tree=new_sha,
        parent_commits=[parent_commit],
        message='Upgrade submodule {s} to commit {c}'.format(
            s=submodule_path,
            c=commit_hash,
        ),
        author=actor,
        committer=actor,
    )

    return commit.hexsha
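# Hedged usage sketch for update_submodule; all argument values are
# hypothetical. The new commit is written to the object database and its hash
# returned; no branch is checked out or moved by this call:
new_commit_sha = update_submodule(
    repo_path='/path/to/parent-repo',
    tree_ish='master',
    submodule_path='my-submodule',  # must be a toplevel submodule
    commit_hash='0123456789abcdef0123456789abcdef01234567',
    author='Jane Doe',
    email='jane.doe@example.com',
)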
def deploy_or_upgrade_landscape(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    components: CliHint(
        type=LandscapeComponent,
        typehint=[LandscapeComponent],
        choices=[component for component in LandscapeComponent],
        help="list of components to deploy. By default, ALL components will be deployed."
    )=None,
    webhook_dispatcher_chart_dir: CliHint(
        typehint=str,
        help="directory of webhook dispatcher chart",
    )=None,
    gardenlinux_cache_chart_dir: CliHint(
        typehint=str,
        help="directory of gardenlinux-cache chart",
    )=None,
    concourse_deployment_name: CliHint(
        typehint=str, help="namespace and deployment name for Concourse"
    )='concourse',
    whitesource_backend_chart_dir: CliHint(
        typehint=str,
        help="directory of Whitesource Backend chart",
    )=None,
    whitesource_backend_deployment_name: CliHint(
        typehint=str, help="namespace and deployment name for Whitesource"
    )='whitesource-backend',
    whitesource_cfg_name: CliHint(
        typehint=str,
        help='Whitesource Config',
    )='gardener',
    timeout_seconds: CliHint(typehint=int, help="how long to wait for concourse startup")=180,
    webhook_dispatcher_deployment_name: str='webhook-dispatcher',
    gardenlinux_cache_deployment_name: str='gardenlinux-cache',
    dry_run: bool=True,
):
    '''Deploys the given components of the Concourse landscape.
    '''
    # handle default (all known components)
    if not components:
        components = [component for component in LandscapeComponent]

    # Validate
    if LandscapeComponent.WHD in components:
        if not webhook_dispatcher_chart_dir:
            raise ValueError(
                f"--webhook-dispatcher-chart-dir must be given if component "
                f"'{LandscapeComponent.WHD.value}' is to be deployed."
            )
        else:
            webhook_dispatcher_chart_dir = existing_dir(webhook_dispatcher_chart_dir)

    if LandscapeComponent.GARDENLINUX_CACHE in components:
        if not gardenlinux_cache_chart_dir:
            raise ValueError(
                f"--gardenlinux-cache-chart-dir must be given if component "
                f"'{LandscapeComponent.GARDENLINUX_CACHE.value}' is to be deployed."
            )
        else:
            gardenlinux_cache_chart_dir = existing_dir(gardenlinux_cache_chart_dir)

    _display_info(
        dry_run=dry_run,
        operation="DEPLOYED",
        deployment_name=concourse_deployment_name,
        components=components,
    )

    if dry_run:
        return

    cfg_factory = ctx().cfg_factory()
    config_set = cfg_factory.cfg_set(config_set_name)
    concourse_cfg = config_set.concourse()

    # Set the global kubernetes cluster context to the cluster specified in the ConcourseConfig
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_cfg = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())

    if LandscapeComponent.SECRETS_SERVER in components:
        info('Deploying Secrets Server')
        deploy_secrets_server(
            config_set_name=config_set_name,
        )

    if LandscapeComponent.CONCOURSE in components:
        info('Deploying Concourse')
        deploy_or_upgrade_concourse(
            config_set_name=config_set_name,
            deployment_name=concourse_deployment_name,
            timeout_seconds=timeout_seconds,
        )

    if LandscapeComponent.WHD in components:
        info('Deploying Webhook Dispatcher')
        deploy_or_upgrade_webhook_dispatcher(
            config_set_name=config_set_name,
            chart_dir=webhook_dispatcher_chart_dir,
            deployment_name=webhook_dispatcher_deployment_name,
        )

    if LandscapeComponent.CLAMAV in components:
        info('Deploying ClamAV')
        deploy_or_upgrade_clamav(
            config_set_name=config_set_name,
        )

    if LandscapeComponent.GARDENLINUX_CACHE in components:
        info('Deploying Gardenlinux Cache')
        deploy_or_upgrade_gardenlinux_cache(
            config_set_name=config_set_name,
            chart_dir=gardenlinux_cache_chart_dir,
            deployment_name=gardenlinux_cache_deployment_name,
        )

    if LandscapeComponent.WHITESOURCE_BACKEND in components:
        info('Deploying Whitesource Backend')
        extra_args = {}
        if whitesource_backend_deployment_name:
            extra_args['deployment_name'] = whitesource_backend_deployment_name
        if whitesource_cfg_name:
            extra_args['whitesource_cfg_name'] = whitesource_cfg_name
        if whitesource_backend_chart_dir:
            extra_args['chart_dir'] = whitesource_backend_chart_dir

        deploy_or_upgrade_whitesource_api_extension(
            config_set_name=config_set_name,
            **extra_args,
        )
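# Hedged usage sketch; the config-set name is hypothetical. dry_run defaults to
# True, so an explicit dry_run=False is required for an actual deployment:
deploy_or_upgrade_landscape(
    config_set_name='my-landscape',  # hypothetical cfg-set name
    components=[LandscapeComponent.SECRETS_SERVER, LandscapeComponent.CONCOURSE],
    dry_run=False,
)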
def __init__(self, base_dir):
    self.base_dir = existing_dir(base_dir)
def validate(self):
    version.parse_to_semver(self.release_version)
    existing_dir(self.repo_dir)
def _from_cfg_dir(
    cfg_dir: str,
    cfg_types_file='config_types.yaml',
    cfg_src_types=None,
    lookup_cfg_factory=None,
):
    cfg_dir = existing_dir(os.path.abspath(cfg_dir))
    cfg_types_dict = parse_yaml_file(os.path.join(cfg_dir, cfg_types_file))
    raw = {}

    raw[ConfigFactory.CFG_TYPES] = cfg_types_dict

    def parse_cfg(cfg_type):
        cfg_dict = {}

        def parse_local_file(cfg_src: LocalFileCfgSrc):
            cfg_file = cfg_src.file
            return parse_yaml_file(os.path.join(cfg_dir, cfg_file))

        def parse_repo_file(cfg_src: GithubRepoFileSrc):
            import ccc.github
            repo_url = cfg_src.repository_url
            if '://' not in repo_url:
                repo_url = 'https://' + repo_url
            repo_url = urllib.parse.urlparse(repo_url)

            if not lookup_cfg_factory:
                raise RuntimeError(
                    'cannot resolve non-local cfg w/o bootstrap-cfg-factory'
                )

            gh_api = ccc.github.github_api(
                ccc.github.github_cfg_for_hostname(
                    repo_url.hostname,
                    cfg_factory=lookup_cfg_factory,
                ),
                cfg_factory=lookup_cfg_factory,
            )
            org, repo = repo_url.path.strip('/').split('/')
            gh_repo = gh_api.repository(org, repo)
            file_contents = gh_repo.file_contents(
                path=cfg_src.relpath,
                ref=gh_repo.default_branch,
            ).decoded.decode('utf-8')
            return yaml.safe_load(file_contents)

        for cfg_src in cfg_type.sources():
            if cfg_src_types and type(cfg_src) not in cfg_src_types:
                continue

            if isinstance(cfg_src, LocalFileCfgSrc):
                parsed_cfg = parse_local_file(cfg_src=cfg_src)
            elif isinstance(cfg_src, GithubRepoFileSrc):
                parsed_cfg = parse_repo_file(cfg_src=cfg_src)
            else:
                raise NotImplementedError(cfg_src)

            for k, v in parsed_cfg.items():
                if k in cfg_dict and cfg_dict[k] != v:
                    raise ValueError(f'conflicting definition for {k=}')
                cfg_dict[k] = v

        return cfg_dict

    # parse all configurations
    for cfg_type in map(ConfigType, cfg_types_dict.values()):
        cfg_name = cfg_type.cfg_type_name()
        raw[cfg_name] = parse_cfg(cfg_type)

    return ConfigFactory(raw_dict=raw)
def deploy_or_upgrade_landscape(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    components: CliHint(
        type=LandscapeComponent,
        typehint=[LandscapeComponent],
        choices=[component for component in LandscapeComponent],
        help="list of components to deploy. By default, ALL components will be deployed."
    )=None,
    webhook_dispatcher_chart_dir: CliHint(
        typehint=str,
        help="directory of webhook dispatcher chart",
    )=None,
    concourse_deployment_name: CliHint(
        typehint=str, help="namespace and deployment name for Concourse"
    )='concourse',
    timeout_seconds: CliHint(
        typehint=int, help="how long to wait for concourse startup"
    )=180,
    webhook_dispatcher_deployment_name: str='webhook-dispatcher',
    dry_run: bool=True,
):
    '''Deploys the given components of the Concourse landscape.
    '''
    # handle default (all known components)
    if not components:
        components = [component for component in LandscapeComponent]

    # Validate
    if LandscapeComponent.WHD in components:
        if not webhook_dispatcher_chart_dir:
            raise ValueError(
                f"--webhook-dispatcher-chart-dir must be given if component "
                f"'{LandscapeComponent.WHD.value}' is to be deployed."
            )
        else:
            webhook_dispatcher_chart_dir = existing_dir(webhook_dispatcher_chart_dir)

    _display_info(
        dry_run=dry_run,
        operation="DEPLOYED",
        deployment_name=concourse_deployment_name,
        components=components,
    )

    if dry_run:
        return

    cfg_factory = ctx().cfg_factory()
    config_set = cfg_factory.cfg_set(config_set_name)
    concourse_cfg = config_set.concourse()

    # Set the global kubernetes cluster context to the cluster specified in the ConcourseConfig
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_cfg = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    if LandscapeComponent.SECRETS_SERVER in components:
        info('Deploying Secrets Server')
        deploy_secrets_server(config_set_name=config_set_name)

    if LandscapeComponent.CONCOURSE in components:
        info('Deploying Concourse')
        deploy_or_upgrade_concourse(
            config_set_name=config_set_name,
            deployment_name=concourse_deployment_name,
            timeout_seconds=timeout_seconds,
        )

    if LandscapeComponent.WHD in components:
        info('Deploying Webhook Dispatcher')
        deploy_or_upgrade_webhook_dispatcher(
            config_set_name=config_set_name,
            chart_dir=webhook_dispatcher_chart_dir,
            deployment_name=webhook_dispatcher_deployment_name,
        )

    if LandscapeComponent.MONITORING in components:
        info('Deploying Monitoring stack')
        deploy_or_upgrade_monitoring(config_set_name=config_set_name)

    if LandscapeComponent.CLAMAV in components:
        info('Deploying ClamAV')
        deploy_or_upgrade_clamav(config_set_name=config_set_name)
    for additional_cfg in additional_cfgs:
        if not additional_cfg:
            continue
        cfg = merge_global_cfg(cfg, additional_cfg)


load_config()


def _cfg_factory_from_dir():
    if not cfg or not cfg.ctx or not (cfg_dir := cfg.ctx.config_dir):
        return None

    from ci.util import existing_dir
    cfg_dir = existing_dir(cfg_dir)

    from model import ConfigFactory
    factory = ConfigFactory.from_cfg_dir(cfg_dir=cfg_dir)
    return factory


def _secrets_server_client():
    import ccc.secrets_server
    try:
        if bool(args.server_endpoint) ^ bool(args.concourse_cfg_name):
            raise ValueError(
                'either all or none of server-endpoint and concourse-cfg-name must be set'
            )
        if args.server_endpoint or args.cache_file:
            return ccc.secrets_server.SecretsServerClient(