def wait_until_deployment_available(self, namespace: str, name: str, timeout_seconds: int = 60):
    '''Block until the given deployment has at least one available replica (or timeout).

    Return `True` if the deployment is available, `False` if a timeout occurred.
    '''
    not_empty(namespace)
    not_empty(name)

    w = watch.Watch()
    # Work around IncompleteRead errors resulting in ProtocolErrors - no fault of our own
    start_time = int(time.time())
    while (start_time + timeout_seconds) > time.time():
        try:
            for event in w.stream(
                self.apps_api.list_namespaced_deployment,
                namespace=namespace,
                timeout_seconds=timeout_seconds,
            ):
                deployment_spec = event['object']
                if deployment_spec is not None:
                    if deployment_spec.metadata.name == name:
                        if deployment_spec.status.available_replicas is not None \
                                and deployment_spec.status.available_replicas > 0:
                            return True
                # Check explicitly if timeout occurred
                if (start_time + timeout_seconds) < time.time():
                    return False
            # Regular Watch.stream() timeout occurred, no need for further checks
            return False
        except ProtocolError:
            info('http connection error - ignored')
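# A hedged usage sketch for the helper above (the caller-side function below is
# illustrative and not part of this file; only wait_until_deployment_available() is):
def ensure_deployment_ready(kubernetes_helper, namespace: str, name: str):
    # give slow clusters more headroom than the 60s default
    if not kubernetes_helper.wait_until_deployment_available(
        namespace=namespace,
        name=name,
        timeout_seconds=180,
    ):
        raise RuntimeError(f'deployment {namespace}/{name} did not become available in time')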
def ensure_helm_setup():
    """Ensure an up-to-date helm installation. Return the path to the found helm executable."""
    # we currently have both helm v3 and helm v2 in our images. To keep it convenient for local
    # execution, try both
    try:
        helm_executable = which('helm3')
    except Failure:
        info("No executable 'helm3' found in path. Falling back to 'helm'")
        helm_executable = which('helm')

    # os.devnull must be opened for writing, since it swallows the subprocesses' stdout
    with open(os.devnull, 'w') as devnull:
        subprocess.run(
            [helm_executable, 'repo', 'add', 'concourse', CONCOURSE_HELM_CHART_REPO],
            check=True,
            stdout=devnull,
        )
        subprocess.run(
            [helm_executable, 'repo', 'add', 'stable', STABLE_HELM_CHART_REPO],
            check=True,
            stdout=devnull,
        )
        subprocess.run([helm_executable, 'repo', 'update'], check=True, stdout=devnull)

    return helm_executable
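# Hedged usage sketch: callers presumably resolve the executable once and reuse it for
# subsequent helm invocations. The banner helper below is illustrative only:
def print_helm_version_banner():
    helm_executable = ensure_helm_setup()
    result = subprocess.run(
        [helm_executable, 'version', '--short'],
        check=True,
        capture_output=True,
        text=True,
    )
    info(f'using helm: {result.stdout.strip()}')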
def enumerate_definition_descriptors(self):
    info('enumerating explicitly specified definition file')

    try:
        definitions = parse_yaml_file(self.definition_file)
        yield from self._wrap_into_descriptors(
            repo_path=self.repo_path,
            repo_hostname=self.repo_host,
            branch=self.repo_branch,
            raw_definitions=definitions,
        )
    except BaseException as e:
        yield DefinitionDescriptor(
            pipeline_name='<invalid YAML>',
            pipeline_definition={},
            main_repo={
                'path': self.repo_path,
                'branch': self.repo_branch,
                'hostname': self.repo_host,
            },
            concourse_target_cfg=self.cfg_set.concourse(),
            concourse_target_team=self.job_mapping.team_name(),
            override_definitions=(),
            exception=e,
        )
def to_markdown(self, force_link_generation: bool = False) -> str:
    release_notes_str = MarkdownRenderer(
        release_note_objs=self.release_note_objs,
        force_link_generation=force_link_generation,
    ).render()
    info('Release notes:\n{rn}'.format(rn=release_notes_str))
    return release_notes_str
def _rls_note_objs(
    self,
    repository_branch: str = None,
    commit_range: str = None,
) -> typing.List[ReleaseNote]:
    if not commit_range:
        commit_range = self.calculate_range(repository_branch)
    info(f'Fetching release notes from revision range: {commit_range}')
    commits = self.commits_in_range(
        commit_range=commit_range,
        repository_branch=repository_branch,
    )
    pr_numbers = fetch_pr_numbers_from_commits(commits=commits)
    verbose(f'Merged pull request numbers in range {commit_range}: {pr_numbers}')
    release_note_objs = self.fetch_release_notes_from_prs(
        pr_numbers_in_range=pr_numbers,
    )
    release_note_objs.extend(
        fetch_release_notes_from_commits(
            commits=commits,
            current_component=self.component,
        )
    )
    return release_note_objs
def post_to_slack(self, channel: str, title: str, message: str, filetype: str = 'post'):
    api_token = self.slack_cfg.api_token()
    if not api_token:
        raise RuntimeError("can't post to slack as there is no slack api token in config")

    info(f"posting message '{title}' to slack channel '{channel}'")
    client = slack.WebClient(token=api_token)
    # We expect rather long messages, so we do not use incoming webhooks etc. to post
    # messages as those get truncated, see
    # https://api.slack.com/changelog/2018-04-truncating-really-long-messages
    # Instead we use the file upload mechanism so that this limit does not apply.
    # For contents of the result see https://api.slack.com/methods/files.upload
    response = self._post_with_retry(
        client=client,
        retries=5,
        channels=channel,
        content=message,
        title=title,
        filetype=filetype,
    )
    if not response['ok']:
        raise RuntimeError(f"failed to post to slack channel '{channel}': {response['error']}")
    return response
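# `_post_with_retry` is referenced above but not defined here. A minimal sketch of what
# it plausibly does (an assumption, not the actual implementation): retry files.upload
# on transient errors and re-raise once the attempts are exhausted.
def _post_with_retry(self, client, retries: int, **kwargs):
    for attempt in range(retries):
        try:
            return client.files_upload(**kwargs)
        except Exception as e:
            if attempt + 1 == retries:
                raise
            warning(f'files.upload failed ({e}) - retrying ({attempt + 1}/{retries})')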
def enumerate_definition_descriptors(self):
    executor = ThreadPoolExecutor(max_workers=16)

    # scan github repositories
    for github_org_cfg in self.job_mapping.github_organisations():
        github_cfg = self.cfg_set.github(github_org_cfg.github_cfg_name())
        github_org_name = github_org_cfg.org_name()
        info('scanning github organisation {gho}'.format(gho=github_org_name))

        github_api = ccc.github.github_api(github_cfg)
        github_org = github_api.organization(github_org_name)

        scan_repository_for_definitions = functools.partial(
            self._scan_repository_for_definitions,
            github_cfg=github_cfg,
            org_name=github_org_name,
        )

        for definition_descriptors in executor.map(
            scan_repository_for_definitions,
            (
                repo for repo in github_org.repositories()
                if github_org_cfg.repository_matches(repo.name)
            ),
        ):
            yield from definition_descriptors
def apply(self):
    # clean repository if required
    worktree_dirty = bool(self.git_helper._changed_file_paths())
    if worktree_dirty:
        if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
            reset_to = self.context().release_commit
        elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
            reset_to = 'HEAD'
        else:
            raise NotImplementedError

        self.git_helper.repo.head.reset(
            commit=reset_to,
            index=True,
            working_tree=True,
        )

    # prepare next dev cycle commit
    next_version = _calculate_next_cycle_dev_version(
        release_version=self.release_version,
        version_operation=self.version_operation,
        prerelease_suffix=self.prerelease_suffix,
    )
    info(f'next version: {next_version}')

    with open(self.repository_version_file_path, 'w') as f:
        f.write(next_version)

    # call optional dev cycle commit callback
    if self.next_version_callback:
        _invoke_callback(
            callback_script_path=self.next_version_callback,
            repo_dir=self.repo_dir,
            effective_version=next_version,
        )

    # depending on the publishing-policy, the bump-commit should either become the
    # successor of the release commit, or just be pushed to the branch-head
    if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
        parent_commits = [self.context().release_commit]
    elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
        parent_commits = None  # default to current branch head

    next_cycle_commit = self.git_helper.index_to_commit(
        message=self._next_dev_cycle_commit_message(
            version=next_version,
            message_prefix=self.next_cycle_commit_message_prefix,
        ),
        parent_commits=parent_commits,
    )

    # push commit to remote
    self.git_helper.push(
        from_ref=next_cycle_commit.hexsha,
        to_ref=self.repository_branch,
    )
    return {
        'next cycle commit sha': next_cycle_commit.hexsha,
    }
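# `_calculate_next_cycle_dev_version` is not shown in this file. A simplified sketch of
# the presumed semantics (ignoring the configurable version_operation; the name below is
# illustrative): bump the released version and mark it as a development prerelease.
def _calculate_next_cycle_dev_version_sketch(release_version: str, prerelease_suffix: str = 'dev'):
    major, minor, patch = (int(part) for part in release_version.split('.'))
    return f'{major}.{minor}.{patch + 1}-{prerelease_suffix}'

assert _calculate_next_cycle_dev_version_sketch('1.4.0') == '1.4.1-dev'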
def _notify_broken_definition_owners(self, failed_descriptor):
    definition_descriptor = failed_descriptor.definition_descriptor
    main_repo = definition_descriptor.main_repo
    github_cfg = ccc.github.github_cfg_for_hostname(main_repo['hostname'], self._cfg_set)
    github_api = ccc.github.github_api(github_cfg)
    repo_owner, repo_name = main_repo['path'].split('/')

    repo_helper = ccc.github.github_repo_helper(
        host=main_repo['hostname'],
        org=repo_owner,
        repo=repo_name,
        branch=main_repo['branch'],
    )

    codeowners_enumerator = CodeownersEnumerator()
    codeowners_resolver = CodeOwnerEntryResolver(github_api=github_api)
    recipients = set(codeowners_resolver.resolve_email_addresses(
        codeowners_enumerator.enumerate_remote_repo(github_repo_helper=repo_helper)
    ))

    # in case no codeowners are available, fall back to the committer
    if not recipients:
        head_commit = repo_helper.repository.commit(main_repo['branch'])
        user_ids = {
            user_info.get('login')
            for user_info in (head_commit.committer, head_commit.author)
            if user_info and user_info.get('login')
        }
        for user_id in user_ids:
            user = github_api.user(user_id)
            if user.email:
                recipients.add(user.email)

    # if there are still no recipients available, print a warning
    if not recipients:
        warning(textwrap.dedent(
            f"""
            Unable to determine recipient for pipeline '{definition_descriptor.pipeline_name}'
            found in branch '{main_repo['branch']}' ({main_repo['path']}). Please make sure that
            CODEOWNERS and committers have exposed a public e-mail address in their profile.
            """
        ))
    else:
        info(f'Sending notification e-mail to {recipients} ({main_repo["path"]})')
        email_cfg = self._cfg_set.email("ses_gardener_cloud_sap")
        _send_mail(
            email_cfg=email_cfg,
            recipients=recipients,
            subject='Your pipeline definition in {repo} is erroneous'.format(
                repo=main_repo['path'],
            ),
            mail_template=(
                f"The pipeline definition for pipeline '{definition_descriptor.pipeline_name}' "
                f"on branch '{main_repo['branch']}' contains errors.\n\n"
                f"Error details:\n{str(failed_descriptor.error_details)}"
            ),
        )
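# For reference, a typical CODEOWNERS file the enumerator/resolver pair above consumes
# (GitHub's standard format; the handles below are made up):
#
#   # .github/CODEOWNERS
#   *            @example-org/ci-maintainers
#   /concourse/  @example-user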
def _display_info(dry_run: bool, operation: str, **kwargs):
    info("Concourse will be {o} using helm with the following arguments".format(o=operation))
    max_len = max(map(len, kwargs.keys()))
    for k, v in kwargs.items():
        key_str = k.ljust(max_len)
        info("{k}: {v}".format(k=key_str, v=v))

    if dry_run:
        warning("this was a --dry-run. Set the --no-dry-run flag to actually deploy")
def retrieve_image(image_reference: str):
    try:
        container.registry.retrieve_container_image(image_reference=image_reference)
        info(f'downloaded {image_reference}')
    except Exception:
        warning(f'failed to retrieve {image_reference}')
        import traceback
        traceback.print_exc()
def _scan_repository_for_definitions(
    self,
    repository,
    github_cfg,
    org_name,
) -> typing.Iterator[DefinitionDescriptor]:
    for branch_name, cfg_entry in self._determine_repository_branches(repository=repository):
        try:
            definitions = repository.file_contents(
                path='.ci/pipeline_definitions',
                ref=branch_name,
            )
        except NotFoundError:
            continue  # no pipeline definition for this branch

        repo_hostname = urlparse(github_cfg.http_url()).hostname
        override_definitions = cfg_entry.override_definitions() if cfg_entry else {}

        verbose('from repo: ' + repository.name + ':' + branch_name)
        try:
            decoded_definitions = definitions.decoded.decode('utf-8')
            info(f'Linting pipeline_definitions for {repository} on branch {branch_name}')
            lint_yaml(decoded_definitions)
            definitions = load_yaml(decoded_definitions)
        except BaseException as e:
            repo_path = f'{org_name}/{repository.name}'
            yield DefinitionDescriptor(
                pipeline_name='<invalid YAML>',
                pipeline_definition={},
                main_repo={
                    'path': repo_path,
                    'branch': branch_name,
                    'hostname': repo_hostname,
                },
                concourse_target_cfg=self.cfg_set.concourse(),
                concourse_target_team=self.job_mapping.team_name(),
                override_definitions=(),
                exception=e,
            )
            return  # nothing else to yield in case parsing failed

        # handle inheritance
        definitions = merge_dicts(definitions, override_definitions)

        yield from self._wrap_into_descriptors(
            repo_path='/'.join([org_name, repository.name]),
            repo_hostname=repo_hostname,
            branch=branch_name,
            raw_definitions=definitions,
            override_definitions=override_definitions,
        )
def revert(self):
    # fetch release
    try:
        release = self.github_helper.repository.release_from_tag(self.release_version)
    except NotFoundError:
        release = None
    if release:
        info(f"Deleting Release {self.release_version}")
        if not release.delete():
            raise RuntimeError("Release could not be deleted")
def release_note_blocks(self):
    block_strings = _.map(
        self.release_note_objs,
        lambda rls_note_obj: rls_note_obj.to_block_str(),
    )
    if block_strings:
        release_notes_str = '\n\n'.join(block_strings)
    else:
        release_notes_str = ''
    info('Release note blocks:\n{rn}'.format(rn=release_notes_str))
    return release_notes_str
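# `_` above is presumably an underscore.py/pydash-style helper module; a plain list
# comprehension is the stdlib equivalent:
#
#   block_strings = [rls_note_obj.to_block_str() for rls_note_obj in self.release_note_objs]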
def diff_pipelines(left_file: CliHints.yaml_file(), right_file: CliHints.yaml_file()):
    from deepdiff import DeepDiff
    from pprint import pprint

    diff = DeepDiff(left_file, right_file, ignore_order=True)
    if diff:
        pprint(diff)
        fail('diffs were found')
    else:
        info('the yaml documents are equivalent')
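# A quick illustration of why ignore_order=True is passed above: pure ordering
# differences in lists do not count as a diff, while added items still do.
from deepdiff import DeepDiff

assert not DeepDiff({'steps': ['build', 'test']}, {'steps': ['test', 'build']}, ignore_order=True)
assert DeepDiff({'steps': ['build']}, {'steps': ['build', 'test']}, ignore_order=True)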
def test_info_with_quiet():
    class Args(object):
        pass
    args = Args()
    args.quiet = True
    import ctx
    ctx.args = args

    with capture_out() as (stdout, stderr):
        examinee.info(msg='should not be printed')

    assert len(stdout.getvalue()) == 0
    assert len(stderr.getvalue()) == 0
def delete_file(
    self,
    file_id: str,
):
    api_token = self.slack_cfg.api_token()
    if not api_token:
        raise RuntimeError("can't delete file from slack as there is no slack api token in config")
    info(f"deleting file with id '{file_id}' from Slack")
    client = slack.WebClient(token=api_token)
    # the underlying files.delete Web API method expects the file id as 'file'
    response = client.files_delete(file=file_id)
    if not response['ok']:
        raise RuntimeError(f"failed to delete file with id {file_id}")
    return response
def _revert(self, steps):
    # attempt to revert each step. Raise an exception if not all reverts succeeded.
    all_reverted = True
    for step in steps:
        step_name = step.name()
        info(f"Reverting step {step_name}")
        try:
            step.revert()
        except BaseException as e:
            all_reverted = False
            warning(f"An error occurred while reverting step '{step_name}': {e}")
            traceback.print_exc()
    if not all_reverted:
        raise RuntimeError("Unable to revert all steps.")
def _branch_cfg_or_none(
    self,
    repository,
):
    try:
        branch_cfg = repository.file_contents(
            path='branch.cfg',
            ref='refs/meta/ci',
        ).decoded.decode('utf-8')
        info(f'Linting branch cfg for {repository}')
        lint_yaml(branch_cfg)
    except NotFoundError:
        return None  # no branch cfg present
    return BranchCfg(raw_dict=load_yaml(branch_cfg))
def deploy_or_upgrade_clamav(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
):
    cfg_factory = ctx().cfg_factory()
    cfg_set = cfg_factory.cfg_set(config_set_name)
    concourse_cfg = cfg_set.concourse()
    kubernetes_cfg_name = concourse_cfg.kubernetes_cluster_config()
    clamav_cfg_name = concourse_cfg.clamav_config()
    if clamav_cfg_name is not None:
        setup_clamav.deploy_clam_av(
            clamav_cfg_name=clamav_cfg_name,
            kubernetes_cfg_name=kubernetes_cfg_name,
        )
    else:
        info(
            f"No ClamAV configured for the Concourse in config set '{config_set_name}'. Will "
            "not deploy ClamAV."
        )
def _ssh_auth_env(github_cfg):
    credentials = github_cfg.credentials()
    info(f'using github-credentials with uid: {credentials.username()}')

    tmp_id = tempfile.NamedTemporaryFile(mode='w', delete=False)  # attention: callers must unlink
    tmp_id.write(credentials.private_key())
    tmp_id.flush()
    os.chmod(tmp_id.name, 0o400)

    suppress_hostcheck = '-o "StrictHostKeyChecking no"'
    id_only = '-o "IdentitiesOnly yes"'

    cmd_env = os.environ.copy()
    cmd_env['GIT_SSH_COMMAND'] = f'ssh -v -i {tmp_id.name} {suppress_hostcheck} {id_only}'
    return (cmd_env, tmp_id)
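# Hedged usage sketch: pass the returned env to the git invocation and unlink the
# temporary identity file afterwards, as the inline comment above demands. The clone
# helper below is illustrative, not part of this file:
def clone_with_ssh_auth(github_cfg, repo_url: str, target_dir: str):
    cmd_env, tmp_id = _ssh_auth_env(github_cfg)
    try:
        subprocess.run(['git', 'clone', repo_url, target_dir], check=True, env=cmd_env)
    finally:
        os.unlink(tmp_id.name)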
def notify(
    subject: str,
    body: str,
    email_cfg_name: str,
    recipients: typing.Iterable[str],
):
    recipients = set(recipients)
    cfg_factory = ctx().cfg_factory()

    email_cfg = cfg_factory.email(email_cfg_name)
    _send_mail(
        email_cfg=email_cfg,
        recipients=recipients,
        mail_template=body,
        subject=subject,
    )
    info('sent email to: {r}'.format(r=recipients))
def apply(self):
    responses = post_to_slack(
        release_notes=self.release_notes,
        github_repository_name=self.githubrepobranch.github_repo_path(),
        slack_cfg_name=self.slack_cfg_name,
        slack_channel=self.slack_channel,
        release_version=self.release_version,
    )
    for response in responses:
        if response and response.get('file', None):
            uploaded_file_id = response.get('file').get('id')
            info(f'uploaded file id {uploaded_file_id} to slack')
        else:
            raise RuntimeError("Unable to get file id from Slack response")
    info('successfully posted contents to slack')
def download_images(
    component_descriptor: ComponentDescriptor,
    upload_registry_prefix: str,
    image_reference_filter=(lambda component, container_image: True),
    parallel_jobs=8,  # eight is a good number
):
    '''
    Downloads all matching container images, discarding the retrieved contents afterwards.
    While this may seem pointless, it actually does serve a purpose. Namely, we use the
    vulnerability scanning service offered by GCR. However, said scanning service will only
    continue to run (and thus update vulnerability reports) for images that keep being
    retrieved occasionally (the relevant timeout being roughly 4w).
    '''
    image_refs = [
        container_image.image_reference()
        for component, container_image
        in product.util._enumerate_effective_images(
            component_descriptor=component_descriptor,
        )
        if image_reference_filter(component, container_image)
    ]

    # XXX deduplicate this again (copied from product/scanning.py)
    def upload_image_ref(image_reference):
        image_name, tag = image_reference.rsplit(':', 1)
        mangled_reference = ':'.join((image_name.replace('.', '_'), tag))
        return urljoin(upload_registry_prefix, mangled_reference)

    image_refs = [upload_image_ref(ref) for ref in image_refs]

    info(f'downloading {len(image_refs)} container images to simulate consumption')

    executor = ThreadPoolExecutor(max_workers=parallel_jobs)

    def retrieve_image(image_reference: str):
        try:
            container.registry.retrieve_container_image(image_reference=image_reference)
            info(f'downloaded {image_reference}')
        except Exception:
            warning(f'failed to retrieve {image_reference}')
            import traceback
            traceback.print_exc()

    # force generator to be exhausted
    tuple(executor.map(retrieve_image, image_refs))
    success(f'successfully retrieved {len(image_refs)} container images')
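# A worked example of the reference-mangling above: dots are replaced only in the
# repository path, never in the tag.
image_name, tag = 'registry.example.org/proj/img:1.2.3'.rsplit(':', 1)
assert ':'.join((image_name.replace('.', '_'), tag)) == 'registry_example_org/proj/img:1.2.3'
# Caveat worth knowing: urljoin() resolves its second argument relative to the first,
# so the last path segment of upload_registry_prefix is dropped unless the prefix ends
# with a trailing '/'.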
def execute(self):
    executed_steps = list()
    for step in self._steps:
        step_name = step.name()
        info(f"Applying step '{step_name}'")
        executed_steps.append(step)
        try:
            output = step.apply()
            self._context.set_step_output(step_name, output)
        except BaseException as e:
            warning(f"An error occurred while applying step '{step_name}': {e}")
            traceback.print_exc()
            # revert the changes attempted, in reverse order
            self._revert(reversed(executed_steps))
            # do not execute apply for remaining steps
            return False
    return True
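# A toy illustration of the transaction semantics above (the class below is made up, not
# part of this file): if step 'b' fails, only the previously applied step 'a' is
# reverted, step 'c' is never applied, and execute() returns False.
class _ToyStep:
    def __init__(self, name: str, fail: bool = False):
        self._name = name
        self._fail = fail

    def name(self):
        return self._name

    def apply(self):
        if self._fail:
            raise RuntimeError(f'{self._name} failed')
        return {'applied': self._name}

    def revert(self):
        print(f'reverted {self._name}')

# hypothetical driver: Transaction([_ToyStep('a'), _ToyStep('b', fail=True), _ToyStep('c')])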
def revert(self):
    # fetch release
    try:
        release = self.github_helper.repository.release_from_tag(self.release_version)
    except NotFoundError:
        release = None
    if release:
        info(f"Deleting Release {self.release_version}")
        if not release.delete():
            raise RuntimeError("Release could not be deleted")
    try:
        tag = self.github_helper.repository.ref(f"tags/{self.release_version}")
    except NotFoundError:
        # ref wasn't created
        return
    if not tag.delete():
        raise RuntimeError("Tag could not be deleted")
def retrieve_scan_result(
    self,
    resource: gci.componentmodel.Resource,
    component: gci.componentmodel.Component,
    group_id: int = None,
):
    metadata = self._metadata(
        resource=resource,
        component=component,
        omit_version=True,  # omit version when searching for existing app
        # (only one component version must exist per group by our chosen definition)
    )
    if not group_id:
        group_id = self._group_id

    existing_products = self._api.list_apps(
        group_id=group_id,
        custom_attribs=metadata,
    )
    if len(existing_products) == 0:
        return None  # no result existed yet

    if len(existing_products) > 1:
        warning(f"found more than one product for image '{resource.access.imageReference}'")
        products_to_rm = existing_products[1:]
        for p in products_to_rm:
            self._api.delete_product(p.product_id())
            info(
                f'deleted product {p.display_name()} '
                f'with product_id: {p.product_id()}'
            )

    # use first (or only) match (we already printed a warning if we found more than one)
    product = existing_products[0]
    product_id = product.product_id()

    # update upload name to reflect new component version (if changed)
    upload_name = self._upload_name(resource, component)
    self._update_product_name(product_id, upload_name)

    # retrieve existing product's details (list of products contained only a subset of data)
    product = self._api.scan_result(product_id=product_id)
    return product
def deploy_webhook_dispatcher_landscape(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    chart_dir: str,
    deployment_name: str,
):
    not_empty(deployment_name)
    chart_dir = os.path.abspath(chart_dir)
    cfg_factory = global_ctx().cfg_factory()

    # set the global context to the cluster specified in the KubernetesConfig
    kubernetes_config_name = webhook_dispatcher_deployment_cfg.kubernetes_config_name()
    kubernetes_config = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    # TLS config
    tls_config_name = webhook_dispatcher_deployment_cfg.tls_config_name()
    tls_config = cfg_factory.tls_config(tls_config_name)
    tls_secret_name = "webhook-dispatcher-tls"
    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    whd_helm_values = create_webhook_dispatcher_helm_values(
        cfg_set=cfg_set,
        webhook_dispatcher_deployment_cfg=webhook_dispatcher_deployment_cfg,
        cfg_factory=cfg_factory,
    )

    # the same kubernetes config was previously looked up a second time here; reuse it
    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        chart_dir,
        deployment_name,
        whd_helm_values,
    )
def deploy(self, definition_descriptor):
    pipeline_definition = definition_descriptor.pipeline
    pipeline_name = definition_descriptor.pipeline_name
    try:
        api = client.from_cfg(
            concourse_cfg=definition_descriptor.concourse_target_cfg,
            team_name=definition_descriptor.concourse_target_team,
        )
        response = api.set_pipeline(
            name=pipeline_name,
            pipeline_definition=pipeline_definition,
        )
        info(
            'Deployed pipeline: ' + pipeline_name +
            ' to team: ' + definition_descriptor.concourse_target_team
        )
        if self.unpause_pipelines:
            info(f'Unpausing pipeline {pipeline_name}')
            api.unpause_pipeline(pipeline_name=pipeline_name)
        if self.expose_pipelines:
            api.expose_pipeline(pipeline_name=pipeline_name)

        deploy_status = DeployStatus.SUCCEEDED
        if response is concourse.client.model.SetPipelineResult.CREATED:
            deploy_status |= DeployStatus.CREATED
        elif response is concourse.client.model.SetPipelineResult.UPDATED:
            pass
        else:
            raise NotImplementedError

        return DeployResult(
            definition_descriptor=definition_descriptor,
            deploy_status=deploy_status,
        )
    except Exception as e:
        import traceback
        traceback.print_exc()
        warning(e)
        return DeployResult(
            definition_descriptor=definition_descriptor,
            deploy_status=DeployStatus.FAILED,
            error_details=traceback.format_exc(),
        )
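# DeployStatus is combined with `|=` above, so it is presumably a flags-style enum. A
# minimal compatible sketch (an assumption - the real definition lives elsewhere):
import enum

class DeployStatusSketch(enum.Flag):
    FAILED = enum.auto()
    SUCCEEDED = enum.auto()
    CREATED = enum.auto()

status = DeployStatusSketch.SUCCEEDED | DeployStatusSketch.CREATED
assert status & DeployStatusSketch.SUCCEEDED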
def render(self, definition_descriptor):
    try:
        definition_descriptor = self._render(definition_descriptor)
        info('rendered pipeline {pn}'.format(pn=definition_descriptor.pipeline_name))
        return RenderResult(
            definition_descriptor,
            render_status=RenderStatus.SUCCEEDED,
        )
    except Exception:
        warning(
            f"erroneous pipeline definition '{definition_descriptor.pipeline_name}' "
            f"in repository '{definition_descriptor.main_repo.get('path')}' on branch "
            f"'{definition_descriptor.main_repo.get('branch')}'"
        )
        traceback.print_exc()
        return RenderResult(
            definition_descriptor,
            render_status=RenderStatus.FAILED,
            error_details=traceback.format_exc(),
        )