def update_repos():
    """ Refreshes the locally cached metadata for all configured Helm chart repositories. """
    logger.info('Updating Helm repository metadata')
    update_cmd = '{helm} repo update'.format(helm=helm_binary)
    baseutils.exe_cmd(update_cmd)
    logger.info('Helm repository metadata updated')
def upgrade_tiller(namespace): """ Updates the version of Tiller in a namespace to match the currently configured Helm client. An exception will be thrown if Tiller is not present. Args: namespace: The namespace of the Tiller deployment """ # Check if Tiller is already at the correct version (rc, output) = baseutils.exe_cmd( '{helm} version --tiller-namespace {namespace} --short'.format( helm=helm_binary, namespace=baseutils.shell_escape(namespace))) output = output.strip().splitlines() client_version = output[0].strip().split()[1] tiller_version = output[1].strip().split()[1] if client_version != tiller_version: deployment = k8s.get('deployment', namespace=namespace, name='tiller-deploy') pod_spec = deployment['spec']['template']['spec'] service_account_name = pod_spec['serviceAccountName'] container_spec = pod_spec['containers'][0] override = None if 'command' in container_spec: override = '"spec.template.spec.containers[0].command"="{{{{{command}}}}}"'.format( command=','.join(container_spec['command'])) baseutils.exe_cmd( '{helm} init --history-max 20 --tiller-namespace {namespace} --service-account {service_account_name} {override} --upgrade' .format(helm=helm_binary, namespace=baseutils.shell_escape(namespace), service_account_name=baseutils.shell_escape( service_account_name), override='--override {override}'.format( override=baseutils.shell_escape(override)) if override else ''))
def add_repo(
        name,
        username,
        password,
        repo_url='https://na.artifactory.swg-devops.com/artifactory/wce-p2paas-helm-virtual'
):
    """ Adds a new helm repository to the helm configuration. It is safe to re-add an already existing repository.

    Args:
        name: The name of the repository to add. This is the local name which will be used to reference the repository
        username: The username for authentication to the repository
        password: The password for authentication to the repository
        repo_url: The url to the repository. This is the full remote url to the Helm repository
            (Optional, default is corporate artifactory wce-p2paas-helm-virtual repository)
    """
    logger.info('Configuring Helm repository {repo}'.format(repo=name))
    escaped_repo = baseutils.shell_escape(name)
    escaped_password = baseutils.shell_escape(password)
    add_cmd = '{helm} repo add {repo} {repo_url} --username {username} --password {password}'.format(
        helm=helm_binary,
        repo=escaped_repo,
        repo_url=baseutils.shell_escape(repo_url),
        username=baseutils.shell_escape(username),
        password=escaped_password)
    # The password is passed via obfuscate so it is masked in any command logging
    baseutils.exe_cmd(add_cmd, obfuscate=escaped_password)
    baseutils.exe_cmd('{helm} repo update {repo}'.format(
        helm=helm_binary, repo=escaped_repo))
def uninstall(release_name, namespace):
    """ Uninstalls a deployed release from a namespace (Helm 3).

    Args:
        release_name: The name of the release to uninstall
        namespace: The namespace the release is deployed into
    """
    # The namespace must be passed via the --namespace flag. As a bare
    # positional argument, Helm would interpret it as the name of a second
    # release to uninstall.
    baseutils.exe_cmd('{helm} uninstall {release_name} --namespace {namespace}'.format(
        helm=helm_binary,
        release_name=baseutils.shell_escape(release_name),
        namespace=baseutils.shell_escape(namespace)))
def delete(release_name, purge=True):
    """ Deletes a deployed release (Helm 2).

    Args:
        release_name: The release to delete
        purge: Whether to purge all Helm history for the release (Optional, default: True)
    """
    purge_flag = '--purge' if purge else ''
    delete_cmd = '{helm} delete {release_name} {purge}'.format(
        helm=helm_binary,
        release_name=baseutils.shell_escape(release_name),
        purge=purge_flag)
    baseutils.exe_cmd(delete_cmd)
def rollback(release, revision):
    """ Rolls a release back to a specified revision.

    Args:
        release: The name of the release to rollback
        revision: The revision number to roll back to
    """
    # int() both validates the revision and guards against shell injection
    rollback_cmd = '{helm} rollback {release} {revision}'.format(
        helm=helm_binary,
        release=baseutils.shell_escape(release),
        revision=int(revision))
    baseutils.exe_cmd(rollback_cmd)
def _attempt_chart_deploy(deploy_cmd, attempt=0):
    """ Attempts to perform a chart deployment. The actual deploy command must be passed as an argument.

    Retries will be attempted if the failure reason can be calculated as being "safe to retry".

    Args:
        deploy_cmd: The command to use to deploy (install/upgrade) the chart
        attempt: The zero-based count of attempts made so far; used to cap retries at 5 (Optional, default: 0)
    """
    try:
        baseutils.exe_cmd(deploy_cmd, working_dir=os.environ.get('HELM_HOME'))
    except Exception as e:
        exc_message = str(e)
        # Transient Kubernetes API-discovery failures are safe to retry after a short back-off
        if 'Could not get apiVersions from Kubernetes: unable to retrieve the complete list of server APIs' in exc_message and attempt < 5:
            time.sleep(10)
            _attempt_chart_deploy(deploy_cmd, attempt=attempt + 1)
        else:
            raise
def search_charts(search_term):
    """ Triggers a helm search command using the passed search term and returns the output.

    Args:
        search_term: A search term that will be passed through to the "helm search <search_term>" command
    Returns:
        The output from the helm search command
    """
    search_cmd = '{helm} search {search_term}'.format(
        helm=helm_binary,
        search_term=baseutils.shell_escape(search_term))
    (rc, output) = baseutils.exe_cmd(search_cmd)
    return output
def test(release, seconds=1260):
    """ Execute Helm tests associated to a deployed release.

    An exception is raised if the tests fail.

    Args:
        release: The name of the release to test
        seconds: The timeout to apply to the tests in seconds (Optional, default: 1260)
    """
    test_cmd = '{helm} test {release} --timeout {timeout}'.format(
        helm=helm_binary,
        release=baseutils.shell_escape(release),
        timeout=int(seconds))
    # exe_cmd raises on a non-zero return code, which surfaces test failures
    (rc, output) = baseutils.exe_cmd(test_cmd)
def install_chart(chart,
                  version,
                  valuesFile,
                  release,
                  namespace,
                  validate_manifest=False,
                  dry_run=False,
                  debug=False):
    """ Install a new chart with a specified release name.

    Args:
        chart: The name of the chart to install
        version: The version of the chart to install
        valuesFile: A file containing chart values
        release: The name to assign to the deployed release
        namespace: The namespace to deploy the release into
        validate_manifest: Validate the manifest meets a predetermined set of criteria.
            See #validate_manifest_requirements for details (Optional, default: False)
        dry_run: Perform the install in dry-run mode. No changes will be made in the Kubernetes cluster (Optional, default: False)
        debug: Perform the install in debug mode, increasing logging output (Optional, default: False)
    """
    logger.info(
        'Installing chart {chart} (release: {release}) valuesFile: {valuesFile} with version {version} {dry_run}'
        .format(chart=chart, release=release, valuesFile=valuesFile,
                version=version, dry_run=dry_run))
    try:
        # First render the chart with --dry-run so the manifest can be inspected.
        # Logging is disabled as output can contain secrets. Note that working_dir,
        # log_level and raise_exception are arguments to exe_cmd, NOT to str.format
        # (str.format silently ignores unused keyword arguments, which would both
        # log secrets and raise instead of reaching the rc handling below).
        (rc, output) = baseutils.exe_cmd(
            '{helm} install {release} {chart} --values {valuesFile} --version {version} --namespace {namespace} --dry-run'
            .format(helm=helm_binary,
                    release=baseutils.shell_escape(release),
                    chart=baseutils.shell_escape(chart),
                    valuesFile=baseutils.shell_escape(valuesFile),
                    version=baseutils.shell_escape(version),
                    namespace=baseutils.shell_escape(namespace)),
            working_dir=os.environ.get('HELM_HOME'),
            log_level=logging.NOTSET,
            raise_exception=False)
        if rc:
            # Surface only the final error line; the full output may contain secrets
            helm_error = output.strip().splitlines()[-1] if output else ''
            if helm_error:
                logger.error(helm_error)
            raise Exception('Failed to parse Helm template. \n{helm_error}'.format(
                helm_error=helm_error))
        elif validate_manifest:
            # Remove non-yaml output, everything above first "MANIFEST:"
            manifest = output.partition('MANIFEST:')[2]
            # Remove non-yaml output after the manifest (trailing log lines begin with "[<year>")
            manifest = manifest.partition(
                '[{year}'.format(year=datetime.now().year))[0]
            errors = validate_manifest_requirements(manifest)
            if errors:
                for error in errors:
                    logger.error(error)
                raise Exception(
                    'Chart pre-approval validation failed. Reason: {failure_reasons}'
                    .format(failure_reasons='. '.join(errors)))
        # Perform the real install. The {dry_run} and {debug} placeholders must be
        # present in the template or the corresponding parameters have no effect.
        deploy_cmd = ('{helm} install {release} {chart} --values {valuesFile} '
                      '--version {version} --namespace {namespace} {dry_run} {debug}').format(
            helm=helm_binary,
            release=baseutils.shell_escape(release),
            chart=baseutils.shell_escape(chart),
            valuesFile=baseutils.shell_escape(valuesFile),
            version=baseutils.shell_escape(version),
            namespace=baseutils.shell_escape(namespace),
            dry_run='--dry-run' if dry_run else '',
            debug='--debug' if debug else '')
        _attempt_chart_deploy(deploy_cmd)
    finally:
        logger.info("completed")
        logger.info(
            'Install request for chart {chart} (release: {release}) valuesFile: {valuesFile} with version {version} passed to Kubernetes'
            .format(chart=chart, release=release, valuesFile=valuesFile,
                    version=version))
def install_helm(helm_version):
    """ Install Helm and Tiller into the Kubernetes infrastructure.

    This assumes Tiller is to be installed in the kube-system namespace. It will upgrade Tiller if it is already present.
    It is safe to call this function multiple times. There are checks for understanding the current state of the
    Helm/Tiller deployment and only necessary updates are made.

    Args:
        helm_version: The version of helm that should be installed, eg: v2.11.1
    """
    # First check and ensure that the correct client version is present
    (rc, output) = baseutils.exe_cmd(
        '{helm} version --client'.format(helm=helm_binary),
        raise_exception=False,
        log_level=logging.NOTSET)
    if rc or helm_version not in output:
        # Client binary is missing or the wrong version: download the requested
        # release tarball and install the binary in place of helm_binary.
        tmp_dir = tempfile.mkdtemp()
        try:
            helm_tar = baseutils.shell_escape(
                os.path.join(tmp_dir, 'helm.tar.gz'))
            baseutils.exe_cmd('/usr/bin/curl -L {url} -o {helm_tar}'.format(
                url=baseutils.shell_escape(
                    'https://storage.googleapis.com/kubernetes-helm/helm-{version}-linux-amd64.tar.gz'
                    .format(version=helm_version)),
                helm_tar=helm_tar))
            baseutils.exe_cmd(
                '/bin/tar -xzvf {helm_tar} -C {tmp_dir} && rm -f {helm_tar}'.
                format(helm_tar=helm_tar,
                       tmp_dir=baseutils.shell_escape(tmp_dir)))
            # helm_binary is stored shell-escaped (wrapped in single quotes);
            # strip the quotes for direct filesystem operations.
            os.rename(os.path.join(tmp_dir, 'linux-amd64', 'helm'),
                      helm_binary.strip('\''))
            os.chmod(helm_binary.strip('\''), 0o755)
        finally:
            # Always remove the temporary download directory, even on failure
            shutil.rmtree(tmp_dir)
    # Secondly check that the correct version of Tiller is installed into the Kubernetes cluster.
    # "helm version" (no --client) fails when Tiller is unreachable/absent.
    (rc, output) = baseutils.exe_cmd('{helm} version'.format(helm=helm_binary),
                                     raise_exception=False,
                                     log_level=logging.NOTSET)
    if rc:
        # Tiller is not installed. We must check if the service account exists yet
        service_accounts = k8s.get('serviceaccount', namespace='kube-system')
        if 'tiller' not in [
                service_account['metadata']['name']
                for service_account in service_accounts
        ]:
            k8s.apply({
                'apiVersion': 'v1',
                'kind': 'ServiceAccount',
                'metadata': {
                    'name': 'tiller',
                    'namespace': 'kube-system'
                }
            })
        # Tiller needs cluster-admin rights; bind its service account if not already bound
        cluster_role_bindings = k8s.get('clusterrolebinding')
        if 'tiller' not in [
                cluster_role_binding['metadata']['name']
                for cluster_role_binding in cluster_role_bindings
        ]:
            k8s.apply({
                'apiVersion': 'rbac.authorization.k8s.io/v1',
                'kind': 'ClusterRoleBinding',
                'metadata': {
                    'name': 'tiller',
                },
                'roleRef': {
                    'apiGroup': 'rbac.authorization.k8s.io',
                    'kind': 'ClusterRole',
                    'name': 'cluster-admin'
                },
                'subjects': [{
                    'kind': 'ServiceAccount',
                    'name': 'tiller',
                    'namespace': 'kube-system'
                }]
            })
        # Install Tiller, overriding its command to use secret-backed storage
        baseutils.exe_cmd(
            '{helm} init --history-max 20 --service-account tiller --override "spec.template.spec.containers[0].command"="{{/tiller,--storage=secret}}"'
            .format(helm=helm_binary))
    elif output.count(helm_version) != 2:
        # Tiller is installed but it is an old version. Upgrade it.
        # (The version string appears twice, once for client and once for server, when both match.)
        baseutils.exe_cmd(
            '{helm} init --history-max 20 --service-account tiller --override "spec.template.spec.containers[0].command"="{{/tiller,--storage=secret}}" --upgrade'
            .format(helm=helm_binary))
    else:
        # Tiller is correctly configured. We still need to init the client to facilitate the usage of helm repositories
        baseutils.exe_cmd('{helm} init --client-only'.format(helm=helm_binary))