def csutil_cluster_setup(cluster_name, service_name, cname):
    """
    Configure (deploy) the SOS IKS tooling into an IKS cluster.

    This will retrieve and reserve an ovpn file from Vault if one is not yet reserved for the cluster.

    Args:
        cluster_name: The name of the cluster to configure
        service_name: The SOS application name that the cluster will be registered under in the SOS Inventory DB
        cname: Must be either "bluemix" or "staging" depending on whether the cluster is production or non-production respectively
    """
    iks_ovpn_config_name = _reserve_iks_ovpn_config_name(cluster_name)
    iks_ovpn_config = vault.read(
        '{vault_path}/files/{config_name}'.format(
            vault_path=vault_iks_ovpn_path,
            config_name=iks_ovpn_config_name),
        'content')
    tmp_dir = tempfile.mkdtemp()
    try:
        # The ovpn config must exist as a file on disk for the csutil CLI to consume it
        config_path = os.path.join(tmp_dir, iks_ovpn_config_name)
        with open(config_path, 'w') as fh:
            fh.write(iks_ovpn_config)
        baseutils.exe_cmd((
            '/usr/local/bin/ibmcloud csutil cluster-setup --crn-service-name {service_name} --crn-cname {cname} '
            '--sos-config-path {config_path} --skip-prometheus=true {cluster_name} --silent'
        ).format(service_name=baseutils.shell_escape(service_name),
                 cname=baseutils.shell_escape(cname),
                 config_path=baseutils.shell_escape(config_path),
                 cluster_name=baseutils.shell_escape(cluster_name)),
            env=_get_csutil_env())
    finally:
        # Always delete the temp directory so the ovpn credential does not linger on disk
        shutil.rmtree(tmp_dir)
def ks_enable_key_protect(cluster_name, region, key_protect_instance_guid, key_id):
    """
    Enables Key Protect on a given IKS cluster. Key Protect will add additional security to the use of Kubernetes secrets.

    If Key Protect is already enabled for the cluster, no action will be taken.
    This function will wait until Key Protect is fully enabled in the environment before returning.

    Args:
        cluster_name: The name of the cluster to update
        region: The region of the Key Protect instance
        key_protect_instance_guid: The GUID of the Key Protect instance
        key_id: The ID of the key inside Key Protect to use. This should be a root key
    """
    cluster = baseutils.retry(ks_cluster_get, cluster_name, interval=30, retry=40)
    if not cluster.key_protect_enabled:
        baseutils.exe_cmd(
            '/usr/local/bin/ibmcloud ks key-protect-enable --cluster {cluster} --key-protect-url {kp_url} --key-protect-instance {kp_guid} --crk {key_id}'
            .format(cluster=baseutils.shell_escape(cluster_name),
                    kp_url=baseutils.shell_escape(
                        '{region}.kms.cloud.ibm.com'.format(region=region)),
                    kp_guid=baseutils.shell_escape(key_protect_instance_guid),
                    key_id=baseutils.shell_escape(key_id)))
        # Poll until the cluster reports Key Protect enabled and the master has finished
        # applying the change (master cycles out of 'Ready' during the update)
        while not cluster.key_protect_enabled or cluster.master_status != 'Ready':
            time.sleep(30)
            cluster = baseutils.retry(ks_cluster_get, cluster_name, interval=30, retry=40)
def test_exe_cmd(self):
    """Exercises baseutils.exe_cmd: success output, failure return codes, env/stdin handling and exception content."""
    # Happy path: output is captured with its trailing newline and rc is 0
    self.assertEqual((0, 'value\n'), baseutils.exe_cmd('echo value'))
    custom_value = 'value1'
    custom_env = os.environ.copy()
    custom_env['custom_value'] = custom_value
    # raise_exception=False must report the failure through the return code instead of raising
    (rc, output) = baseutils.exe_cmd('fake_cmd', raise_exception=False)
    if os.name == 'nt':
        # Windows cmd.exe exits 1 for an unknown command
        self.assertEqual(1, rc)
        self.assertEqual((0, '{result}\n'.format(result=custom_value)), baseutils.exe_cmd('echo %custom_value%', env=custom_env))
        self.assertEqual((0, '{result}\n'.format(result=custom_value)), baseutils.exe_cmd('more', stdin=custom_value))
    else:
        # POSIX shells exit 127 for an unknown command
        self.assertEqual(127, rc)
        self.assertEqual((0, custom_value), baseutils.exe_cmd('echo -n "${custom_value}"', env=custom_env))
        self.assertEqual((0, custom_value), baseutils.exe_cmd('less', stdin=custom_value))
    # By default the command output (shell "not found" text) is embedded in the raised exception
    with self.assertRaises(Exception) as context:
        baseutils.exe_cmd('fake_cmd')
    e_msg = str(context.exception)
    self.assertTrue('is not recognized' in e_msg or 'not found' in e_msg)
    # With log_level NOTSET the output is withheld from the exception message
    with self.assertRaises(Exception) as context:
        baseutils.exe_cmd('fake_cmd', log_level=logging.NOTSET)
    e_msg = str(context.exception)
    self.assertFalse('is not recognized' in e_msg or 'not found' in e_msg)
def update_repos():
    """
    Refreshes the locally cached metadata for every configured Helm chart repository.
    """
    logger.info('Updating Helm repository metadata')
    update_cmd = '{helm} repo update'.format(helm=helm_binary)
    baseutils.exe_cmd(update_cmd)
    logger.info('Helm repository metadata updated')
def upgrade_tiller(namespace): """ Updates the version of Tiller in a namespace to match the currently configured Helm client. An exception will be thrown if Tiller is not present. Args: namespace: The namespace of the Tiller deployment """ # Check if Tiller is already at the correct version (rc, output) = baseutils.exe_cmd('{helm} version --tiller-namespace {namespace} --short'.format( helm=helm_binary, namespace=baseutils.shell_escape(namespace))) output = output.strip().splitlines() client_version = output[0].strip().split()[1] tiller_version = output[1].strip().split()[1] if client_version != tiller_version: deployment = k8s.get('deployment', namespace=namespace, name='tiller-deploy') pod_spec = deployment['spec']['template']['spec'] service_account_name = pod_spec['serviceAccountName'] container_spec = pod_spec['containers'][0] override = None if 'command' in container_spec: override = '"spec.template.spec.containers[0].command"="{{{{{command}}}}}"'.format(command=','.join(container_spec['command'])) baseutils.exe_cmd('{helm} init --history-max 20 --tiller-namespace {namespace} --service-account {service_account_name} {override} --upgrade'.format( helm=helm_binary, namespace=baseutils.shell_escape(namespace), service_account_name=baseutils.shell_escape(service_account_name), override='--override {override}'.format(override=baseutils.shell_escape(override)) if override else ''))
def ks_worker_pool_create_classic(cluster_name, worker_pool_name, machine_type, size_per_zone, hardware, labels=None):
    """
    Creates a new worker pool for a cluster.

    Args:
        cluster_name: The name of the cluster
        worker_pool_name: The name of the worker pool to create
        machine_type: The machine type for the worker pool. See "ibmcloud ks flavors --zone <zone>"
        size_per_zone: The number of nodes per zone in the worker pool
        hardware: Deploy the nodes to either "dedicated" or "shared" infrastructure servers
        labels: A dictionary of key-value pairs representing labels to apply to workers in the worker pool (Optional)
    """
    label_flags = ''
    if labels:
        flag_parts = []
        for (key, value) in labels.items():
            escaped_pair = baseutils.shell_escape('{key}={value}'.format(key=key, value=value))
            flag_parts.append('--label {label}'.format(label=escaped_pair))
        label_flags = ' '.join(flag_parts)
    baseutils.exe_cmd((
        '/usr/local/bin/ibmcloud ks worker-pool create classic --cluster {cluster_name} --machine-type {machine_type} --name {worker_pool_name} '
        '--size-per-zone {size_per_zone} --hardware {hardware} {labels}'
    ).format(cluster_name=baseutils.shell_escape(cluster_name),
             worker_pool_name=baseutils.shell_escape(worker_pool_name),
             machine_type=baseutils.shell_escape(machine_type),
             size_per_zone=int(size_per_zone),  # int() guards against injection via size_per_zone
             hardware=baseutils.shell_escape(hardware),
             labels=label_flags))
def target(region=None, resource_group=None, org=None, space=None):
    """
    Configure the cli to target a specific aspect such as a resource group or region.

    Args:
        region: The region of IBM Cloud to target (Optional)
        resource_group: IBMCloud resource group to target (Optional)
        org: Cloud Foundry org to use. Must be provided together with space (Optional)
        space: Cloud Foundry space to use. Must be provided together with org (Optional)
    Returns:
        If no parameters are passed, a Target object representing the currently configured target information, otherwise None
    """
    target = None
    if region or resource_group or (org and space):
        # Only set what was requested; org/space are applied together or not at all
        baseutils.exe_cmd(
            '/usr/local/bin/ibmcloud target {region} {resource_group} {cf}'.
            format(region='-r {region}'.format(
                region=baseutils.shell_escape(region)) if region else '',
                resource_group='-g {resource_group}'.format(
                    resource_group=baseutils.shell_escape(resource_group))
                if resource_group else '',
                cf='--cf -o {org} -s {space}'.format(
                    org=baseutils.shell_escape(org),
                    space=baseutils.shell_escape(space)) if
                (org and space) else ''))
    else:
        # No arguments: query and return the currently configured target
        (rc, output
         ) = baseutils.exe_cmd('/usr/local/bin/ibmcloud target --output json')
        target = Target(json.loads(output))
    return target
def plugin_install(plugin_name, repository='IBM Cloud', version=None):
    """
    Installs a plugin into the IBM Cloud cli. This is a no-op if the plugin is already installed.

    Args:
        plugin_name: The name of the plugin to install. May also be an absolute path to a local plugin file
        repository: The repository containing the plugin. If None is passed, no repository flag will be passed to the CLI (Optional, default: IBM Cloud)
        version: The version to install. This does not need to be an exact match, for example, it can just specify the major.minor version
                 to pull in the latest associated patch version (Optional, default: latest available if no version present, otherwise no action)
    """
    plugins_dir = os.path.join(
        os.environ.get('IBMCLOUD_HOME', os.environ['HOME']), '.bluemix',
        'plugins')
    logger.info(
        'Acquiring lock to query status of IBM Cloud plugin "{plugin}"'.format(
            plugin=plugin_name))
    # Serialise across processes so concurrent installs cannot corrupt the plugins directory
    with baseutils.local_lock(lock_name=cli_update_lock_name):
        # (Re-)install when: an explicit version is requested, the plugin is a local
        # file path (starts with '/'), or it is not yet present on disk
        if version or plugin_name.startswith('/') or not os.path.exists(
                os.path.join(plugins_dir, plugin_name)):
            baseutils.exe_cmd(
                '/usr/local/bin/ibmcloud plugin install {plugin_name} {repository} {version} -f'
                .format(plugin_name=baseutils.shell_escape(plugin_name),
                        repository='-r {repository}'.format(
                            repository=baseutils.shell_escape(repository))
                        if repository else '',
                        version='-v {version}'.format(
                            version=baseutils.shell_escape(version))
                        if version else ''))
def apply_pull_secret(cluster_name):
    """
    Triggers the application of default image pull secrets to an IKS cluster.

    Args:
        cluster_name: The name of the cluster to apply the pull secrets to
    """
    escaped_cluster = baseutils.shell_escape(cluster_name)
    apply_cmd = '/usr/local/bin/ibmcloud ks cluster pull-secret apply --cluster {cluster}'.format(cluster=escaped_cluster)
    baseutils.exe_cmd(apply_cmd)
def ks_cluster_remove(cluster_name):
    """
    Destroy an IKS cluster. This action is irreversible.

    Args:
        cluster_name: The name of the cluster to destroy
    """
    escaped_cluster = baseutils.shell_escape(cluster_name)
    # -f skips the CLI's interactive confirmation prompt
    baseutils.exe_cmd('/usr/local/bin/ibmcloud ks cluster rm --cluster {cluster} -f'.format(cluster=escaped_cluster))
def ks_cluster_master_refresh(cluster_name):
    """
    Triggers a refresh of the master components of an IKS cluster to apply new configuration.

    There is no outage of the applications in the cluster.

    Args:
        cluster_name: The name of the IKS cluster to refresh
    """
    refresh_cmd = '/usr/local/bin/ibmcloud ks cluster master refresh --cluster {cluster_name}'.format(
        cluster_name=baseutils.shell_escape(cluster_name))
    baseutils.exe_cmd(refresh_cmd)
def sl_block_volume_cancel(volume_id):
    """
    Cancels (deletes) a SoftLayer block storage volume.

    The cancellation is immediate (--immediate) and performed without a confirmation prompt (-f).

    Args:
        volume_id: The ID of the block volume to cancel, e.g. 12345678
    """
    cancel_cmd = '/usr/local/bin/ibmcloud sl block volume-cancel {volume_id} --immediate -f'.format(
        volume_id=baseutils.shell_escape(volume_id))
    baseutils.exe_cmd(cancel_cmd)
def configure_alb(alb_id, enable=True):
    """
    Configures (enables or disables) an IKS ALB.

    Args:
        alb_id: The ID of the ALB to configure
        enable: Whether to enable or disable the ALB. True enables, False disables (Optional, default: True)
    """
    state_flag = '--enable' if enable else '--disable'
    baseutils.exe_cmd('/usr/local/bin/ibmcloud ks alb configure classic --alb-id {alb} {enable}'.format(
        alb=baseutils.shell_escape(alb_id), enable=state_flag))
def rollback(release, revision):
    """
    Rolls a Helm release back to a specified revision.

    Args:
        release: The name of the release to rollback
        revision: The revision number to roll back to
    """
    rollback_cmd = '{helm} rollback {release} {revision}'.format(
        helm=helm_binary,
        release=baseutils.shell_escape(release),
        revision=int(revision))  # int() ensures the revision is numeric before it reaches the shell
    baseutils.exe_cmd(rollback_cmd)
def add_cluster_subnet(cluster_name, subnet_id):
    """
    Adds (binds) a subnet to an IKS cluster.

    Args:
        cluster_name: The name or ID of the IKS cluster to bind the subnet to
        subnet_id: The ID of the subnet to add
    """
    escaped_cluster = baseutils.shell_escape(cluster_name)
    escaped_subnet = baseutils.shell_escape(subnet_id)
    baseutils.exe_cmd('/usr/local/bin/ibmcloud ks cluster subnet add --cluster {cluster_name} --subnet-id {subnet_id}'.format(
        cluster_name=escaped_cluster, subnet_id=escaped_subnet))
def ks_worker_pool_rm(cluster_name, worker_pool_name):
    """
    Delete a worker pool from a cluster.

    Args:
        cluster_name: The name of the cluster to process
        worker_pool_name: The name of the worker pool to remove
    """
    escaped_cluster = baseutils.shell_escape(cluster_name)
    escaped_pool = baseutils.shell_escape(worker_pool_name)
    # -f skips the CLI's interactive confirmation prompt
    baseutils.exe_cmd('/usr/local/bin/ibmcloud ks worker-pool rm --cluster {cluster} --worker-pool {worker_pool} -f'.format(
        cluster=escaped_cluster, worker_pool=escaped_pool))
def delete(release_name, purge=True):
    """
    Deletes a deployed Helm release.

    Args:
        release_name: The release to delete
        purge: Whether to purge all Helm history for the release (Optional, default: True)
    """
    purge_flag = '--purge' if purge else ''
    delete_cmd = '{helm} delete {release_name} {purge}'.format(
        helm=helm_binary,
        release_name=baseutils.shell_escape(release_name),
        purge=purge_flag)
    baseutils.exe_cmd(delete_cmd)
def csutil_cluster_cleanup(cluster_name):
    """
    Removes the SOS IKS tooling from a cluster.

    This will also unreserve the cluster's ovpn configuration from Vault.

    Args:
        cluster_name: The name of the cluster to clean up
    """
    # Release the Vault ovpn reservation first, then tear down the in-cluster tooling
    release_iks_ovpn_config_reservation(cluster_name)
    cleanup_cmd = '/usr/local/bin/ibmcloud csutil cluster-cleanup {cluster_name} --silent'.format(
        cluster_name=baseutils.shell_escape(cluster_name))
    baseutils.exe_cmd(cleanup_cmd, env=_get_csutil_env())
def ks_cluster_master_auditwebhook_set(cluster_name, remote_url):
    """
    Set the audit webhook configuration for a cluster's Kubernetes API server.

    The webhook backend forwards API server audit logs to a remote server.

    Args:
        cluster_name: The name of the IKS cluster to configure
        remote_url: The remote url to send webhooks to, including http:// as appropriate
    """
    escaped_cluster = baseutils.shell_escape(cluster_name)
    escaped_url = baseutils.shell_escape(remote_url)
    baseutils.exe_cmd(
        '/usr/local/bin/ibmcloud ks cluster master audit-webhook set --cluster {cluster_name} --remote-server {remote_url}'.format(
            cluster_name=escaped_cluster, remote_url=escaped_url))
def ks_infra_credentials(username, api_key):
    """
    Set classic infrastructure credentials for the kubernetes service sub-command.

    This requires that a region has been previously set in the cli.

    Args:
        username: Username for access to SoftLayer infrastructure
        api_key: API key for SoftLayer infrastructure
    """
    baseutils.exe_cmd(
        '/usr/local/bin/ibmcloud ks credential set classic --infrastructure-username {username} --infrastructure-api-key {api_key}'
        .format(username=baseutils.shell_escape(username),
                api_key=baseutils.shell_escape(api_key)),
        # Obfuscate the escaped form of the key, since that is the exact text embedded in the logged command
        obfuscate=baseutils.shell_escape(api_key))
def cordon(node=None, labels=None):
    """
    Cordons a specified Kubernetes node by name or a selection of nodes based on label.

    Args:
        node: The node to cordon (Optional)
        labels: A label selector query for nodes to cordon. Can either be a string of the form "label1=value1,labe2=value2"
                or a dictionary with "key: value" pairs (Optional)
    """
    if isinstance(labels, dict):
        # Flatten a dict selector into kubectl's comma-separated key=value syntax
        labels = ','.join('{key}={value}'.format(key=key, value=value) for (key, value) in labels.items())
    node_arg = baseutils.shell_escape(node) if node else ''
    label_arg = '-l {labels}'.format(labels=baseutils.shell_escape(labels)) if labels else ''
    baseutils.exe_cmd('{kubectl} cordon {node} {labels}'.format(
        kubectl=kubectl_binary, node=node_arg, labels=label_arg))
def create_resource_service_key(name, role, instance_id, service_endpoint=None, parameters=None):
    """
    Creates a resource service key via the ibmcloud cli.

    A service key is attached to a service instance and there can be many keys to a single instance.

    Args:
        name: The name of the service key to create
        role: The role to grant the key. This will be service type-specific. Examples are Reader, Writer, Manager
        instance_id: The ID of the service instance to associate the key to
        service_endpoint: Type of service endpoint, 'public' or 'private' (Optional)
        parameters: JSON parameters (as a dictionary) that can be passed to the key creation call (Optional)
    """
    endpoint_arg = ''
    if service_endpoint:
        endpoint_arg = '--service-endpoint {service_endpoint}'.format(
            service_endpoint=baseutils.shell_escape(service_endpoint))
    parameters_arg = ''
    if parameters:
        # The CLI expects the parameters as a JSON-encoded string
        parameters_arg = '--parameters {parameters}'.format(
            parameters=baseutils.shell_escape(json.dumps(parameters)))
    (rc, output) = baseutils.exe_cmd(
        '/usr/local/bin/ibmcloud resource service-key-create {name} {role} --instance-id {instance_id} {service_endpoint} {parameters}'.format(
            name=baseutils.shell_escape(name),
            role=baseutils.shell_escape(role),
            instance_id=baseutils.shell_escape(instance_id),
            service_endpoint=endpoint_arg,
            parameters=parameters_arg))
def describe(kind, namespace=None, name=None, labels=None):
    """
    Describe one or more resources of a specific kind in Kubernetes.

    An exception is thrown for invalid types or a name of a resource that does not exist.

    Args:
        kind: The kind of the resource, eg. deployment
        namespace: The namespace of the resources. Setting to "all" triggers the flag --all-namespaces (Optional, default is as per kubecfg configuration)
        name: The name of an individual resource (Optional, default: retrieve all)
        labels: A label selector query to be passed to kubectl. Can either be a string of the form "label1=value1,labe2=value2"
                or a dictionary with "key: value" pairs (Optional)
    Returns:
        Human readable output from the describe sub-command of kubectl
    """
    if isinstance(labels, dict):
        labels = ','.join('{key}={value}'.format(key=key, value=value) for (key, value) in labels.items())
    if namespace == 'all':
        namespace_arg = '--all-namespaces'
    elif namespace:
        namespace_arg = '-n {namespace}'.format(namespace=baseutils.shell_escape(namespace))
    else:
        namespace_arg = ''
    # Suppress logging when describing secret kinds so secret contents do not land in logs
    log_level = logging.NOTSET if 'secret' in kind.lower() else logging.INFO
    (rc, output) = baseutils.exe_cmd(
        '{kubectl} describe {kind} {name} {namespace} {labels}'.format(
            kubectl=kubectl_binary,
            kind=baseutils.shell_escape(kind),
            name=baseutils.shell_escape(name) if name else '',
            namespace=namespace_arg,
            labels='-l {labels}'.format(labels=baseutils.shell_escape(labels)) if labels else ''),
        log_level=log_level)
    return output
def _attempt_chart_deploy(deploy_cmd, attempt=0):
    """
    Attempts to perform a chart deployment. The actual deploy command must be passed as an argument.

    Retries will be attempted if the failure reason can be calculated as being "safe to retry".

    Args:
        deploy_cmd: The command to use to deploy (install/upgrade) the chart
        attempt: The number of retries already performed; retrying stops once this reaches 5 (Optional, default: 0)
    """
    while True:
        try:
            baseutils.exe_cmd(deploy_cmd, working_dir=os.environ.get('HELM_HOME'))
            return
        except Exception as e:
            # Transient Kubernetes API discovery failures are safe to retry after a pause
            retryable = 'Could not get apiVersions from Kubernetes: unable to retrieve the complete list of server APIs' in str(e)
            if retryable and attempt < 5:
                time.sleep(10)
                attempt += 1
            else:
                raise
def rollout_status(kind, name, namespace=None, watch=False):
    """
    Queries a resource for its rollout status. By default, it will not follow (watch) the status.

    If kubectl exits with non-zero, an exception will be raised.
    The exception will still contain the output from kubectl for upstream parsing.

    Args:
        kind: The type of resource to check. Must be either deployment, statefulset or daemonset
        name: The name of the resource to query
        namespace: The namespace of the resource to query (Optional)
        watch: If True, kubectl will wait for the rollout to complete
    Returns:
        The output from kubectl
    """
    namespace_arg = '-n {namespace}'.format(namespace=baseutils.shell_escape(namespace)) if namespace else ''
    status_cmd = '{kubectl} rollout status {kind} {name} {namespace} --watch={watch}'.format(
        kubectl=kubectl_binary,
        kind=baseutils.shell_escape(kind),
        name=baseutils.shell_escape(name),
        namespace=namespace_arg,
        watch='true' if watch else 'false')
    (rc, output) = baseutils.exe_cmd(status_cmd, raise_exception=False)
    if rc:
        # Non-zero exit: raise with kubectl's output so callers can still parse it
        raise Exception(output)
    return output
def get_kube_versions(version_to_match=None):
    """
    Retrieves a list of available Kubernetes versions for IKS.

    If version_to_match is provided, only the version that matches the supplied major and minor
    version will be returned, or None if it is not found.

    Args:
        version_to_match: A version for which the major and minor version should be matched for the returned supported version (Optional)
    Returns:
        A list of supported K8s versions. If version_to_match is specified, a single supported version or None
    """
    (rc, output) = baseutils.exe_cmd('/usr/local/bin/ibmcloud ks versions --json')
    kube_versions = json.loads(output)['kubernetes']
    if version_to_match:
        # Only the major and minor components take part in the match
        parts = version_to_match.split('.')
        major = int(parts[0])
        minor = int(parts[1])
        result = next(
            ('{major}.{minor}.{patch}'.format(major=major, minor=minor, patch=kube_version['patch'])
             for kube_version in kube_versions
             if kube_version['major'] == major and kube_version['minor'] == minor),
            None)
    else:
        # Ensure versions are returned in ascending order
        result = sorted(
            '{major}.{minor}.{patch}'.format(
                major=kube_version['major'],
                minor=kube_version['minor'],
                patch=kube_version['patch'])
            for kube_version in kube_versions)
    return result
def get_resource_service_instances(name=None, service=None, type=None, location=None):
    """
    Retrieves a list of resource service instances from IBM Cloud using "ibmcloud resource service-instances".

    The output is scoped to the currently configured resource group.
    If name, service or location is provided, the resultant list is filtered to those that match.

    Args:
        name: The name of the service instance to filter by. Names are non-unique (Optional)
        service: The service that the instance is an instance of, eg. logdna, kms, cloud-object-storage (Optional)
        type: Accepted for interface compatibility but currently has no effect (Optional)
        location: The location of the instances to query. "global" is a valid location (Optional)
    Returns:
        A list of ServiceInstance objects
    """
    # NOTE(review): the "type" parameter (which also shadows the builtin) is never applied to
    # the CLI command below — confirm whether a --type flag was intended
    (rc, output) = baseutils.exe_cmd(
        '/usr/local/bin/ibmcloud resource service-instances {service} {location} --output json'
        .format(service='--service-name {service}'.format(
            service=baseutils.shell_escape(service)) if service else '',
                location='--location {location}'.format(
                    location=baseutils.shell_escape(location))
                if location else ''))
    # The CLI may emit empty/null output when there are no instances; fall back to an empty list
    instances = ServiceInstance.parse_service_instances(
        json.loads(output) or [])
    if name:
        # Name filtering is performed client-side as names are non-unique
        instances = [
            instance for instance in instances if instance.name == name
        ]
    return instances
def ks_cluster_ls():
    """
    Retrieves a list of available IKS clusters.

    Returns:
        A list of IKSCluster objects
    """
    (rc, output) = baseutils.exe_cmd('/usr/local/bin/ibmcloud ks cluster ls --json')
    clusters_json = json.loads(output)
    return IKSCluster.parse_iks_clusters(clusters_json)
def label(label, kind, namespace=None, name=None):
    """
    Applies labels to a Kubernetes resource.

    Args:
        label: The label specification to apply, passed (escaped) to "kubectl label"
        kind: The type of resource to label
        namespace: The namespace of the resource to label. Not all resources will be namespaced (Optional)
        name: The name of the resource to label (Optional)
    """
    name_arg = baseutils.shell_escape(name) if name else ''
    namespace_arg = '-n {namespace}'.format(namespace=baseutils.shell_escape(namespace)) if namespace else ''
    # --overwrite allows updating label keys that already exist on the resource
    baseutils.exe_cmd('{kubectl} label {kind} {name} {namespace} {label} --overwrite'.format(
        kubectl=kubectl_binary,
        kind=baseutils.shell_escape(kind),
        name=name_arg,
        namespace=namespace_arg,
        label=baseutils.shell_escape(label)))
def add_repo(name, username, password, repo_url='https://na.artifactory.swg-devops.com/artifactory/wce-p2paas-helm-virtual'):
    """
    Adds a new helm repository to the helm configuration.

    It is safe to re-add an already existing repository.

    Args:
        name: The name of the repository to add. This is the local name which will be used to reference the repository
        username: The username for authentication to the repository
        password: The password for authentication to the repository
        repo_url: The url to the repository. This is the full remote url to the Helm repository
                  (Optional, default is corporate artifactory wce-p2paas-helm-virtual repository)
    """
    logger.info('Configuring Helm repository {repo}'.format(repo=name))
    escaped_name = baseutils.shell_escape(name)
    escaped_password = baseutils.shell_escape(password)
    add_cmd = '{helm} repo add {repo} {repo_url} --username {username} --password {password}'.format(
        helm=helm_binary,
        repo=escaped_name,
        repo_url=baseutils.shell_escape(repo_url),
        username=baseutils.shell_escape(username),
        password=escaped_password)
    # Obfuscate the escaped password so it is not written to logs
    baseutils.exe_cmd(add_cmd, obfuscate=escaped_password)
    baseutils.exe_cmd('{helm} repo update {repo}'.format(helm=helm_binary, repo=escaped_name))