Example #1
import json
import logging

logger = logging.getLogger(__name__)
# k8s is assumed to be a project-internal helper module wrapping kubectl-style operations


def set_cluster_autoscaler(enabled, worker_pool_names=None, new_worker_pool_names=None):
    """
    Enables or disables the cluster autoscaler in a cluster.
    This neither installs nor uninstalls the autoscaler; it only updates the configuration of the autoscaler if it is present.
    If the autoscaler is installed but a given worker pool is not already present in the autoscaler config, it will not be added.
    Args:
        enabled: Whether to enable or disable the cluster autoscaler. True = enable, False = disable
        worker_pool_names: If present, only the listed pools will be enabled/disabled (optional; default: all worker pools currently configured)
        new_worker_pool_names: If worker_pool_names is also specified, element n of worker_pool_names will be renamed to element n of new_worker_pool_names.
                               Each element of worker_pool_names must have a corresponding entry at the same index in new_worker_pool_names (optional)
    Returns: A list of the worker pools that had their configuration changed
    """
    modified_pools = []
    # The IKS cluster autoscaler keeps its per-pool settings in the iks-ca-configmap ConfigMap
    if k8s.exists('configmap', 'kube-system', 'iks-ca-configmap'):
        config_map = k8s.get('configmap', 'kube-system', 'iks-ca-configmap')
        worker_pools_config = json.loads(config_map['data']['workerPoolsConfig.json'])
        # Renaming is only attempted when both name lists are provided and align one-to-one
        rename_worker_pools = new_worker_pool_names and worker_pool_names and len(new_worker_pool_names) == len(worker_pool_names)
        for pool_config in worker_pools_config:
            if not worker_pool_names or pool_config['name'] in worker_pool_names:
                if rename_worker_pools:
                    pool_config['name'] = new_worker_pool_names[worker_pool_names.index(pool_config['name'])]
                    pool_config['enabled'] = enabled
                    modified_pools.append(pool_config['name'])
                elif pool_config['enabled'] != enabled:
                    pool_config['enabled'] = enabled
                    modified_pools.append(pool_config['name'])
        if modified_pools:
            config_map['data']['workerPoolsConfig.json'] = json.dumps(worker_pools_config, ensure_ascii=False)  # TODO: Remove ensure_ascii when migration to py3 is complete
            k8s.apply(config_map)
    else:
        logger.info('Cluster autoscaler is not present')
    return modified_pools
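
A minimal usage sketch; the pool names 'default' and 'compute' are hypothetical, and each workerPoolsConfig.json entry carries at least the "name" and "enabled" fields used above:

# Pause autoscaling for selected pools during maintenance
changed = set_cluster_autoscaler(False, worker_pool_names=['default', 'compute'])
logger.info('Autoscaling disabled for: %s', changed)

# Re-enable autoscaling, renaming 'compute' to 'compute-v2' in the autoscaler config
set_cluster_autoscaler(True, worker_pool_names=['compute'], new_worker_pool_names=['compute-v2'])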
Example #2
import json
import logging

logger = logging.getLogger(__name__)
# k8s is assumed to be the same project-internal Kubernetes helper as in Example #1


def set_recovery_tool(enabled):
    """
    Sets the ibm-worker-recovery tool's "Enabled" attribute, which governs whether the tool monitors nodes for failures.
    This can be used to disable the tool while performing maintenance on IKS nodes; otherwise, an intermediate node state could trigger the tool to queue up a reload.
    If the recovery tool is not present in the environment, nothing is done.
    Currently, only KUBEAPI checks are enabled/disabled.
    Args:
        enabled: Boolean value to define if the ibm-worker-recovery tool should be enabled.
    """
    # Each key of the ibm-worker-recovery-checks ConfigMap holds one JSON-encoded check definition
    if k8s.exists('configmap', 'kube-system', 'ibm-worker-recovery-checks'):
        config_map = k8s.get('configmap', 'kube-system', 'ibm-worker-recovery-checks')
        for check in config_map['data']:
            check_config = json.loads(config_map['data'][check])
            if check_config['Check'] == 'KUBEAPI':
                check_config['Enabled'] = enabled
                config_map['data'][check] = json.dumps(check_config, ensure_ascii=False)  # TODO: Remove ensure_ascii when migration to py3 is complete
        k8s.apply(config_map)
    else:
        logger.info('IBM Auto-Recovery tool is not present')
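
A usage sketch: bracketing IKS node maintenance with monitoring paused, so intermediate node states cannot queue spurious reloads (perform_node_maintenance is a hypothetical placeholder):

set_recovery_tool(False)  # pause KUBEAPI health checks
try:
    perform_node_maintenance()  # hypothetical maintenance routine
finally:
    set_recovery_tool(True)  # always resume monitoring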
Example #3
import logging
import os
import shutil
import tempfile

# baseutils, k8s and helm_binary (the shell-escaped path to the helm client) are assumed
# to be provided by the surrounding module


def install_helm(helm_version):
    """
    Install Helm and Tiller into the Kubernetes infrastructure.
    This assumes Tiller is to be installed in the kube-system namespace. Tiller will be upgraded if it is already present.
    It is safe to call this function multiple times: the current state of the Helm/Tiller deployment is checked and only the necessary updates are made.
    Args:
        helm_version: The version of Helm that should be installed, e.g. v2.11.1
    """
    # First, check that the correct client version is present
    (rc, output) = baseutils.exe_cmd('{helm} version --client'.format(helm=helm_binary),
                                     raise_exception=False, log_level=logging.NOTSET)
    if rc or helm_version not in output:
        tmp_dir = tempfile.mkdtemp()
        try:
            helm_tar = baseutils.shell_escape(os.path.join(tmp_dir, 'helm.tar.gz'))
            helm_url = baseutils.shell_escape(
                'https://storage.googleapis.com/kubernetes-helm/helm-{version}-linux-amd64.tar.gz'.format(version=helm_version))
            baseutils.exe_cmd('/usr/bin/curl -L {url} -o {helm_tar}'.format(url=helm_url, helm_tar=helm_tar))
            baseutils.exe_cmd('/bin/tar -xzvf {helm_tar} -C {tmp_dir} && rm -f {helm_tar}'.format(
                helm_tar=helm_tar, tmp_dir=baseutils.shell_escape(tmp_dir)))
            os.rename(os.path.join(tmp_dir, 'linux-amd64', 'helm'), helm_binary.strip('\''))
            os.chmod(helm_binary.strip('\''), 0o755)
        finally:
            shutil.rmtree(tmp_dir)
    # Secondly, check that the correct version of Tiller is installed in the Kubernetes cluster
    (rc, output) = baseutils.exe_cmd('{helm} version'.format(helm=helm_binary),
                                     raise_exception=False, log_level=logging.NOTSET)
    if rc:
        # Tiller is not installed. We must check if the service account exists yet
        service_accounts = k8s.get('serviceaccount', namespace='kube-system')
        if 'tiller' not in [service_account['metadata']['name'] for service_account in service_accounts]:
            k8s.apply({
                'apiVersion': 'v1',
                'kind': 'ServiceAccount',
                'metadata': {
                    'name': 'tiller',
                    'namespace': 'kube-system'
                }
            })
        cluster_role_bindings = k8s.get('clusterrolebinding')
        if 'tiller' not in [cluster_role_binding['metadata']['name'] for cluster_role_binding in cluster_role_bindings]:
            k8s.apply({
                'apiVersion': 'rbac.authorization.k8s.io/v1',
                'kind': 'ClusterRoleBinding',
                'metadata': {
                    'name': 'tiller'
                },
                'roleRef': {
                    'apiGroup': 'rbac.authorization.k8s.io',
                    'kind': 'ClusterRole',
                    'name': 'cluster-admin'
                },
                'subjects': [{
                    'kind': 'ServiceAccount',
                    'name': 'tiller',
                    'namespace': 'kube-system'
                }]
            })
        baseutils.exe_cmd('{helm} init --history-max 20 --service-account tiller '
                          '--override "spec.template.spec.containers[0].command"="{{/tiller,--storage=secret}}"'.format(helm=helm_binary))
    elif output.count(helm_version) != 2:
        # Tiller is installed but at an old version. Upgrade it
        baseutils.exe_cmd('{helm} init --history-max 20 --service-account tiller '
                          '--override "spec.template.spec.containers[0].command"="{{/tiller,--storage=secret}}" --upgrade'.format(helm=helm_binary))
    else:
        # Tiller is correctly configured. We still need to init the client to facilitate the usage of helm repositories
        baseutils.exe_cmd('{helm} init --client-only'.format(helm=helm_binary))
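
A usage sketch; helm repo update is a standard Helm v2 client command, and helm_binary is assumed to be configured as above:

install_helm('v2.11.1')  # idempotent: safe to call again after upgrades
baseutils.exe_cmd('{helm} repo update'.format(helm=helm_binary))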
Example #4
    # Method of a unittest.TestCase; mock_exe_cmd is injected by a mock.patch on the shell-exec helper
    def test_apply(self, mock_exe_cmd):
        mock_exe_cmd.return_value = (0, '')  # simulate a successful kubectl invocation
        self.assertIsNone(k8s.apply({}))
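
For completeness, a minimal harness in which this test could run; the patch target k8s.baseutils.exe_cmd is an assumption about where the k8s module resolves its shell-exec helper:

import unittest
from unittest import mock

import k8s  # the module under test


class K8sApplyTest(unittest.TestCase):

    @mock.patch('k8s.baseutils.exe_cmd')  # hypothetical patch target
    def test_apply(self, mock_exe_cmd):
        mock_exe_cmd.return_value = (0, '')  # pretend the underlying kubectl call succeeded
        self.assertIsNone(k8s.apply({}))


if __name__ == '__main__':
    unittest.main()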