def configure_kubernetes_service(service, base_args, extra_args_key):
    """Assemble the full flag set for ``service`` and push it via ``snap set``.

    Combines the charm's base arguments with operator-supplied extras from
    the ``extra_args_key`` config option, clearing flags the operator has
    removed so they fall back to charm defaults.
    """
    opts = FlagManager(service)
    to_add, to_remove = get_config_args(extra_args_key)

    # Options dropped from the config option revert to charm defaults.
    # The snap needs an explicit "null" assignment to forget a value.
    for removed in to_remove:
        hookenv.log('Removing option: {}'.format(removed))
        opts.destroy(removed)
        check_call(['snap', 'set', service, '{}=null'.format(removed)])

    # Charm-supplied defaults.
    for key, value in base_args.items():
        opts.add(key, value, strict=True)

    # Operator extras win over defaults, so clear any stale value first.
    for key, value in to_add:
        hookenv.log('Adding option: {} {}'.format(key, value))
        opts.destroy(key)
        opts.add(key, value, strict=True)

    check_call(['snap', 'set', service] + opts.to_s().split(' '))
def setup_non_leader_authentication():
    """Mirror the leader's authentication credential files on this unit.

    Pulls the service-account key, basic-auth and known-tokens files from
    the leader (overwriting local copies), registers them with the
    apiserver / controller-manager flag managers, and marks authentication
    as set up. Returns early (to retry on a later hook) while leader data
    is unavailable, or when nothing changed since the last successful run.
    """
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    keys = [service_key, basic_auth, known_tokens]
    # The source of truth for non-leaders is the leader.
    # Therefore we overwrite_local with whatever the leader has.
    if not get_keys_from_leader(keys, overwrite_local=True):
        # the keys were not retrieved. Non-leaders have to retry.
        return

    if not any_file_changed(keys) and is_state('authentication.setup'):
        # No change detected and we have already setup the authentication
        return

    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    api_opts = FlagManager('kube-apiserver')
    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)

    controller_opts = FlagManager('kube-controller-manager')
    controller_opts.add('service-account-private-key-file', service_key)

    # Clearing this state forces the master services to be reconfigured
    # and restarted with the refreshed credentials.
    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
def cleanup_pre_snap_services():
    """Tear down the pre-snap worker install: states, services, files, flags."""
    # Forget the old install state.
    remove_state('kubernetes-worker.components.installed')

    # Stop the legacy (non-snap) daemons.
    for svc in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(svc))
        service_stop(svc)

    # Delete leftover unit files, defaults, binaries and config trees.
    leftovers = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    for path in leftovers:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)

    # Drop any flags recorded for the legacy services.
    for svc in ('kubelet', 'kube-proxy'):
        FlagManager(svc).destroy_all()
def set_privileged(privileged, render_config=True):
    """Update the KUBE_ALLOW_PRIV flag for kubelet and re-render config files.

    If the flag already matches the requested value, this is a no-op.

    :param str privileged: "true" or "false"
    :param bool render_config: whether to render new config files
    :return: True if the flag was changed, else false
    """
    # NOTE(review): the juju state below is toggled even when the flag value
    # is unchanged and we return early — confirm this is intentional.
    if privileged == "true":
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')

    flag = '--allow-privileged'
    kube_allow_priv_opts = FlagManager('KUBE_ALLOW_PRIV')
    if kube_allow_priv_opts.get(flag) == privileged:
        # Flag isn't changing, nothing to do
        return False

    hookenv.log('Setting {}={}'.format(flag, privileged))

    # Update --allow-privileged flag value
    kube_allow_priv_opts.add(flag, privileged, strict=True)

    # re-render config with new options
    if render_config:
        render_init_scripts()

    # signal that we need a kubelet restart
    set_state('kubernetes-worker.kubelet.restart')

    return True
def start_worker(kube_api, kube_control, cni):
    ''' Start kubelet using the provided API and DNS info.

    Only reconfigures and restarts the worker services when either the
    API server list or the DNS details have changed since the last run.
    '''
    config = hookenv.config()
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()

    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns)):

        # Create FlagManager for kubelet and add dns flags
        opts = FlagManager('kubelet')
        opts.add('--cluster-dns', dns['sdn-ip'])  # FIXME sdn-ip needs a rename
        opts.add('--cluster-domain', dns['domain'])

        # Create FlagManager for KUBE_MASTER and add api server addresses
        kube_master_opts = FlagManager('KUBE_MASTER')
        kube_master_opts.add('--master', ','.join(servers))

        # set --allow-privileged flag for kubelet
        set_privileged(
            "true" if config['allow-privileged'] == "true" else "false",
            render_config=False)

        create_config(servers[0])
        render_init_scripts()
        set_state('kubernetes-worker.config.created')
        restart_unit_services()
        update_kubelet_status()
def migrate_from_pre_snaps():
    """Migrate a pre-snap master install to the snap-based layout.

    Clears legacy states, stops the old binaries, moves the auth files
    into /root/cdk, removes leftover artifacts, and wipes the old flag
    managers so flags are rebuilt for the snap services.
    """
    # remove old states
    remove_state('kubernetes.components.installed')
    remove_state('kubernetes.dashboard.available')
    remove_state('kube-dns.available')
    remove_state('kubernetes-master.app_version.set')

    # disable old services
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        host.service_stop(service)

    # rename auth files
    os.makedirs('/root/cdk', exist_ok=True)
    rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
                           '/root/cdk/serviceaccount.key')
    rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
                           '/root/cdk/basic_auth.csv')
    rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
                           '/root/cdk/known_tokens.csv')

    # cleanup old files
    files = [
        "/lib/systemd/system/kube-apiserver.service",
        "/lib/systemd/system/kube-controller-manager.service",
        "/lib/systemd/system/kube-scheduler.service",
        "/etc/default/kube-defaults",
        "/etc/default/kube-apiserver.defaults",
        "/etc/default/kube-controller-manager.defaults",
        "/etc/default/kube-scheduler.defaults",
        "/srv/kubernetes",
        "/home/ubuntu/kubectl",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kube-apiserver",
        "/usr/local/bin/kube-controller-manager",
        "/usr/local/bin/kube-scheduler",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)

    # clear the flag managers
    FlagManager('kube-apiserver').destroy_all()
    FlagManager('kube-controller-manager').destroy_all()
    FlagManager('kube-scheduler').destroy_all()
def configure_scheduler():
    """Record the kube-scheduler flags, push them into the snap, and
    request a service restart."""
    opts = FlagManager('kube-scheduler')
    for flag, value in (('v', '2'),
                        ('logtostderr', 'true'),
                        ('master', 'http://127.0.0.1:8080')):
        opts.add(flag, value)

    check_call(['snap', 'set', 'kube-scheduler'] + opts.to_s().split(' '))
    set_state('kube-scheduler.do-restart')
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.

    Runs only on the leader: reuses credentials from an old leadership
    broadcast when available, otherwise (re)creates the basic-auth file,
    token files and service-account key, then broadcasts all three files
    to the other units via leader data.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    keys = [service_key, basic_auth, known_tokens]
    # Try first to fetch data from an old leadership broadcast.
    if not get_keys_from_leader(keys) \
            or is_state('reconfigure.authentication.setup'):
        # Re-use the previous admin password if one was recorded.
        last_pass = get_password('basic_auth.csv', 'admin')
        setup_basic_auth(last_pass, 'admin', 'admin')

        if not os.path.isfile(known_tokens):
            setup_tokens(None, 'admin', 'admin')
            setup_tokens(None, 'kubelet', 'kubelet')
            setup_tokens(None, 'kube_proxy', 'kube_proxy')

        # Generate the default service account token key
        os.makedirs('/root/cdk', exist_ok=True)
        if not os.path.isfile(service_key):
            cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
            check_call(cmd)
        remove_state('reconfigure.authentication.setup')

    api_opts.add('service-account-key-file', service_key)
    controller_opts.add('service-account-private-key-file', service_key)

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    charms.leadership.leader_set(leader_data)

    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
def upgrade_charm():
    """Reset worker state after a charm upgrade so configuration is rebuilt."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # GPU-related kubelet flags differ between k8s versions, so clear
    # them (and the gpu.enabled state) to force recomputation.
    remove_state('kubernetes-worker.gpu.enabled')
    opts = FlagManager('kubelet')
    for gpu_flag in ('feature-gates', 'experimental-nvidia-gpus'):
        opts.destroy(gpu_flag)

    # Drop the states that gate config rendering so it happens again.
    for state in ('kubernetes-worker.cni-plugins.installed',
                  'kubernetes-worker.config.created',
                  'kubernetes-worker.ingress.available'):
        remove_state(state)

    set_state('kubernetes-worker.restart-needed')
def set_privileged():
    """Resolve the allow-privileged config option ('auto' follows the GPU
    state) and record it as a kubelet flag plus a juju state."""
    value = hookenv.config('allow-privileged')
    if value == 'auto':
        # 'auto' means: privileged exactly when GPUs are in use.
        value = 'true' if is_state('kubernetes-worker.gpu.enabled') else 'false'

    hookenv.log('Setting {}={}'.format('allow-privileged', value))
    FlagManager('kubelet').add('allow-privileged', value)

    if value == 'true':
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')
def setup_non_leader_authentication():
    """Fetch the shared credential files from the leader and register them
    with the apiserver / controller-manager flag managers."""
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    if not get_keys_from_leader([service_key, basic_auth, known_tokens]):
        # Leader data not broadcast yet; a later hook will retry.
        return

    api_opts = FlagManager('kube-apiserver')
    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)

    FlagManager('kube-controller-manager').add(
        'service-account-private-key-file', service_key)

    set_state('authentication.setup')
def enable_gpu():
    """Enable GPU usage on this node.

    Requires privileged containers: when allow-privileged is "false" the
    function only reports how to enable it and returns. Also bails out if
    the NVIDIA driver cannot be reached. On success, adds the
    version-appropriate kubelet GPU flag, labels the node, and requests a
    service restart.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.')
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    kubelet_opts = FlagManager('kubelet')
    # Kubelets before 1.6 use a dedicated GPU-count flag; newer ones use a
    # feature gate instead.
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('feature-gates', 'Accelerators=true')

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def disable_gpu():
    """Turn GPU support off on this node.

    Runs when gpu mode is active but the operator has set
    allow-privileged="false"; without privileged containers the GPU
    kubelet flags must be withdrawn.
    """
    hookenv.log('Disabling gpu mode')

    opts = FlagManager('kubelet')
    if get_version('kubelet') < (1, 6):
        # Old kubelets took a dedicated GPU-count flag.
        opts.destroy('experimental-nvidia-gpus')
    else:
        opts.remove('feature-gates', 'Accelerators=true')

    # Strip the advertising labels from the node.
    for label in ('gpu', 'cuda'):
        _apply_node_label(label, delete=True)

    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def enable_gpu():
    """Enable GPU usage on this node.

    Requires privileged containers: when allow-privileged is "false" the
    function only reports how to enable it and returns. Also bails out if
    the NVIDIA driver cannot be reached. On success, adds the
    version-appropriate kubelet GPU flag, labels the node, and requests a
    service restart.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    kubelet_opts = FlagManager('kubelet')
    # Kubelets before 1.6 use a dedicated GPU-count flag; newer ones use a
    # feature gate instead.
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('feature-gates', 'Accelerators=true')

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def remove_installed_state():
    """Clear install-related states and stale GPU kubelet flags."""
    remove_state('kubernetes-worker.components.installed')

    # GPU-related flags can differ between k8s releases, so wipe them
    # (and the gpu.enabled state) to force reconfiguration.
    remove_state('kubernetes-worker.gpu.enabled')
    opts = FlagManager('kubelet')
    for flag in ('--feature-gates', '--experimental-nvidia-gpus'):
        opts.destroy(flag)
def start_worker(kube_api, kube_dns, cni):
    ''' Start kubelet using the provided API and DNS info.

    Only reconfigures and restarts the worker services when either the
    API server list or the DNS details have changed since the last run.
    '''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_dns.details()

    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns)):
        # Initialize a FlagManager object to add flags to unit data.
        opts = FlagManager('kubelet')
        # Append the DNS flags + data to the FlagManager object.
        opts.add('--cluster-dns', dns['sdn-ip'])  # FIXME sdn-ip needs a rename
        opts.add('--cluster-domain', dns['domain'])

        create_config(servers[0])
        render_init_scripts(servers)
        set_state('kubernetes-worker.config.created')
        restart_unit_services()
        update_kubelet_status()
def upgrade_charm():
    """Reset worker state after a charm upgrade so configuration is rebuilt."""
    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # GPU kubelet flags are version-specific; clear them and the
    # gpu.enabled state so they get recomputed for the new release.
    remove_state('kubernetes-worker.gpu.enabled')
    opts = FlagManager('kubelet')
    for flag in ('feature-gates', 'experimental-nvidia-gpus'):
        opts.destroy(flag)

    # Drop the states that gate config rendering so it happens again.
    for state in ('kubernetes-worker.cni-plugins.installed',
                  'kubernetes-worker.config.created',
                  'kubernetes-worker.ingress.available'):
        remove_state(state)

    set_state('kubernetes-worker.restart-needed')
def enable_gpu():
    """Enable GPU usage on this node.

    Requires privileged containers: when allow-privileged is "false" the
    function only reports how to enable it and returns. Otherwise adds the
    version-appropriate kubelet GPU flag, forces privileged mode on,
    re-renders the init scripts, labels the node, and requests a kubelet
    restart.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')

    kubelet_opts = FlagManager('kubelet')
    # Kubelets before 1.6 use a dedicated GPU-count flag; newer ones use a
    # feature gate instead.
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('--experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('--feature-gates', 'Accelerators=true')

    # enable privileged mode and re-render config files
    set_privileged("true", render_config=False)
    render_init_scripts()

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    # Not sure why this is necessary, but if you don't run this, k8s will
    # think that the node has 0 gpus (as shown by the output of
    # `kubectl get nodes -o yaml`
    check_call(['nvidia-smi'])

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.kubelet.restart')
def setup_non_leader_authentication():
    """Materialize the leader's credential files on this unit, one by one.

    For each missing credential file, pulls its contents from the leader
    data broadcast; waits (returns early) when the leader has not yet
    published a file. Once all files exist, registers them with the
    apiserver / controller-manager flag managers.
    """
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    # This races with other codepaths, and seems to require being created first
    # This block may be extracted later, but for now seems to work as intended
    os.makedirs('/root/cdk', exist_ok=True)

    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    # Set an array for looping logic
    keys = [service_key, basic_auth, known_tokens]
    for k in keys:
        # If the path does not exist, assume we need it
        if not os.path.exists(k):
            # Fetch data from leadership broadcast
            contents = charms.leadership.leader_get(k)
            # Default to logging the warning and wait for leader data to be set
            if contents is None:
                msg = "Waiting on leaders crypto keys."
                hookenv.status_set('waiting', msg)
                hookenv.log('Missing content for file {}'.format(k))
                return
            # Write out the file and move on to the next item
            with open(k, 'w+') as fp:
                fp.write(contents)

    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)
    controller_opts.add('service-account-private-key-file', service_key)

    set_state('authentication.setup')
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.

    Runs only on the leader: creates the basic-auth file, token files and
    service-account key when they do not yet exist, then broadcasts all
    three files to the other units via leader data.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    if not os.path.isfile(basic_auth):
        setup_basic_auth('admin', 'admin', 'admin')
    if not os.path.isfile(known_tokens):
        setup_tokens(None, 'admin', 'admin')
        setup_tokens(None, 'kubelet', 'kubelet')
        setup_tokens(None, 'kube_proxy', 'kube_proxy')

    # Generate the default service account token key
    os.makedirs('/root/cdk', exist_ok=True)
    if not os.path.isfile(service_key):
        cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
        check_call(cmd)

    api_opts.add('service-account-key-file', service_key)
    controller_opts.add('service-account-private-key-file', service_key)

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    charms.leadership.leader_set(leader_data)

    set_state('authentication.setup')
def configure_master_services():
    ''' Add remaining flags for the master services and configure snaps to use
    them

    Builds the full flag sets for kube-apiserver, kube-controller-manager
    and kube-scheduler (TLS paths, privilege mode, version-dependent
    admission controllers) and pushes each set into its snap.
    '''

    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')
    scheduler_opts.add('v', '2')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support

    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    # Strip admission controllers the running apiserver version predates.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)

    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('min-resync-period', '3m')
    controller_opts.add('v', '2')
    controller_opts.add('root-ca-file', ca_cert_path)
    controller_opts.add('logtostderr', 'true')
    controller_opts.add('master', 'http://127.0.0.1:8080')

    # NOTE(review): 'v'='2' was already added at the top of this function;
    # this repeats it — confirm FlagManager.add is idempotent here.
    scheduler_opts.add('v', '2')
    scheduler_opts.add('logtostderr', 'true')
    scheduler_opts.add('master', 'http://127.0.0.1:8080')

    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)

    cmd = (
        ['snap', 'set', 'kube-controller-manager'] +
        controller_opts.to_s().split(' ')
    )
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
    check_call(cmd)
def handle_etcd_relation(reldata):
    ''' Save the client credentials and set appropriate daemon flags when
    etcd declares itself as available'''
    connection_string = reldata.get_connection_string()

    # The etcd client TLS material lives under /root/cdk/etcd.
    etcd_dir = '/root/cdk/etcd'
    ca = os.path.join(etcd_dir, 'client-ca.pem')
    key = os.path.join(etcd_dir, 'client-key.pem')
    cert = os.path.join(etcd_dir, 'client-cert.pem')

    # Persist the credentials carried on the relation to disk.
    reldata.save_client_credentials(key, cert, ca)

    api_opts = FlagManager('kube-apiserver')

    # Never use stale data, always prefer whats coming in during context
    # building. if its stale, its because whats in unitdata is stale
    data = api_opts.data
    if data.get('etcd-servers-strict') or data.get('etcd-servers'):
        for stale in ('etcd-cafile', 'etcd-keyfile', 'etcd-certfile'):
            api_opts.destroy(stale)
        api_opts.destroy('etcd-servers', strict=True)
        api_opts.destroy('etcd-servers')

    # Record the fresh etcd client configuration for the apiserver.
    api_opts.add('etcd-cafile', ca)
    api_opts.add('etcd-keyfile', key)
    api_opts.add('etcd-certfile', cert)
    api_opts.add('etcd-servers', connection_string, strict=True)
def setup_authentication():
    '''Setup basic authentication and token access for the cluster.

    Creates the basic-auth and token files when missing, ensures the
    default service-account key exists, and registers the corresponding
    apiserver / controller-manager flags.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    api_opts.add('--basic-auth-file', '/srv/kubernetes/basic_auth.csv')
    api_opts.add('--token-auth-file', '/srv/kubernetes/known_tokens.csv')
    api_opts.add('--service-cluster-ip-range', service_cidr())
    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    htaccess = '/srv/kubernetes/basic_auth.csv'
    if not os.path.isfile(htaccess):
        setup_basic_auth('admin', 'admin', 'admin')

    known_tokens = '/srv/kubernetes/known_tokens.csv'
    if not os.path.isfile(known_tokens):
        setup_tokens(None, 'admin', 'admin')
        setup_tokens(None, 'kubelet', 'kubelet')
        setup_tokens(None, 'kube_proxy', 'kube_proxy')

    # Generate the default service account token key.
    # Fix: only generate the key when it does not exist yet — regenerating
    # it on every invocation invalidates all previously issued
    # service-account tokens (matches the guard used by the sibling
    # setup_leader_authentication implementations).
    service_key = '/etc/kubernetes/serviceaccount.key'
    os.makedirs('/etc/kubernetes', exist_ok=True)
    if not os.path.isfile(service_key):
        cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
        check_call(cmd)

    api_opts.add('--service-account-key-file', service_key)
    controller_opts.add('--service-account-private-key-file', service_key)

    set_state('authentication.setup')
def configure_controller_manager():
    """Record the kube-controller-manager flags, push them into the snap,
    and request a service restart."""
    opts = FlagManager('kube-controller-manager')

    # The cluster CA path comes from the tls-client layer options.
    ca_cert_path = layer.options('tls-client').get('ca_certificate_path')

    # Default to 3 minute resync. TODO: Make this configureable?
    for flag, value in (('min-resync-period', '3m'),
                        ('v', '2'),
                        ('root-ca-file', ca_cert_path),
                        ('logtostderr', 'true'),
                        ('master', 'http://127.0.0.1:8080')):
        opts.add(flag, value)

    check_call(['snap', 'set', 'kube-controller-manager'] +
               opts.to_s().split(' '))
    set_state('kube-controller-manager.do-restart')
def configure_apiserver():
    """Build the complete kube-apiserver flag set and push it to the snap.

    Combines charm defaults (TLS, admission control, privilege mode) with
    operator-provided extra args, removing args the operator has dropped,
    then requests an apiserver restart.
    """
    # TODO: investigate if it's possible to use config file to store args
    # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/315
    # Handle api-extra-args config option
    to_add, to_remove = get_config_args()

    api_opts = FlagManager('kube-apiserver')

    # Remove arguments that are no longer provided as config option
    # this allows them to be reverted to charm defaults
    for arg in to_remove:
        hookenv.log('Removing option: {}'.format(arg))
        api_opts.destroy(arg)
        # We need to "unset" options by setting their value to "null" string
        cmd = ['snap', 'set', 'kube-apiserver', '{}=null'.format(arg)]
        check_call(cmd)

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support

    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    # Strip admission controllers the running apiserver version predates.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)

    # Add operator-provided arguments, this allows operators
    # to override defaults
    for arg in to_add:
        hookenv.log('Adding option: {} {}'.format(arg[0], arg[1]))
        # Make sure old value is gone
        api_opts.destroy(arg[0])
        api_opts.add(arg[0], arg[1])

    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)

    set_state('kube-apiserver.do-restart')
def render_init_scripts(api_servers):
    ''' We have related to either an api server or a load balancer connected
    to the apiserver. Render the config files and prepare for launch

    :param api_servers: list of apiserver endpoints; joined into the
        kube_api_endpoint template value.
    '''
    context = {}
    context.update(hookenv.config())

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    context['ca_cert_path'] = layer_options.get('ca_certificate_path')
    context['client_cert_path'] = layer_options.get('client_certificate_path')
    context['client_key_path'] = layer_options.get('client_key_path')

    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    context.update({'kube_api_endpoint': ','.join(api_servers),
                    'JUJU_UNIT_NAME': unit_name})

    # Create a flag manager for kubelet to render kubelet_opts.
    kubelet_opts = FlagManager('kubelet')
    # Declare to kubelet it needs to read from kubeconfig
    kubelet_opts.add('--require-kubeconfig', None)
    kubelet_opts.add('--kubeconfig', kubeconfig_path)
    kubelet_opts.add('--network-plugin', 'cni')
    context['kubelet_opts'] = kubelet_opts.to_s()

    # Create a flag manager for kube-proxy to render kube_proxy_opts.
    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('--kubeconfig', kubeconfig_path)
    context['kube_proxy_opts'] = kube_proxy_opts.to_s()

    os.makedirs('/var/lib/kubelet', exist_ok=True)

    # Set the user when rendering config
    # (fix: the original assigned this twice back to back; once suffices)
    context['user'] = '******'

    render('kube-default', '/etc/default/kube-default', context)
    render('kubelet.defaults', '/etc/default/kubelet', context)
    render('kube-proxy.defaults', '/etc/default/kube-proxy', context)
    render('kube-proxy.service', '/lib/systemd/system/kube-proxy.service',
           context)
    render('kubelet.service', '/lib/systemd/system/kubelet.service', context)
def handle_etcd_relation(reldata):
    ''' Save the client credentials and set appropriate daemon flags when
    etcd declares itself as available

    :param reldata: the etcd relation data object providing the connection
        string and the client TLS material.
    '''
    connection_string = reldata.get_connection_string()
    # Define where the etcd tls files will be kept.
    etcd_dir = '/etc/ssl/etcd'
    # Create paths to the etcd client ca, key, and cert file locations.
    ca = os.path.join(etcd_dir, 'client-ca.pem')
    key = os.path.join(etcd_dir, 'client-key.pem')
    cert = os.path.join(etcd_dir, 'client-cert.pem')

    # Save the client credentials (in relation data) to the paths provided.
    reldata.save_client_credentials(key, cert, ca)

    api_opts = FlagManager('kube-apiserver')

    # Never use stale data, always prefer whats coming in during context
    # building. if its stale, its because whats in unitdata is stale
    data = api_opts.data
    if data.get('--etcd-servers-strict') or data.get('--etcd-servers'):
        api_opts.destroy('--etcd-cafile')
        api_opts.destroy('--etcd-keyfile')
        api_opts.destroy('--etcd-certfile')
        api_opts.destroy('--etcd-servers', strict=True)
        api_opts.destroy('--etcd-servers')

    # Set the apiserver flags in the options manager
    api_opts.add('--etcd-cafile', ca)
    api_opts.add('--etcd-keyfile', key)
    api_opts.add('--etcd-certfile', cert)
    api_opts.add('--etcd-servers', connection_string, strict=True)
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.

    Runs only on the leader: reuses credentials from an old leadership
    broadcast when available, otherwise (re)creates the basic-auth file,
    an (empty) known-tokens file and the service-account key, then
    broadcasts all three files to the other units via leader data.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    keys = [service_key, basic_auth, known_tokens]
    # Try first to fetch data from an old leadership broadcast.
    if not get_keys_from_leader(keys) \
            or is_state('reconfigure.authentication.setup'):
        # Re-use the previous admin password if one was recorded.
        last_pass = get_password('basic_auth.csv', 'admin')
        setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')

        if not os.path.isfile(known_tokens):
            touch(known_tokens)

        # Generate the default service account token key
        os.makedirs('/root/cdk', exist_ok=True)
        if not os.path.isfile(service_key):
            cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
            check_call(cmd)
        remove_state('reconfigure.authentication.setup')

    api_opts.add('service-account-key-file', service_key)
    controller_opts.add('service-account-private-key-file', service_key)

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    charms.leadership.leader_set(leader_data)

    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
def configure_worker_services(api_servers, dns, cluster_cidr):
    ''' Add remaining flags for the worker services and configure snaps to use
    them '''
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Static kubelet flags, registered in order with the FlagManager.
    kubelet_flags = (
        ('require-kubeconfig', 'true'),
        ('kubeconfig', kubeconfig_path),
        ('network-plugin', 'cni'),
        ('v', '0'),
        ('address', '0.0.0.0'),
        ('port', '10250'),
        ('cluster-dns', dns['sdn-ip']),
        ('cluster-domain', dns['domain']),
        ('anonymous-auth', 'false'),
        ('client-ca-file', ca_cert_path),
        ('tls-cert-file', server_cert_path),
        ('tls-private-key-file', server_key_path),
        ('logtostderr', 'true'),
        ('fail-swap-on', 'false'),
    )
    kubelet_opts = FlagManager('kubelet')
    for flag, value in kubelet_flags:
        kubelet_opts.add(flag, value)

    kube_proxy_opts = FlagManager('kube-proxy')
    for flag, value in (
            ('cluster-cidr', cluster_cidr),
            ('kubeconfig', kubeproxyconfig_path),
            ('logtostderr', 'true'),
            ('v', '0')):
        kube_proxy_opts.add(flag, value)
    # Pick one apiserver endpoint for kube-proxy; added strict like the
    # other single-valued options in this file.
    kube_proxy_opts.add('master', random.choice(api_servers), strict=True)

    # In an lxc container, disable per-core conntrack sizing — presumably
    # because conntrack limits cannot be raised there; confirm.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts.add('conntrack-max-per-core', '0')

    cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
    check_call(cmd)
def configure_master_services():
    ''' Add remaining flags for the master services and configure snaps to use
    them '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')
    scheduler_opts.add('v', '2')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Toggle allow-privileged and keep the reactive state in sync; the
    # strict add presumably replaces any previously stored value — see
    # FlagManager.
    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('client-ca-file', ca_cert_path)
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support

    admission_control = [
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    # DefaultTolerationSeconds is dropped for apiservers older than 1.6.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)

    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('min-resync-period', '3m')
    controller_opts.add('v', '2')
    controller_opts.add('root-ca-file', ca_cert_path)
    controller_opts.add('logtostderr', 'true')
    controller_opts.add('master', 'http://127.0.0.1:8080')

    # NOTE(review): 'v' was already added for the scheduler at the top of
    # this function; this is a duplicate add — confirm FlagManager
    # deduplicates repeated keys before removing either call.
    scheduler_opts.add('v', '2')
    scheduler_opts.add('logtostderr', 'true')
    scheduler_opts.add('master', 'http://127.0.0.1:8080')

    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)
    cmd = (['snap', 'set', 'kube-controller-manager'] +
           controller_opts.to_s().split(' '))
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
    check_call(cmd)
def render_files():
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    config = hookenv.config()
    context = {}
    # Charm configuration first, then derived arch/networking values.
    context.update(config)
    context.update({
        'arch': arch(),
        'master_address': hookenv.unit_get('private-address'),
        'public_address': hookenv.unit_get('public-address'),
        'private_address': hookenv.unit_get('private-address'),
    })

    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')

    # TLS material locations come from the tls-client layer options.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Handle static options for now.
    for flag, value in (
            ('--min-request-timeout', '300'),
            ('--v', '4'),
            ('--client-ca-file', ca_cert_path),
            ('--tls-cert-file', server_cert_path),
            ('--tls-private-key-file', server_key_path)):
        api_opts.add(flag, value)
    scheduler_opts.add('--v', '2')

    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('--min-resync-period', '3m')
    controller_opts.add('--v', '2')
    controller_opts.add('--root-ca-file', ca_cert_path)

    context['kube_apiserver_flags'] = api_opts.to_s()
    context['kube_scheduler_flags'] = scheduler_opts.to_s()
    context['kube_controller_manager_flags'] = controller_opts.to_s()

    # Render the configuration files that contain parameters for
    # the apiserver, scheduler, and controller-manager.
    for service in ('kube-apiserver', 'kube-controller-manager',
                    'kube-scheduler'):
        render_service(service, context)

    # explicitly render the generic defaults file
    render('kube-defaults.defaults', '/etc/default/kube-defaults', context)

    # when files change on disk, we need to inform systemd of the changes
    call(['systemctl', 'daemon-reload'])
    for service in ('kube-apiserver', 'kube-controller-manager',
                    'kube-scheduler'):
        call(['systemctl', 'enable', service])
def configure_worker_services(api_servers, dns, cluster_cidr):
    ''' Add remaining flags for the worker services and configure snaps to use
    them '''
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.add('require-kubeconfig', 'true')
    kubelet_opts.add('kubeconfig', kubeconfig_path)
    kubelet_opts.add('network-plugin', 'cni')
    kubelet_opts.add('v', '0')
    kubelet_opts.add('address', '0.0.0.0')
    kubelet_opts.add('port', '10250')
    kubelet_opts.add('cluster-dns', dns['sdn-ip'])
    kubelet_opts.add('cluster-domain', dns['domain'])
    kubelet_opts.add('anonymous-auth', 'false')
    kubelet_opts.add('client-ca-file', ca_cert_path)
    kubelet_opts.add('tls-cert-file', server_cert_path)
    kubelet_opts.add('tls-private-key-file', server_key_path)
    kubelet_opts.add('logtostderr', 'true')

    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('cluster-cidr', cluster_cidr)
    # NOTE(review): kube-proxy reuses the kubelet kubeconfig here, while the
    # other configure_worker_services variant in this file uses a dedicated
    # kubeproxyconfig_path — confirm which is intended for this version.
    kube_proxy_opts.add('kubeconfig', kubeconfig_path)
    kube_proxy_opts.add('logtostderr', 'true')
    kube_proxy_opts.add('v', '0')
    # One apiserver endpoint is chosen at random for this worker.
    kube_proxy_opts.add('master', random.choice(api_servers), strict=True)

    # In an lxc container, disable per-core conntrack sizing.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts.add('conntrack-max-per-core', '0')

    cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
    check_call(cmd)
def render_files():
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    context = {}
    config = hookenv.config()
    # Add the charm configuration data to the context.
    context.update(config)

    # Update the context with extra values: arch, and networking information
    context.update({'arch': arch(),
                    'master_address': hookenv.unit_get('private-address'),
                    'public_address': hookenv.unit_get('public-address'),
                    'private_address': hookenv.unit_get('private-address')})

    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')
    scheduler_opts.add('--v', '2')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # set --allow-privileged flag for kube-apiserver
    set_privileged(
        "true" if config['allow-privileged'] == "true" else "false",
        render_config=False)

    # Handle static options for now
    api_opts.add('--min-request-timeout', '300')
    api_opts.add('--v', '4')
    api_opts.add('--client-ca-file', ca_cert_path)
    api_opts.add('--tls-cert-file', server_cert_path)
    api_opts.add('--tls-private-key-file', server_key_path)
    api_opts.add('--kubelet-certificate-authority', ca_cert_path)
    api_opts.add('--kubelet-client-certificate', client_cert_path)
    api_opts.add('--kubelet-client-key', client_key_path)
    # Needed for upgrade from 1.5.x to 1.6.0
    # XXX: support etcd3
    api_opts.add('--storage-backend', 'etcd2')

    admission_control = [
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    # DefaultTolerationSeconds is dropped for apiservers older than 1.6.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    api_opts.add(
        '--admission-control', ','.join(admission_control), strict=True)

    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('--min-resync-period', '3m')
    controller_opts.add('--v', '2')
    controller_opts.add('--root-ca-file', ca_cert_path)

    context.update({
        'kube_allow_priv': FlagManager('KUBE_ALLOW_PRIV').to_s(),
        'kube_apiserver_flags': api_opts.to_s(),
        'kube_scheduler_flags': scheduler_opts.to_s(),
        'kube_controller_manager_flags': controller_opts.to_s(),
    })

    # Render the configuration files that contains parameters for
    # the apiserver, scheduler, and controller-manager
    render_service('kube-apiserver', context)
    render_service('kube-controller-manager', context)
    render_service('kube-scheduler', context)

    # explicitly render the generic defaults file
    render('kube-defaults.defaults', '/etc/default/kube-defaults', context)

    # when files change on disk, we need to inform systemd of the changes
    call(['systemctl', 'daemon-reload'])
    call(['systemctl', 'enable', 'kube-apiserver'])
    call(['systemctl', 'enable', 'kube-controller-manager'])
    call(['systemctl', 'enable', 'kube-scheduler'])
def render_init_scripts(api_servers):
    ''' We have related to either an api server or a load balancer connected
    to the apiserver. Render the config files and prepare for launch '''
    ctx = {}
    ctx.update(hookenv.config())

    # TLS material locations from the tls-client layer options.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    ctx['kube_api_endpoint'] = ','.join(api_servers)
    ctx['JUJU_UNIT_NAME'] = unit_name

    kubelet_opts = FlagManager('kubelet')
    for flag, value in (
            ('--require-kubeconfig', None),
            ('--kubeconfig', kubeconfig_path),
            ('--network-plugin', 'cni'),
            ('--anonymous-auth', 'false'),
            ('--client-ca-file', ca_cert_path),
            ('--tls-cert-file', server_cert_path),
            ('--tls-private-key-file', server_key_path)):
        kubelet_opts.add(flag, value)
    ctx['kubelet_opts'] = kubelet_opts.to_s()

    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('--kubeconfig', kubeconfig_path)
    ctx['kube_proxy_opts'] = kube_proxy_opts.to_s()

    os.makedirs('/var/lib/kubelet', exist_ok=True)

    # Render defaults and systemd units for kubelet and kube-proxy.
    render('kube-default', '/etc/default/kube-default', ctx)
    render('kubelet.defaults', '/etc/default/kubelet', ctx)
    render('kubelet.service', '/lib/systemd/system/kubelet.service', ctx)
    render('kube-proxy.defaults', '/etc/default/kube-proxy', ctx)
    render('kube-proxy.service', '/lib/systemd/system/kube-proxy.service',
           ctx)
def configure_apiserver():
    '''Configure the kube-apiserver snap.

    Removes config-option arguments that were reverted, applies the charm's
    default flags, layers operator-provided overrides on top, and pushes the
    final flag set with `snap set`.
    '''
    # TODO: investigate if it's possible to use config file to store args
    # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/315

    # Handle api-extra-args config option
    to_add, to_remove = get_config_args()

    api_opts = FlagManager('kube-apiserver')

    # Remove arguments that are no longer provided as config option
    # this allows them to be reverted to charm defaults
    for arg in to_remove:
        hookenv.log('Removing option: {}'.format(arg))
        api_opts.destroy(arg)
        # We need to "unset" options by setting their value to "null" string
        cmd = ['snap', 'set', 'kube-apiserver', '{}=null'.format(arg)]
        check_call(cmd)

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Toggle allow-privileged and keep the reactive state in sync.
    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support

    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')
    api_opts.add('authorization-mode', auth_mode, strict=True)

    # Drop admission plugins the installed apiserver is too old to know.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)

    # Add operator-provided arguments, this allows operators
    # to override defaults
    for arg in to_add:
        hookenv.log('Adding option: {} {}'.format(arg[0], arg[1]))
        # Make sure old value is gone
        api_opts.destroy(arg[0])
        # strict=True so the operator value replaces the charm default —
        # consistent with configure_kubernetes_service in this file.
        api_opts.add(arg[0], arg[1], strict=True)

    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)

    set_state('kube-apiserver.do-restart')
def setup_authentication():
    '''Setup basic authentication and token access for the cluster.

    Creates the basic-auth and known-tokens files when missing, ensures the
    service-account key exists, and registers the matching flags with the
    apiserver and controller-manager flag managers.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/etc/kubernetes/serviceaccount.key'

    api_opts.add('--basic-auth-file', '/srv/kubernetes/basic_auth.csv')
    api_opts.add('--token-auth-file', '/srv/kubernetes/known_tokens.csv')
    api_opts.add('--service-cluster-ip-range', service_cidr())
    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    htaccess = '/srv/kubernetes/basic_auth.csv'
    if not os.path.isfile(htaccess):
        setup_basic_auth('admin', 'admin', 'admin')
    known_tokens = '/srv/kubernetes/known_tokens.csv'
    if not os.path.isfile(known_tokens):
        setup_tokens(None, 'admin', 'admin')
        setup_tokens(None, 'kubelet', 'kubelet')
        setup_tokens(None, 'kube_proxy', 'kube_proxy')
    # Generate the default service account token key
    os.makedirs('/etc/kubernetes', exist_ok=True)
    # Only generate the key once: regenerating it on every invocation would
    # invalidate all previously issued service account tokens (the sibling
    # setup_leader_authentication uses the same guard).
    if not os.path.isfile(service_key):
        cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
        check_call(cmd)
    api_opts.add('--service-account-key-file', service_key)
    controller_opts.add('--service-account-private-key-file', service_key)

    set_state('authentication.setup')
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.

    Leader-only path: creates the basic-auth, known-tokens, and
    service-account key files when missing, registers the matching flags,
    and broadcasts the file contents over leadership data so non-leader
    units can copy them.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/etc/kubernetes/serviceaccount.key'
    basic_auth = '/srv/kubernetes/basic_auth.csv'
    known_tokens = '/srv/kubernetes/known_tokens.csv'

    api_opts.add('--basic-auth-file', basic_auth)
    api_opts.add('--token-auth-file', known_tokens)
    api_opts.add('--service-cluster-ip-range', service_cidr())
    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    if not os.path.isfile(basic_auth):
        setup_basic_auth('admin', 'admin', 'admin')
    if not os.path.isfile(known_tokens):
        setup_tokens(None, 'admin', 'admin')
        setup_tokens(None, 'kubelet', 'kubelet')
        setup_tokens(None, 'kube_proxy', 'kube_proxy')
    # Generate the default service account token key
    os.makedirs('/etc/kubernetes', exist_ok=True)
    # Only generate the key once: rewriting it on every invocation would
    # invalidate previously issued service account tokens and change the
    # contents broadcast to non-leaders below.
    if not os.path.isfile(service_key):
        cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
        check_call(cmd)
    api_opts.add('--service-account-key-file', service_key)
    controller_opts.add('--service-account-private-key-file', service_key)

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/etc/kubernetes/serviceaccount.key': 'RSA:2471731...'}
    charms.leadership.leader_set(leader_data)

    set_state('authentication.setup')
def render_files():
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    config = hookenv.config()
    context = dict(config)
    # Extra derived values: arch and networking information.
    context['arch'] = arch()
    context['master_address'] = hookenv.unit_get('private-address')
    context['public_address'] = hookenv.unit_get('public-address')
    context['private_address'] = hookenv.unit_get('private-address')

    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')

    # TLS material locations from the tls-client layer options.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Handle static options for now.
    for flag, value in (
            ('--min-request-timeout', '300'),
            ('--v', '4'),
            ('--client-ca-file', ca_cert_path),
            ('--tls-cert-file', server_cert_path),
            ('--tls-private-key-file', server_key_path)):
        api_opts.add(flag, value)
    scheduler_opts.add('--v', '2')

    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('--min-resync-period', '3m')
    controller_opts.add('--v', '2')
    controller_opts.add('--root-ca-file', ca_cert_path)

    context.update({
        'kube_apiserver_flags': api_opts.to_s(),
        'kube_scheduler_flags': scheduler_opts.to_s(),
        'kube_controller_manager_flags': controller_opts.to_s()
    })

    # Render the configuration files that contain parameters for
    # the apiserver, scheduler, and controller-manager.
    render_service('kube-apiserver', context)
    render_service('kube-controller-manager', context)
    render_service('kube-scheduler', context)

    # explicitly render the generic defaults file
    render('kube-defaults.defaults', '/etc/default/kube-defaults', context)

    # when files change on disk, we need to inform systemd of the changes
    call(['systemctl', 'daemon-reload'])
    for unit in ('kube-apiserver', 'kube-controller-manager',
                 'kube-scheduler'):
        call(['systemctl', 'enable', unit])
def setup_non_leader_authentication():
    '''Copy the leader's auth material and register the matching flags.

    Non-leader units fetch the service-account key, basic-auth file, and
    known-tokens file from the leadership broadcast; if any piece is not yet
    available the unit enters a waiting status and returns early.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/etc/kubernetes/serviceaccount.key'
    basic_auth = '/srv/kubernetes/basic_auth.csv'
    known_tokens = '/srv/kubernetes/known_tokens.csv'

    # This races with other codepaths, and seems to require being created
    # first. This block may be extracted later, but for now seems to work
    # as intended.
    os.makedirs('/etc/kubernetes', exist_ok=True)
    os.makedirs('/srv/kubernetes', exist_ok=True)

    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    # Set an array for looping logic
    keys = [service_key, basic_auth, known_tokens]
    for k in keys:
        # If the path does not exist, assume we need it
        if not os.path.exists(k):
            # Fetch data from leadership broadcast
            contents = charms.leadership.leader_get(k)
            # Default to logging the warning and wait for leader data to be
            # set
            if contents is None:
                msg = "Waiting on leaders crypto keys."
                hookenv.status_set('waiting', msg)
                hookenv.log('Missing content for file {}'.format(k))
                return
            # Write out the file and move on to the next item
            with open(k, 'w+') as fp:
                fp.write(contents)

    api_opts.add('--basic-auth-file', basic_auth)
    api_opts.add('--token-auth-file', known_tokens)
    api_opts.add('--service-cluster-ip-range', service_cidr())
    api_opts.add('--service-account-key-file', service_key)
    controller_opts.add('--service-account-private-key-file', service_key)

    set_state('authentication.setup')
def render_init_scripts(api_servers):
    ''' We have related to either an api server or a load balancer connected
    to the apiserver. Render the config files and prepare for launch '''
    template_ctx = dict(hookenv.config())

    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    template_ctx.update({'kube_api_endpoint': ','.join(api_servers),
                         'JUJU_UNIT_NAME': unit_name})

    kubelet_opts = FlagManager('kubelet')
    for flag, value in (
            ('--require-kubeconfig', None),
            ('--kubeconfig', kubeconfig_path),
            ('--network-plugin', 'cni'),
            ('--anonymous-auth', 'false'),
            ('--client-ca-file', ca_cert_path),
            ('--tls-cert-file', server_cert_path),
            ('--tls-private-key-file', server_key_path)):
        kubelet_opts.add(flag, value)
    template_ctx['kubelet_opts'] = kubelet_opts.to_s()

    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('--kubeconfig', kubeconfig_path)
    template_ctx['kube_proxy_opts'] = kube_proxy_opts.to_s()

    os.makedirs('/var/lib/kubelet', exist_ok=True)

    # Render defaults files and systemd units for kubelet and kube-proxy.
    render('kube-default', '/etc/default/kube-default', template_ctx)
    render('kubelet.defaults', '/etc/default/kubelet', template_ctx)
    render('kubelet.service', '/lib/systemd/system/kubelet.service',
           template_ctx)
    render('kube-proxy.defaults', '/etc/default/kube-proxy', template_ctx)
    render('kube-proxy.service', '/lib/systemd/system/kube-proxy.service',
           template_ctx)
def configure_controller_manager():
    '''Assemble kube-controller-manager flags and apply them via snap set,
    then request a service restart through the reactive state.'''
    controller_opts = FlagManager('kube-controller-manager')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')

    # Default to 3 minute resync. TODO: Make this configureable?
    for flag, value in (
            ('min-resync-period', '3m'),
            ('v', '2'),
            ('root-ca-file', ca_cert_path),
            ('logtostderr', 'true'),
            ('master', 'http://127.0.0.1:8080')):
        controller_opts.add(flag, value)

    cmd = (['snap', 'set', 'kube-controller-manager'] +
           controller_opts.to_s().split(' '))
    check_call(cmd)

    set_state('kube-controller-manager.do-restart')