def bootstrap_charm_deps():
    """
    Set up the base charm dependencies so that the reactive system can run.

    Installs the wheelhouse (and optionally a Python 3 virtualenv) exactly
    once, flagging completion via ``wheelhouse/.bootstrapped``.  On later
    invocations it only re-activates the venv for the current process.
    May not return: ``reload_interpreter`` re-execs the interpreter.
    """
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpip = os.path.join(vbin, 'pip')
    vpy = os.path.join(vbin, 'python')
    if os.path.exists('wheelhouse/.bootstrapped'):
        # Already bootstrapped on a previous hook; just ensure the venv
        # (when enabled) is active for this interpreter before returning.
        from charms import layer
        cfg = layer.options('basic')
        if cfg.get('use_venv') and '.venv' not in sys.executable:
            # activate the venv
            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
            reload_interpreter(vpy)
        return
    # bootstrap wheelhouse
    if os.path.exists('wheelhouse'):
        apt_install(['python3-pip', 'python3-yaml'])
        from charms import layer
        cfg = layer.options('basic')
        # include packages defined in layer.yaml
        apt_install(cfg.get('packages', []))
        # if we're using a venv, set it up
        if cfg.get('use_venv'):
            if not os.path.exists(venv):
                apt_install(['python-virtualenv'])
                cmd = ['virtualenv', '--python=python3', venv]
                if cfg.get('include_system_packages'):
                    cmd.append('--system-site-packages')
                check_call(cmd)
            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
            pip = vpip
        else:
            pip = 'pip3'
            # save a copy of system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip'):
                shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
        # need newer pip, to fix spurious Double Requirement error
        # https://github.com/pypa/pip/issues/56
        check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
                    'pip'])
        # install the rest of the wheelhouse deps
        check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse'] +
                   glob('wheelhouse/*'))
        if not cfg.get('use_venv'):
            # restore system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip.save'):
                shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
                os.remove('/usr/bin/pip.save')
        # flag us as having already bootstrapped so we don't do it again
        open('wheelhouse/.bootstrapped', 'w').close()
        # Ensure that the newly bootstrapped libs are available.
        # Note: this only seems to be an issue with namespace packages.
        # Non-namespace-package libs (e.g., charmhelpers) are available
        # without having to reload the interpreter. :/
        reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
def stop_spark():
    """Stop the Livy REST server, then Apache Spark, clearing both states."""
    hookenv.status_set("maintenance", "Stopping Livy REST server")
    livy = Livy(DistConfig(data=layer.options("livy")))
    livy.close_ports()
    livy.stop()
    remove_state("livy.started")
    hookenv.status_set("maintenance", "Stopping Apache Spark")
    spark = Spark(DistConfig(data=layer.options("apache-spark")))
    spark.close_ports()
    spark.stop()
    remove_state("spark.started")
def install_livy(hadoop):  # pylint: disable=w0613
    """Install the Livy REST server once its resources verify."""
    livy = Livy(DistConfig(data=layer.options("livy")))
    if not livy.verify_resources():
        return
    hookenv.status_set("maintenance", "Installing Livy REST server")
    livy.install()
    set_state("livy.installed")
def install():
    ''' Install the docker daemon, and supporting tooling '''
    # Often when building layer-docker based subordinates, you dont need to
    # incur the overhead of installing docker. This tuneable layer option
    # allows you to disable the exec of that install routine, and instead short
    # circuit immediately to docker.available, so you can charm away!
    layer_opts = layer.options('docker')
    if layer_opts['skip-install']:
        set_state('docker.available')
        set_state('docker.ready')
        return
    status_set('maintenance', 'Installing AUFS and other tools')
    # BUG FIX: check_output() returns bytes on Python 3; without decoding,
    # the package name renders as "linux-image-extra-b'4.4.0-xx'" and the
    # apt install fails.  Decode first (as the newer sibling install() does).
    kernel_release = check_output(['uname', '-r']).decode('utf-8').rstrip()
    packages = [
        'aufs-tools',
        'git',
        'linux-image-extra-{0}'.format(kernel_release),
    ]
    apt_update()
    apt_install(packages)
    # Install docker-engine from apt.
    install_from_apt()
    opts = DockerOpts()
    render('docker.defaults', '/etc/default/docker', {'opts': opts.to_s()})
    status_set('active', 'Docker installed, cycling for extensions')
    set_state('docker.ready')
    # Make with the adding of the users to the groups
    check_call(['usermod', '-aG', 'docker', 'ubuntu'])
def _url(self, *parts):
    """Build a Zeppelin REST API URL by joining *parts* onto the base."""
    dc = utils.DistConfig(data=layer.options('apache-bigtop-base'))
    result = 'http://localhost:{}/api/'.format(dc.port('zeppelin'))
    for segment in parts:
        result = urljoin(result, segment)
    return result
def __init__(self):
    """Resolve puppet package and service names from the 'puppet-srvc'
    layer option ('master', 'agent', 'db', 'ca', or 'standalone').

    Raises:
        PuppetException: if the option value is not a known role.
    """
    self.options = layer.options('puppet-base')
    self.puppet_pkg = self.options.get('puppet-srvc')
    self.puppet_base_url = 'http://apt.puppetlabs.com'
    self.puppet_gpg_key = config['puppet-gpg-key']
    self.puppet_exe = '/opt/puppetlabs/bin/puppet'
    self.facter_exe = '/opt/puppetlabs/bin/facter'
    self.puppet_conf_dir = '/etc/puppetlabs/puppet'
    self.modules_dir = '/etc/puppetlabs/code/modules/'
    self.puppet_apt_src = \
        'deb %s %s PC1' % (self.puppet_base_url,
                           lsb_release()['DISTRIB_CODENAME'])
    # Determine puppet apt package
    if self.puppet_pkg == 'master':
        self.puppet_apt_pkg = 'puppetserver'
        self.puppet_srvc = self.puppet_apt_pkg
    elif self.puppet_pkg == 'agent':
        self.puppet_apt_pkg = 'puppet-agent'
        self.puppet_srvc = 'puppet'
    elif self.puppet_pkg == 'db':
        self.puppet_apt_pkg = 'puppetdb'
        self.puppet_srvc = self.puppet_apt_pkg
    elif self.puppet_pkg == 'ca':
        self.puppet_apt_pkg = 'puppetserver'
        self.puppet_srvc = self.puppet_apt_pkg
    elif self.puppet_pkg == 'standalone':
        self.puppet_apt_pkg = 'puppet-agent'
        self.puppet_srvc = None
    else:
        # BUG FIX: the message previously contained an unfilled '{}'
        # placeholder (no .format() call) and the typo 'unkown'.
        raise PuppetException(
            "puppet-srvc option value '{}' unknown. Please change this "
            "option in the puppet-base layer options.".format(self.puppet_pkg))
def configure_worker_services(api_servers, dns, cluster_cidr):
    '''Add remaining flags for the worker services and configure snaps to use
    them'''
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Static kubelet flags, applied in declaration order.
    kubelet_opts = FlagManager('kubelet')
    for flag, value in (
            ('require-kubeconfig', 'true'),
            ('kubeconfig', kubeconfig_path),
            ('network-plugin', 'cni'),
            ('v', '0'),
            ('address', '0.0.0.0'),
            ('port', '10250'),
            ('cluster-dns', dns['sdn-ip']),
            ('cluster-domain', dns['domain']),
            ('anonymous-auth', 'false'),
            ('client-ca-file', ca_cert_path),
            ('tls-cert-file', server_cert_path),
            ('tls-private-key-file', server_key_path),
            ('logtostderr', 'true')):
        kubelet_opts.add(flag, value)

    kube_proxy_opts = FlagManager('kube-proxy')
    for flag, value in (
            ('cluster-cidr', cluster_cidr),
            ('kubeconfig', kubeconfig_path),
            ('logtostderr', 'true'),
            ('v', '0')):
        kube_proxy_opts.add(flag, value)
    # Pin the proxy to one master chosen at random from the available set.
    kube_proxy_opts.add('master', random.choice(api_servers), strict=True)

    check_call(['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' '))
    check_call(['snap', 'set', 'kube-proxy'] +
               kube_proxy_opts.to_s().split(' '))
def build_kubeconfig(server):
    '''Gather the relevant data for Kubernetes configuration objects and
    create a config object with that information.'''
    # Paths to the TLS material from the tls-client layer options.
    tls_opts = layer.options('tls-client')
    ca = tls_opts.get('ca_certificate_path')
    key = tls_opts.get('client_key_path')
    cert = tls_opts.get('client_certificate_path')
    # Bail out unless every required file is present on disk.
    if not all(p and os.path.isfile(p) for p in (ca, key, cert)):
        return
    # Cache last server string to know if we need to regenerate the config.
    if not data_changed('kubeconfig.server', server):
        return
    # The final destination of the kubeconfig and kubectl.
    destination_directory = '/home/ubuntu'
    kubeconfig_path = os.path.join(destination_directory, 'config')
    # Create the kubeconfig on this system so users can access the cluster.
    create_kubeconfig(kubeconfig_path, server, ca, key, cert)
    # Copy the kubectl binary to the destination directory.
    check_call(['install', '-v', '-o', 'ubuntu', '-g', 'ubuntu',
                '/usr/local/bin/kubectl', destination_directory])
    # Make the config file readable by the ubuntu user so juju scp works.
    check_call(['chown', 'ubuntu:ubuntu', kubeconfig_path])
def configure_kubelet(dns, ingress_ip):
    """Assemble kubelet flags (TLS, networking, DNS, GPU) and apply them."""
    tls_opts = layer.options('tls-client')
    kubelet_opts = {
        'require-kubeconfig': 'true',
        'kubeconfig': kubeconfig_path,
        'network-plugin': 'cni',
        'v': '0',
        'address': '0.0.0.0',
        'port': '10250',
        'cluster-domain': dns['domain'],
        'anonymous-auth': 'false',
        'client-ca-file': tls_opts.get('ca_certificate_path'),
        'tls-cert-file': tls_opts.get('server_certificate_path'),
        'tls-private-key-file': tls_opts.get('server_key_path'),
        'logtostderr': 'true',
        'fail-swap-on': 'false',
        'node-ip': ingress_ip,
    }
    if dns['enable-kube-dns']:
        kubelet_opts['cluster-dns'] = dns['sdn-ip']
    # set --allow-privileged flag for kubelet
    kubelet_opts['allow-privileged'] = set_privileged()
    if is_state('kubernetes-worker.gpu.enabled'):
        hookenv.log('Adding '
                    '--feature-gates=DevicePlugins=true '
                    'to kubelet')
        kubelet_opts['feature-gates'] = 'DevicePlugins=true'
    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def install_presentation():
    """ Install presentation """
    opts = layer.options('git-deploy')
    target = opts.get('target')
    # Clone repo
    hookenv.status_set('maintenance',
                       'Installing and building the presentation.')
    # Build and install
    with chdir(target):
        with open('requirements.txt', 'r') as f:
            for requirement in (line.strip('\n') for line in f.readlines()):
                pip_install(requirement)
        subprocess.call(('sphinx-build -b html source %s' % target).split(),
                        shell=False)
        subprocess.call(('chown -R www-data:www-data %s' % target).split(),
                        shell=False)
    # Configure nginx vhost
    configure_site('present', 'present.vhost', app_path=target)
    # Open presentation front-end port
    hookenv.open_port(config['port'])
    # Set status
    hookenv.status_set('active',
                       'Presentation is active on port %s' % config['port'])
    # Set state
    set_state('presentation.available')
def activate_venv():
    """ Activate the venv if enabled in ``layer.yaml``.

    This is handled automatically for normal hooks, but actions might
    need to invoke this manually, using something like:

        # Load modules from $CHARM_DIR/lib
        import sys
        sys.path.append('lib')

        from charms.layer.basic import activate_venv
        activate_venv()

    This will ensure that modules installed in the charm's virtual
    environment are available to the action.
    """
    venv_dir = os.path.abspath('../.venv')
    venv_bin = os.path.join(venv_dir, 'bin')
    from charms import layer
    cfg = layer.options('basic')
    if cfg.get('use_venv') and '.venv' not in sys.executable:
        # Put the venv's bin dir first on PATH, then re-exec its python.
        os.environ['PATH'] = ':'.join([venv_bin, os.environ['PATH']])
        reload_interpreter(os.path.join(venv_bin, 'python'))
def configure_kubelet(dns):
    """Assemble kubelet flags (TLS, networking, DNS, GPU) and apply them."""
    tls_opts = layer.options('tls-client')
    kubelet_opts = {
        'require-kubeconfig': 'true',
        'kubeconfig': kubeconfig_path,
        'network-plugin': 'cni',
        'v': '0',
        'address': '0.0.0.0',
        'port': '10250',
        'cluster-dns': dns['sdn-ip'],
        'cluster-domain': dns['domain'],
        'anonymous-auth': 'false',
        'client-ca-file': tls_opts.get('ca_certificate_path'),
        'tls-cert-file': tls_opts.get('server_certificate_path'),
        'tls-private-key-file': tls_opts.get('server_key_path'),
        'logtostderr': 'true',
        'fail-swap-on': 'false',
    }
    privileged = is_state('kubernetes-worker.privileged')
    kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
    if is_state('kubernetes-worker.gpu.enabled'):
        # Older kubelets use the experimental flag; newer use feature gates.
        if get_version('kubelet') < (1, 6):
            hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
            kubelet_opts['experimental-nvidia-gpus'] = '1'
        else:
            hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
            kubelet_opts['feature-gates'] = 'Accelerators=true'
    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    cert_path = layer_options.get('server_certificate_path')
    key_path = layer_options.get('server_key_path')
    # Proceed only once both the certificate and the key exist on disk.
    if not (cert_path and os.path.isfile(cert_path) and
            key_path and os.path.isfile(key_path)):
        return
    # At this point the cert and key exist, owned by root; hand them to
    # www-data so the nginx worker processes can read them.
    for tls_file in (cert_path, key_path):
        subprocess.call(['chown', 'www-data:www-data', tls_file])
    hookenv.open_port(hookenv.config('port'))
    services = apiserver.services()
    nginx.configure_site(
        'apilb', 'apilb.conf',
        server_name='_',
        services=services,
        port=hookenv.config('port'),
        server_certificate=cert_path,
        server_key=key_path,
    )
    hookenv.status_set('active', 'Loadbalancer ready.')
def install():
    """Install every snap declared in the 'snap' layer options, then
    (re)connect interfaces when the option set has changed."""
    snap_opts = layer.options('snap')
    for name, per_snap_opts in snap_opts.items():
        state = 'snap.installed.{}'.format(name)
        if not reactive.is_state(state):
            snap.install(name, **per_snap_opts)
    if data_changed('snap.install.opts', snap_opts):
        snap.connect_all()
def start_spark(hadoop):  # pylint: disable=w0613
    """Configure, start, and expose Apache Spark."""
    hookenv.status_set("maintenance", "Setting up Apache Spark")
    spark = Spark(DistConfig(data=layer.options("apache-spark")))
    spark.configure()
    spark.start()
    spark.open_ports()
    set_state("spark.started")
def configure_apiserver():
    """Collect kube-apiserver flags (TLS, auth, admission) and apply them."""
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    api_opts = {}
    if is_privileged():
        api_opts['allow-privileged'] = 'true'
        set_state('kubernetes-master.privileged')
    else:
        api_opts['allow-privileged'] = 'false'
        remove_state('kubernetes-master.privileged')
    # Handle static options for now
    api_opts.update({
        'service-cluster-ip-range': service_cidr(),
        'min-request-timeout': '300',
        'v': '4',
        'tls-cert-file': layer_options.get('server_certificate_path'),
        'tls-private-key-file': layer_options.get('server_key_path'),
        'kubelet-certificate-authority':
            layer_options.get('ca_certificate_path'),
        'kubelet-client-certificate':
            layer_options.get('client_certificate_path'),
        'kubelet-client-key': layer_options.get('client_key_path'),
        'logtostderr': 'true',
        'insecure-bind-address': '127.0.0.1',
        'insecure-port': '8080',
        'storage-backend': 'etcd2',  # FIXME: add etcd3 support
    })
    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds',
    ]
    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')
    api_opts['authorization-mode'] = auth_mode
    # Older apiservers do not understand some admission plugins.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts['admission-control'] = ','.join(admission_control)
    configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
    set_state('kube-apiserver.do-restart')
def render_nginx_conf():
    """Render the fiche nginx vhost and open its front-end port."""
    # Configure nginx vhost
    app_path = options('git-deploy').get('target')
    configure_site('fiche', 'fiche.nginx.tmpl',
                   port=config('port'),
                   app_path=app_path)
    # Open fiche front-end port
    open_port(config('port'))
    # Set state
    set_state('fiche.web.configured')
def install_spark(hadoop):  # pylint: disable=w0613
    """Install Spark once its resources verify; set up config and demo."""
    spark = Spark(DistConfig(data=layer.options("apache-spark")))
    if not spark.verify_resources():
        return
    hookenv.status_set("maintenance", "Installing Apache Spark")
    spark.install()
    spark.setup_spark_config()
    spark.install_demo()
    set_state("spark.installed")
def install_fiche():
    """ Install Fiche: build from the deployed source tree, install the
    binary, clear stale content, and hand ownership to www-data. """
    import glob  # local import: avoids touching the shared import header
    status_set('maintenance', 'Installing and configuring Fiche.')
    target = options('git-deploy').get('target')
    # Build fiche
    with chdir(target):
        subprocess.call('make', shell=False)
        subprocess.call('make install PREFIX=/usr/local/bin'.split(),
                        shell=False)
        # BUG FIX: the previous 'rm -rf /srv/fiche/*' ran with shell=False,
        # so the '*' was passed literally and never expanded — the cleanup
        # silently did nothing.  Expand the glob in Python instead.
        stale = glob.glob('/srv/fiche/*')
        if stale:
            subprocess.call(['rm', '-rf'] + stale, shell=False)
    # Get uid for www-data and chown the deploy target
    uid = pwd.getpwnam('www-data').pw_uid
    os.chown(target, uid, -1)
    set_state('fiche.installed')
def packages():
    """Install the PHP packages declared in the php-fpm layer options.

    On failure, stays in maintenance so the handler retries on a later hook.
    """
    status_set('maintenance', 'installing php packages')
    try:
        php.install(*options('php-fpm').get('packages'))
    # BUG FIX: a bare 'except:' also traps SystemExit/KeyboardInterrupt;
    # narrow it to Exception so process-control signals still propagate.
    except Exception:
        status_set('maintenance',
                   'Unable to install packages, trying again soon...')
    else:
        set_state('php.installed')
def render_files():
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    context = {}
    config = hookenv.config()
    # Add the charm configuration data to the context.
    context.update(config)
    # Update the context with extra values: arch, and networking information
    context.update({'arch': arch(),
                    'master_address': hookenv.unit_get('private-address'),
                    'public_address': hookenv.unit_get('public-address'),
                    'private_address': hookenv.unit_get('private-address')})
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')
    # Handle static options for now
    api_opts.add('--min-request-timeout', '300')
    api_opts.add('--v', '4')
    api_opts.add('--client-ca-file', ca_cert_path)
    api_opts.add('--tls-cert-file', server_cert_path)
    api_opts.add('--tls-private-key-file', server_key_path)
    scheduler_opts.add('--v', '2')
    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('--min-resync-period', '3m')
    controller_opts.add('--v', '2')
    controller_opts.add('--root-ca-file', ca_cert_path)
    # Flatten each component's flags into a single string for the templates.
    context.update({'kube_apiserver_flags': api_opts.to_s(),
                    'kube_scheduler_flags': scheduler_opts.to_s(),
                    'kube_controller_manager_flags': controller_opts.to_s()})
    # Render the configuration files that contains parameters for
    # the apiserver, scheduler, and controller-manager
    render_service('kube-apiserver', context)
    render_service('kube-controller-manager', context)
    render_service('kube-scheduler', context)
    # explicitly render the generic defaults file
    render('kube-defaults.defaults', '/etc/default/kube-defaults', context)
    # when files change on disk, we need to inform systemd of the changes
    call(['systemctl', 'daemon-reload'])
    call(['systemctl', 'enable', 'kube-apiserver'])
    call(['systemctl', 'enable', 'kube-controller-manager'])
    call(['systemctl', 'enable', 'kube-scheduler'])
def reconfigure_spark(hadoop):  # pylint: disable=w0613
    """Re-apply Spark and Livy configuration when the charm config changes."""
    config = hookenv.config()
    if not data_changed("configuration", config):
        return
    hookenv.status_set("maintenance",
                       "Configuring Apache Spark and Livy REST server")
    spark = Spark(DistConfig(data=layer.options("apache-spark")))
    livy = Livy(DistConfig(data=layer.options("livy")))
    # Stop both services before reconfiguring, then bring them back up.
    livy.stop()
    spark.stop()
    spark.configure()
    mode = hookenv.config()["spark_execution_mode"]
    livy.configure(mode)
    spark.start()
    livy.start()
    hookenv.status_set("active", "Ready")
def start_livy(hadoop):  # pylint: disable=w0613
    """Configure Livy for the current execution mode and start it."""
    hookenv.status_set("maintenance", "Setting up Livy REST server")
    livy = Livy(DistConfig(data=layer.options("livy")))
    mode = hookenv.config()["spark_execution_mode"]
    livy.configure(mode)
    livy.start()
    livy.open_ports()
    set_state("livy.started")
    hookenv.status_set("active", "Ready")
def queue_layer_packages():
    """Add packages listed in build-time layer options."""
    # Both basic and apt layer. basic layer will have already installed
    # its defined packages, but rescheduling it here gets the apt layer
    # state set and they will pinned as any other apt layer installed
    # package.
    opts = layer.options()
    for section in ('basic', 'apt'):
        try:
            packages = opts[section]['packages']
        except KeyError:
            continue
        charms.apt.queue_install(packages)
def reset_application_version():
    '''Set the Juju application version, per settings in layer.yaml'''
    # Reset the application version. We call this after installing
    # packages to initialize the version. We also call this every
    # hook, incase the version has changed (eg. Landscape upgraded
    # the package).
    apt_opts = layer.options().get('apt', {})
    pkg = apt_opts.get('version_package')
    if not pkg or pkg not in installed():
        return
    full = apt_opts.get('full_version', False)
    hookenv.application_version_set(get_package_version(pkg, full))
def configure_controller_manager():
    """Collect kube-controller-manager flags and apply them."""
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    controller_opts = {
        # Default to 3 minute resync. TODO: Make this configureable?
        'min-resync-period': '3m',
        'v': '2',
        'root-ca-file': layer_options.get('ca_certificate_path'),
        'logtostderr': 'true',
        'master': 'http://127.0.0.1:8080',
    }
    configure_kubernetes_service('kube-controller-manager', controller_opts,
                                 'controller-manager-extra-args')
    set_state('kube-controller-manager.do-restart')
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.'''
    # CA path from the tls-client layer options.
    ca = layer.options('tls-client').get('ca_certificate_path')
    # Kubeconfig for the ubuntu user, in the default location.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='******')
    # Make the config dir readable by the ubuntu users so juju scp works.
    check_call(['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube'])
    # Kubeconfig for root, in the default location.
    create_kubeconfig('/root/.kube/config', server, ca,
                      token=creds['client_token'], user='******')
    # Kubeconfig for the kubelet and kube-proxy services.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='******')
def get_dist_config(required_keys=None):
    """Load dist.yaml and overlay values from the hadoop-base layer options.

    Scalars replace, lists are unioned, and mappings are overlaid.
    """
    if not required_keys:
        required_keys = ['vendor', 'hadoop_version', 'packages',
                         'groups', 'users', 'dirs', 'ports']
    dist = DistConfig(filename='dist.yaml', required_keys=required_keys)
    opts = layer.options('hadoop-base')
    # Scalars: the layer option replaces the dist.yaml value outright.
    for key in ('hadoop_version',):
        if key in opts:
            dist.dist_config[key] = opts[key]
    # Lists: union the dist.yaml entries with the layer's.
    for key in ('packages', 'groups'):
        if key in opts:
            merged = set(dist.dist_config[key]) | set(opts[key])
            dist.dist_config[key] = list(merged)
    # Mappings: layer entries overlay the dist.yaml mapping in place.
    for key in ('users', 'dirs', 'ports'):
        if key in opts:
            dist.dist_config[key].update(opts[key])
    return dist
def build_kubeconfig(server):
    '''Gather the relevant data for Kubernetes configuration objects and
    create a config object with that information.'''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    ca = layer_options.get('ca_certificate_path')
    client_pass = get_password('basic_auth.csv', 'admin')
    # Need both the CA file on disk and an admin password to proceed.
    if not (ca and os.path.isfile(ca) and client_pass):
        return
    # Create an absolute path for the kubeconfig file.
    kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
    # Create the kubeconfig on this system so users can access the cluster.
    create_kubeconfig(kubeconfig_path, server, ca,
                      user='******', password=client_pass)
    # Make the config file readable by the ubuntu users so juju scp works.
    check_call(['chown', 'ubuntu:ubuntu', kubeconfig_path])
def configure_controller_manager():
    """Set kube-controller-manager snap flags and request a restart."""
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    controller_opts = FlagManager('kube-controller-manager')
    # Default to 3 minute resync. TODO: Make this configureable?
    for flag, value in (
            ('min-resync-period', '3m'),
            ('v', '2'),
            ('root-ca-file', layer_options.get('ca_certificate_path')),
            ('logtostderr', 'true'),
            ('master', 'http://127.0.0.1:8080')):
        controller_opts.add(flag, value)
    check_call(['snap', 'set', 'kube-controller-manager'] +
               controller_opts.to_s().split(' '))
    set_state('kube-controller-manager.do-restart')
def start_web_service():
    """Start the service named by the lets-encrypt 'service-name' option."""
    service_name = layer.options('lets-encrypt').get('service-name')
    if not service_name:
        return
    log('starting service: %s' % (service_name))
    service_start(service_name)
def __init__(self):
    """Load the hadoop-client layer distribution configuration."""
    opts = layer.options('hadoop-client')
    self.dist_config = utils.DistConfig(data=opts)
def install(): """ Install the docker daemon, and supporting tooling. :return: None or False """ # Switching runtimes causes a reinstall so remove any holds that exist. unhold_all() # Often when building layer-docker based subordinates, you dont need to # incur the overhead of installing docker. This tuneable layer option # allows you to disable the exec of that install routine, and instead short # circuit immediately to docker.available, so you can charm away! layer_opts = layer.options('docker') if layer_opts['skip-install']: set_state('docker.available') set_state('docker.ready') return status_set('maintenance', 'Installing AUFS and other tools.') kernel_release = check_output(['uname', '-r']).rstrip() packages = [ 'aufs-tools', 'git', 'linux-image-extra-{}'.format(kernel_release.decode('utf-8')), ] apt_update() apt_install(packages) # Install docker-engine from apt. runtime = determine_apt_source() remove_state('nvidia-docker.supported') remove_state('nvidia-docker.installed') if runtime == 'upstream': install_from_upstream_apt() elif runtime == 'nvidia': set_state('nvidia-docker.supported') install_from_nvidia_apt() set_state('nvidia-docker.installed') elif runtime == 'apt': install_from_archive_apt() elif runtime == 'custom': if not install_from_custom_apt(): return False # If install fails, stop. else: hookenv.log('Unknown runtime {}'.format(runtime)) return False validate_config() opts = DockerOpts() render('docker.defaults', '/etc/default/docker', { 'opts': opts.to_s(), 'docker_runtime': runtime }) render('docker.systemd', '/lib/systemd/system/docker.service', config()) reload_system_daemons() hold_all() hookenv.log( 'Holding docker-engine and docker.io packages at current revision.') host.service_restart('docker') hookenv.log('Docker installed, setting "docker.ready" state.') set_state('docker.ready') # Make with the adding of the users to the groups check_call(['usermod', '-aG', 'docker', 'ubuntu'])
def __init__(self):
    """Load the apache-bigtop-base layer distribution configuration."""
    opts = layer.options('apache-bigtop-base')
    self.dist_config = utils.DistConfig(data=opts)
def configure_apiserver():
    """Assemble kube-apiserver snap flags (operator extra-args, TLS,
    admission control) and apply them via `snap set`, then request a
    restart through the 'kube-apiserver.do-restart' state."""
    # TODO: investigate if it's possible to use config file to store args id:118 gh:119
    # https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/315
    # Handle api-extra-args config option
    to_add, to_remove = get_config_args()
    api_opts = FlagManager('kube-apiserver')
    # Remove arguments that are no longer provided as config option
    # this allows them to be reverted to charm defaults
    for arg in to_remove:
        hookenv.log('Removing option: {}'.format(arg))
        api_opts.destroy(arg)
        # We need to "unset" options by settig their value to "null" string
        cmd = ['snap', 'set', 'kube-apiserver', '{}=null'.format(arg)]
        check_call(cmd)
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')
    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')
    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support id:91 gh:92
    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]
    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')
    api_opts.add('authorization-mode', auth_mode, strict=True)
    # Older apiservers do not understand some admission plugins.
    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)
    # Add operator-provided arguments, this allows operators
    # to override defaults
    for arg in to_add:
        hookenv.log('Adding option: {} {}'.format(arg[0], arg[1]))
        # Make sure old value is gone
        api_opts.destroy(arg[0])
        api_opts.add(arg[0], arg[1])
    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)
    set_state('kube-apiserver.do-restart')
def report_blocked():
    """Set blocked status unless the hadoop-client layer opts it silent."""
    if layer.options('hadoop-client').get('silent'):
        return
    hookenv.status_set('blocked', 'Waiting for relation to Hadoop Plugin')
def format_and_mount_storage():
    ''' This allows users to request persistent volumes from the cloud
    provider for the purposes of disaster recovery. '''
    set_state('data.volume.attached')
    # Query juju for the information about the block storage
    device_info = storage_get()
    block = device_info['location']
    bag = EtcdDatabag()
    bag.cluster = leader_get('cluster')
    # the databag has behavior that keeps the path updated.
    # Reference the default path from layer_options.
    etcd_opts = layer.options('etcd')
    # Split the tail of the path to mount the volume 1 level before
    # the data directory.
    tail = os.path.split(bag.etcd_data_dir)[0]
    if volume_is_mounted(block):
        hookenv.log('Device is already attached to the system.')
        hookenv.log('Refusing to take action against {}'.format(block))
        return
    # Format the device in non-interactive mode
    # NOTE(review): mkfs.ext4 -F is destructive; presumably safe because the
    # mounted check above guards an in-use device — confirm with the caller.
    cmd = ['mkfs.ext4', device_info['location'], '-F']
    hookenv.log('Creating filesystem on {}'.format(device_info['location']))
    hookenv.log('With command: {}'.format(' '.join(cmd)))
    check_call(cmd)
    # halt etcd to perform the data-store migration
    host.service_stop(bag.etcd_daemon)
    os.makedirs(tail, exist_ok=True)
    mount_volume(block, tail)
    # handle first run during early-attach storage, pre-config-changed hook.
    os.makedirs(bag.etcd_data_dir, exist_ok=True)
    # Only attempt migration if directory exists
    if os.path.isdir(etcd_opts['etcd_data_dir']):
        # Trailing '/' makes rsync copy directory contents, not the dir.
        migrate_path = "{}/".format(etcd_opts['etcd_data_dir'])
        output_path = "{}/".format(bag.etcd_data_dir)
        cmd = ['rsync', '-azp', migrate_path, output_path]
        hookenv.log('Detected existing data, migrating to new location.')
        hookenv.log('With command: {}'.format(' '.join(cmd)))
        check_call(cmd)
    with open('/etc/fstab', 'r') as fp:
        contents = fp.readlines()
    found = 0
    # scan fstab for the device
    for line in contents:
        if block in line:
            found = found + 1
    # if device not in fstab, append so it persists through reboots
    if not found > 0:
        append = "{0} {1} ext4 defaults 0 0".format(block, tail)  # noqa
        with open('/etc/fstab', 'a') as fp:
            fp.writelines([append])
    # Finally re-render the configuration and resume operation
    render_config(bag)
    host.service_restart(bag.etcd_daemon)
def stop_running_web_service(): service_name = layer.options('lets-encrypt').get('service-name') if service_name and service_running(service_name): log('stopping running service: %s' % (service_name)) service_stop(service_name) return True
def configure_apiserver(etcd_connection_string, leader_etcd_version):
    """Assemble and apply the kube-apiserver snap options, then restart it.

    :param etcd_connection_string: comma-separated etcd server URLs for
        the --etcd-servers flag.
    :param leader_etcd_version: storage backend name ('etcd2'/'etcd3')
        chosen by the leader, passed to --storage-backend.
    """
    api_opts = {}

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # at one point in time, this code would set ca-client-cert,
    # but this was removed. This was before configure_kubernetes_service
    # kept track of old arguments and removed them, so client-ca-cert
    # was able to hang around forever stored in the snap configuration.
    # This removes that stale configuration from the snap if it still
    # exists.
    # (May be overwritten below when enable-metrics is on.)
    api_opts['client-ca-file'] = 'null'

    if is_privileged():
        api_opts['allow-privileged'] = 'true'
        set_state('kubernetes-master.privileged')
    else:
        api_opts['allow-privileged'] = 'false'
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts['service-cluster-ip-range'] = service_cidr()
    api_opts['min-request-timeout'] = '300'
    api_opts['v'] = '4'
    api_opts['tls-cert-file'] = server_cert_path
    api_opts['tls-private-key-file'] = server_key_path
    api_opts['kubelet-certificate-authority'] = ca_cert_path
    api_opts['kubelet-client-certificate'] = client_cert_path
    api_opts['kubelet-client-key'] = client_key_path
    api_opts['logtostderr'] = 'true'
    api_opts['insecure-bind-address'] = '127.0.0.1'
    api_opts['insecure-port'] = '8080'
    api_opts['storage-backend'] = leader_etcd_version
    api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
    api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
    api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
    api_opts['kubelet-preferred-address-types'] = \
        '[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'

    # Client TLS material for talking to the etcd cluster.
    etcd_dir = '/root/cdk/etcd'
    etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
    etcd_key = os.path.join(etcd_dir, 'client-key.pem')
    etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')

    api_opts['etcd-cafile'] = etcd_ca
    api_opts['etcd-keyfile'] = etcd_key
    api_opts['etcd-certfile'] = etcd_cert
    api_opts['etcd-servers'] = etcd_connection_string

    # Two admission-controller lists: pre-1.9 clusters get the shorter
    # list (further trimmed below by version checks).
    admission_control_pre_1_9 = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    admission_control = [
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'PersistentVolumeLabel',
        'DefaultStorageClass',
        'DefaultTolerationSeconds',
        'MutatingAdmissionWebhook',
        'ValidatingAdmissionWebhook',
        'ResourceQuota'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')

    api_opts['authorization-mode'] = auth_mode

    kube_version = get_version('kube-apiserver')
    if kube_version < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control_pre_1_9.remove('DefaultTolerationSeconds')
    if kube_version < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control_pre_1_9.remove('Initializers')
    if kube_version < (1, 9):
        api_opts['admission-control'] = ','.join(admission_control_pre_1_9)
    else:
        api_opts['admission-control'] = ','.join(admission_control)

    # Aggregation-layer / metrics flags; also restores a real
    # client-ca-file (replacing the 'null' sentinel set above).
    if kube_version > (1, 6) and \
            hookenv.config('enable-metrics'):
        api_opts['requestheader-client-ca-file'] = ca_cert_path
        api_opts['requestheader-allowed-names'] = 'client'
        api_opts['requestheader-extra-headers-prefix'] = 'X-Remote-Extra-'
        api_opts['requestheader-group-headers'] = 'X-Remote-Group'
        api_opts['requestheader-username-headers'] = 'X-Remote-User'
        api_opts['proxy-client-cert-file'] = client_cert_path
        api_opts['proxy-client-key-file'] = client_key_path
        api_opts['enable-aggregator-routing'] = 'true'
        api_opts['client-ca-file'] = ca_cert_path

    configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
    restart_apiserver()
def configure_master_services():
    ''' Add remaining flags for the master services and configure snaps to use
    them.

    Builds FlagManager option sets for kube-apiserver,
    kube-controller-manager and kube-scheduler, then pushes each set into
    its snap via `snap set`.
    '''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')
    scheduler_opts.add('v', '2')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # strict=True makes the FlagManager overwrite any prior value.
    if is_privileged():
        api_opts.add('allow-privileged', 'true', strict=True)
        set_state('kubernetes-master.privileged')
    else:
        api_opts.add('allow-privileged', 'false', strict=True)
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts.add('service-cluster-ip-range', service_cidr())
    api_opts.add('min-request-timeout', '300')
    api_opts.add('v', '4')
    api_opts.add('tls-cert-file', server_cert_path)
    api_opts.add('tls-private-key-file', server_key_path)
    api_opts.add('kubelet-certificate-authority', ca_cert_path)
    api_opts.add('kubelet-client-certificate', client_cert_path)
    api_opts.add('kubelet-client-key', client_key_path)
    api_opts.add('logtostderr', 'true')
    api_opts.add('insecure-bind-address', '127.0.0.1')
    api_opts.add('insecure-port', '8080')
    api_opts.add('storage-backend', 'etcd2')  # FIXME: add etcd3 support

    # Trim admission controllers that the installed apiserver is too old
    # to understand.
    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts.add('admission-control', ','.join(admission_control), strict=True)

    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('min-resync-period', '3m')
    controller_opts.add('v', '2')
    controller_opts.add('root-ca-file', ca_cert_path)
    controller_opts.add('logtostderr', 'true')
    controller_opts.add('master', 'http://127.0.0.1:8080')

    scheduler_opts.add('v', '2')
    scheduler_opts.add('logtostderr', 'true')
    scheduler_opts.add('master', 'http://127.0.0.1:8080')

    # Push each accumulated flag set into its snap configuration.
    cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
    check_call(cmd)

    cmd = (['snap', 'set', 'kube-controller-manager'] +
           controller_opts.to_s().split(' '))
    check_call(cmd)
    cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')
    check_call(cmd)
def render_files():
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.

    Also renders the systemd service files for the three master
    components, re-reads systemd units, and enables the services.
    '''
    context = {}
    config = hookenv.config()
    # Add the charm configuration data to the context.
    context.update(config)

    # Update the context with extra values: arch, and networking information
    context.update({
        'arch': arch(),
        'master_address': hookenv.unit_get('private-address'),
        'public_address': hookenv.unit_get('public-address'),
        'private_address': hookenv.unit_get('private-address')
    })

    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')
    scheduler_opts = FlagManager('kube-scheduler')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # Handle static options for now
    # NOTE: flags here carry the leading '--' (unlike the snap-based
    # configuration path elsewhere in this file) because they are
    # rendered verbatim into the service templates.
    api_opts.add('--min-request-timeout', '300')
    api_opts.add('--v', '4')
    api_opts.add('--client-ca-file', ca_cert_path)
    api_opts.add('--tls-cert-file', server_cert_path)
    api_opts.add('--tls-private-key-file', server_key_path)
    api_opts.add('--kubelet-certificate-authority', ca_cert_path)
    api_opts.add('--kubelet-client-certificate', client_cert_path)
    api_opts.add('--kubelet-client-key', client_key_path)

    scheduler_opts.add('--v', '2')

    # Default to 3 minute resync. TODO: Make this configureable?
    controller_opts.add('--min-resync-period', '3m')
    controller_opts.add('--v', '2')
    controller_opts.add('--root-ca-file', ca_cert_path)

    context.update({
        'kube_apiserver_flags': api_opts.to_s(),
        'kube_scheduler_flags': scheduler_opts.to_s(),
        'kube_controller_manager_flags': controller_opts.to_s()
    })

    # Render the configuration files that contains parameters for
    # the apiserver, scheduler, and controller-manager
    render_service('kube-apiserver', context)
    render_service('kube-controller-manager', context)
    render_service('kube-scheduler', context)

    # explicitly render the generic defaults file
    render('kube-defaults.defaults', '/etc/default/kube-defaults', context)

    # when files change on disk, we need to inform systemd of the changes
    call(['systemctl', 'daemon-reload'])
    call(['systemctl', 'enable', 'kube-apiserver'])
    call(['systemctl', 'enable', 'kube-controller-manager'])
    call(['systemctl', 'enable', 'kube-scheduler'])
def sorted_snap_opts(): opts = layer.options('snap') opts = sorted(opts.items(), key=lambda item: item[0] != 'core') opts = OrderedDict(opts) return opts
def report_waiting(hadoop): cfg = layer.options('hadoop-client') if not cfg.get('silent'): hookenv.status_set('waiting', 'Waiting for Plugin to become ready')
def client_present(client): dist = DistConfig(data=layer.options('livy')) rest_port = dist.port('livy') client.send_rest_port(rest_port) client.set_spark_started()
def bootstrap_charm_deps():
    """
    Set up the base charm dependencies so that the reactive system can run.

    Installs apt packages, optionally creates a Python 3 virtualenv, and
    installs all wheelhouse dependencies with pip, then reloads the
    interpreter so the new libraries are importable. Idempotent: a
    'wheelhouse/.bootstrapped' marker file short-circuits later calls.
    """
    # execd must happen first, before any attempt to install packages or
    # access the network, because sites use this hook to do bespoke
    # configuration and install secrets so the rest of this bootstrap
    # and the charm itself can actually succeed. This call does nothing
    # unless the operator has created and populated $CHARM_DIR/exec.d.
    execd_preinstall()
    venv = os.path.abspath('../.venv')
    vbin = os.path.join(venv, 'bin')
    vpip = os.path.join(vbin, 'pip')
    vpy = os.path.join(vbin, 'python')
    if os.path.exists('wheelhouse/.bootstrapped'):
        # already bootstrapped on a previous hook invocation
        from charms import layer
        cfg = layer.options('basic')
        if cfg.get('use_venv') and '.venv' not in sys.executable:
            # activate the venv
            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
            reload_interpreter(vpy)
        return
    # bootstrap wheelhouse
    if os.path.exists('wheelhouse'):
        apt_install(['python3-pip', 'python3-setuptools', 'python3-yaml'])
        from charms import layer
        cfg = layer.options('basic')
        # include packages defined in layer.yaml
        apt_install(cfg.get('packages', []))
        # if we're using a venv, set it up
        if cfg.get('use_venv'):
            if not os.path.exists(venv):
                apt_install(['python-virtualenv'])
                # --never-download: only use local/wheelhouse artifacts
                cmd = ['virtualenv', '-ppython3', '--never-download', venv]
                if cfg.get('include_system_packages'):
                    cmd.append('--system-site-packages')
                check_call(cmd)
            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
            pip = vpip
        else:
            pip = 'pip3'
            # save a copy of system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip'):
                shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
        # need newer pip, to fix spurious Double Requirement error:
        # https://github.com/pypa/pip/issues/56
        check_call(
            [pip, 'install', '-U', '--no-index', '-f', 'wheelhouse', 'pip'])
        # install the rest of the wheelhouse deps
        check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse'] +
                   glob('wheelhouse/*'))
        if not cfg.get('use_venv'):
            # restore system pip to prevent `pip3 install -U pip`
            # from changing it
            if os.path.exists('/usr/bin/pip.save'):
                shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
                os.remove('/usr/bin/pip.save')
        # flag us as having already bootstrapped so we don't do it again
        open('wheelhouse/.bootstrapped', 'w').close()
        # Ensure that the newly bootstrapped libs are available.
        # Note: this only seems to be an issue with namespace packages.
        # Non-namespace-package libs (e.g., charmhelpers) are available
        # without having to reload the interpreter. :/
        reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
def configure_apiserver(etcd_connection_string, leader_etcd_version):
    """Assemble and apply the kube-apiserver snap options, then restart it.

    :param etcd_connection_string: comma-separated etcd server URLs for
        the --etcd-servers flag.
    :param leader_etcd_version: storage backend name chosen by the
        leader, passed to --storage-backend.
    """
    api_opts = {}

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    if is_privileged():
        api_opts['allow-privileged'] = 'true'
        set_state('kubernetes-master.privileged')
    else:
        api_opts['allow-privileged'] = 'false'
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts['service-cluster-ip-range'] = service_cidr()
    api_opts['min-request-timeout'] = '300'
    api_opts['v'] = '4'
    api_opts['tls-cert-file'] = server_cert_path
    api_opts['tls-private-key-file'] = server_key_path
    api_opts['kubelet-certificate-authority'] = ca_cert_path
    api_opts['kubelet-client-certificate'] = client_cert_path
    api_opts['kubelet-client-key'] = client_key_path
    api_opts['logtostderr'] = 'true'
    api_opts['insecure-bind-address'] = '127.0.0.1'
    api_opts['insecure-port'] = '8080'
    api_opts['storage-backend'] = leader_etcd_version
    api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
    api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
    api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
    api_opts['kubelet-preferred-address-types'] = \
        '[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'

    # Client TLS material for talking to the etcd cluster.
    etcd_dir = '/root/cdk/etcd'
    etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
    etcd_key = os.path.join(etcd_dir, 'client-key.pem')
    etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')

    api_opts['etcd-cafile'] = etcd_ca
    api_opts['etcd-keyfile'] = etcd_key
    api_opts['etcd-certfile'] = etcd_cert
    api_opts['etcd-servers'] = etcd_connection_string

    # Base admission-controller list; trimmed below for older apiservers.
    admission_control = [
        'Initializers',
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')

    api_opts['authorization-mode'] = auth_mode

    if get_version('kube-apiserver') < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control.remove('DefaultTolerationSeconds')
    if get_version('kube-apiserver') < (1, 7):
        hookenv.log('Removing Initializers from admission-control')
        admission_control.remove('Initializers')
    api_opts['admission-control'] = ','.join(admission_control)

    configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
    restart_apiserver()