def update_nrpe_config():
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    log('Refreshing nagios checks')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'check_ceph_status.py'),
            os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))
    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                     'collect_ceph_status.sh'),
        script)
    cronjob = "{} root {}\n".format('*/5 * * * *', script)
    write_file(STATUS_CRONFILE, cronjob)

    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {%s}' % current_unit,
        check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE))
    nrpe_setup.write()
def build_script(script_name, relation):
    # Install a wrapper to psql that connects it to the desired database
    # by default. One wrapper per unit per relation.
    script_path = os.path.abspath(os.path.join(SCRIPT_DIR, script_name))
    pgpass_path = os.path.abspath(os.path.join(PGPASS_DIR, script_name))
    script = dedent("""\
        #!/bin/sh
        exec env \\
            PGHOST={host} PGPORT={port} PGDATABASE={database} \\
            PGUSER={user} PGPASSFILE={pgpass} \\
            psql $@
        """).format(host=relation['host'],
                    port=relation['port'],
                    database=relation.get('database', ''),
                    user=relation['user'],
                    pgpass=pgpass_path)
    log("Generating wrapper {}".format(script_path), INFO)
    host.write_file(
        script_path, script.encode('UTF8'),
        owner="ubuntu", group="ubuntu", perms=0o700)

    # The wrapper requires access to the password, stored in a .pgpass
    # file so it isn't exposed in an environment variable or on the
    # command line.
    pgpass = "******".format(
        user=relation['user'], password=relation['password'])
    host.write_file(
        pgpass_path, pgpass.encode('UTF8'),
        owner="ubuntu", group="ubuntu", perms=0o400)
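# Hypothetical usage sketch for build_script() above; the relation dict
# mirrors a typical PostgreSQL relation payload and is illustrative only.
# Note that the pgpass line itself appears masked ("******") in this source,
# so the real .pgpass contents are not shown here.
build_script('psql-db-admin', {
    'host': '10.0.0.5',
    'port': '5432',
    'database': 'mydb',
    'user': 'db_admin',
    'password': 'sekrit',
})
# Produces SCRIPT_DIR/psql-db-admin (0o700) wrapping psql, plus a matching
# 0o400 password file under PGPASS_DIR.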
def test_writes_content_to_a_file(self, os_, log, getgrnam, getpwnam):
    # Curly brackets here demonstrate that we are *not* rendering
    # these strings with Python's string formatting. This is a
    # change from the original behavior per Bug #1195634.
    uid = 123
    gid = 234
    owner = 'some-user-{foo}'
    group = 'some-group-{bar}'
    path = '/some/path/{baz}'
    contents = b'what is {juju}'
    perms = 0o644
    fileno = 'some-fileno'

    getpwnam.return_value.pw_uid = uid
    getgrnam.return_value.gr_gid = gid

    with patch_open() as (mock_open, mock_file):
        mock_file.fileno.return_value = fileno

        host.write_file(path, contents, owner=owner, group=group,
                        perms=perms)

        getpwnam.assert_called_with('some-user-{foo}')
        getgrnam.assert_called_with('some-group-{bar}')
        mock_open.assert_called_with('/some/path/{baz}', 'wb')
        os_.fchown.assert_called_with(fileno, uid, gid)
        os_.fchmod.assert_called_with(fileno, perms)
        mock_file.write.assert_called_with(b'what is {juju}')
def configure_cassandra_env():
    cassandra_env_path = helpers.get_cassandra_env_file()
    assert os.path.exists(cassandra_env_path)

    helpers.maybe_backup(cassandra_env_path)

    overrides = [
        ('max_heap_size', re.compile(r'^#?(MAX_HEAP_SIZE)=(.*)$', re.M)),
        ('heap_newsize', re.compile(r'^#?(HEAP_NEWSIZE)=(.*)$', re.M)),
        # We don't allow this to be overridden to ensure that tools
        # will find JMX using the default port.
        # ('jmx_port', re.compile(r'^#?(JMX_PORT)=(.*)$', re.M)),
    ]

    with open(cassandra_env_path, 'r') as f:
        env = f.read()

    config = hookenv.config()
    for key, regexp in overrides:
        if config[key]:
            val = shlex.quote(str(config[key]))
            env = regexp.sub(r'\g<1>={}'.format(val), env)
        else:
            env = regexp.sub(r'#\1=\2', env)
    host.write_file(cassandra_env_path, env.encode('UTF-8'))
def inhibit_default_cluster_creation():
    """Stop the PostgreSQL packages from creating the default cluster.

    We can't use the default cluster as it is likely created with an
    incorrect locale and without options such as data checksumming.
    Allowing the package to create the cluster is problematic, as the
    charm can't really tell between a cluster created by package
    installation that can be safely destroyed, and a cluster left from
    a previous installation that might contain precious data that we
    can't risk destroying.
    """
    if host.get_distrib_codename() == "xenial":
        # Xenial's postgresql-common package does not support includes in
        # cluster configuration files.
        os.makedirs("/etc/postgresql-common", mode=0o755, exist_ok=True)
        content = "\n".join(
            [
                "ssl = on",
                "stats_temp_directory = '/var/run/postgresql/%v-%c.pg_stat_tmp'",
                "log_line_prefix = '%%t [%%p-%%l] %%q%%u@%%d '",
                "create_main_cluster = false",
            ]
        )
        host.write_file("/etc/postgresql-common/createcluster.conf", content, perms=0o444)
        return
    path = createcluster_conf_path()
    if os.path.exists(path):
        return
    os.makedirs(os.path.dirname(path), mode=0o755, exist_ok=True)
    host.write_file(path, "create_main_cluster = false", perms=0o444)
def create_repo(git):
    username = git.get_remote('username')
    service = remote_service_name()
    repo_path = os.path.join(repo_root(), service + '.git')

    host.add_group(username)
    host.adduser(username, password=host.pwgen(32), shell='/usr/bin/git-shell')

    ssh_public_key = git.get_remote('ssh-public-key')
    dotssh_dir = '/home/{}/.ssh/'.format(username)
    host.mkdir(dotssh_dir, username, username, 0o700)
    host.write_file(dotssh_dir + 'authorized_keys',
                    ssh_public_key.encode('utf-8'),
                    username, username, 0o400)

    host.mkdir(repo_path, group=username, perms=0o770)
    subprocess.check_call(['git', 'init', '--bare', '--shared=group', repo_path])

    # Create server-side hook that will inform
    # clients whenever changes are committed.
    create_git_hooks(repo_path, username)

    # Make the repo owned by <username>.
    chown_repo(repo_path, username)

    # TODO(axw) read and publish all host keys.
    ssh_host_keys = [open(SSH_HOST_RSA_KEY).read()]
    git.configure(repo_path, ssh_host_keys)
    set_state('git.repo.created')
    status_set('active', '')
def apache_input(apache):
    template = """
[[inputs.apache]]
  urls = {{ urls }}
"""
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'apache')
    port = '8080'
    vhost = render(source='apache-server-status.tmpl',
                   templates_dir=get_templates_dir(),
                   target=None,
                   context={'port': port})
    relation_info = {"ports": port,
                     "domain": "apache-status",
                     "enabled": True,
                     "site_config": vhost,
                     "site_modules": "status"}
    urls = []
    rels = hookenv.relations_of_type('apache')
    for rel in rels:
        hookenv.relation_set(rel['__relid__'], relation_settings=relation_info)
        addr = rel['private-address']
        url = 'http://{}:{}/server-status?auto'.format(addr, port)
        urls.append(url)
    if urls:
        context = {"urls": json.dumps(urls)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "apache")
        hookenv.log("Updating {} plugin config file".format('apache'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.apache.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
def get_rabbit_password_on_disk(username, password=None, local=False):
    '''Retrieve, generate or store a rabbit password for the provided
    username on disk'''
    if local:
        _passwd_file = _local_named_passwd.format(service_name(), username)
    else:
        _passwd_file = _named_passwd.format(service_name(), username)

    _password = None
    if os.path.exists(_passwd_file):
        with open(_passwd_file, 'r') as passwd:
            _password = passwd.read().strip()
    else:
        mkdir(os.path.dirname(_passwd_file), owner=RABBIT_USER,
              group=RABBIT_USER, perms=0o775)
        os.chmod(os.path.dirname(_passwd_file), 0o775)
        _password = password or pwgen(length=64)
        write_file(_passwd_file, _password, owner=RABBIT_USER,
                   group=RABBIT_USER, perms=0o660)

    return _password
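# Hypothetical usage sketch for get_rabbit_password_on_disk() above; the
# username is illustrative. A password passed in is only used when no
# password file exists yet; afterwards the stored value always wins.
first = get_rabbit_password_on_disk('nova')             # generated, 64 chars
again = get_rabbit_password_on_disk('nova', 'ignored')  # read back from disk
assert first == again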
def ensure_files():
    """
    Ensures all the required files or directories
    are present before it starts the datamover service.
    """
    usr = DM_EXT_USR
    grp = DM_EXT_GRP
    dm_bin = '/usr/bin/tvault-contego'
    log_path = '/var/log/nova'
    log_file = '{}/tvault-contego.log'.format(log_path)
    conf_path = '/etc/tvault-contego'
    # Create the log directory if it doesn't exist.
    # NOTE: perms is passed as the decimal literal 501 (equivalent to 0o765).
    mkdir(log_path, owner=usr, group=grp, perms=501, force=True)
    write_file(log_file, '', owner=usr, group=grp, perms=501)
    if not check_presence(dm_bin):
        log("TrilioVault Datamover binary is not present")
        status_set(
            'blocked',
            'TrilioVault Datamover binary is not present on TVault VM')
        return False
    # Create the conf directory if it doesn't exist.
    mkdir(conf_path, owner=usr, group=grp, perms=501, force=True)
    return True
def store_cqlshrc_credentials(owner, username, password):
    cqlshrc_path = get_cqlshrc_path(owner)
    hookenv.log('Storing credentials for {} in {}'.format(owner, cqlshrc_path))
    c = config()

    cqlshrc = configparser.ConfigParser(interpolation=None)
    cqlshrc.read([cqlshrc_path])

    # We set items separately, rather than together, so that we have a
    # defined order for the ConfigParser to preserve and the tests to
    # rely on.
    cqlshrc.setdefault('authentication', {})
    cqlshrc['authentication']['username'] = username
    cqlshrc['authentication']['password'] = password

    cqlshrc.setdefault('connection', {})
    cqlshrc['connection']['hostname'] = rpc_broadcast_ip_address()
    if get_cassandra_version().startswith('2.0'):
        cqlshrc['connection']['port'] = str(c['rpc_port'])
    else:
        cqlshrc['connection']['port'] = str(c['native_transport_port'])

    ini = io.StringIO()
    cqlshrc.write(ini)
    host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700, owner=owner)
    host.write_file(cqlshrc_path, ini.getvalue().encode('UTF-8'),
                    perms=0o400, owner=owner)
def prometheus_client(prometheus):
    template = """
[[outputs.prometheus_client]]
  listen = "{{ listen }}"
"""
    if get_prometheus_port():
        hookenv.log("Prometheus configured globally, skipping plugin setup")
        prometheus.configure(get_prometheus_port())
        # Bail out; nothing more needs to be configured here.
        return
    port = 9126
    extra_options = get_extra_options()
    options = extra_options['outputs'].get('prometheus-client', {})
    listen = options.pop('listen', None)
    if listen is not None:
        hookenv.log("Configuring prometheus_client plugin to listen on: "
                    "'{}'".format(listen))
        port = int(listen.split(":", 1)[1])
    else:
        listen = ":{}".format(port)
    check_port("prometheus_output", get_prometheus_port())
    prometheus.configure(port)
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'prometheus-client')
    hookenv.log("Updating {} plugin config file".format('prometheus-client'))
    context = {"listen": listen}
    content = render_template(template, context) + \
        render_extra_options("outputs", "prometheus_client",
                             extra_options=extra_options)
    host.write_file(config_path, content.encode('utf-8'))
    set_state('plugins.prometheus-client.configured')
def get_mysql_password_on_disk(self, username=None, password=None):
    """Retrieve, generate or store a mysql password for the provided
    username on disk."""
    if username:
        template = self.user_passwd_file_template
        passwd_file = template.format(username)
    else:
        passwd_file = self.root_passwd_file_template

    _password = None
    if os.path.exists(passwd_file):
        log("Using existing password file '%s'" % passwd_file, level=DEBUG)
        with open(passwd_file, 'r') as passwd:
            _password = passwd.read().strip()
    else:
        log("Generating new password file '%s'" % passwd_file, level=DEBUG)
        if not os.path.isdir(os.path.dirname(passwd_file)):
            # NOTE: need to ensure this is not mysql root dir (which needs
            # to be mysql readable)
            mkdir(os.path.dirname(passwd_file), owner='root', group='root',
                  perms=0o770)
            # Force permissions - for some reason the chmod in makedirs
            # fails
            os.chmod(os.path.dirname(passwd_file), 0o770)

        _password = password or pwgen(length=32)
        write_file(passwd_file, _password, owner='root', group='root',
                   perms=0o660)

    return _password
def git_pre_install():
    """Perform glance pre-install setup."""
    dirs = [
        '/var/lib/glance',
        '/var/lib/glance/images',
        '/var/lib/glance/image-cache',
        '/var/lib/glance/image-cache/incomplete',
        '/var/lib/glance/image-cache/invalid',
        '/var/lib/glance/image-cache/queue',
        '/var/log/glance',
    ]

    logs = [
        '/var/log/glance/glance-api.log',
        '/var/log/glance/glance-registry.log',
    ]

    adduser('glance', shell='/bin/bash', system_user=True)
    add_group('glance', system_group=True)
    add_user_to_group('glance', 'glance')

    for d in dirs:
        mkdir(d, owner='glance', group='glance', perms=0o755, force=False)

    for logfile in logs:
        write_file(logfile, '', owner='glance', group='glance', perms=0o600)
def write_cqlshrc(self, owner):
    '''Helper to write a cqlsh configuration file

    Creates a ~user/.cassandra/cqlshrc for interactive use with the
    cqlsh command line tool.
    '''
    cqlshrc_path = os.path.expanduser(
        '~{}/.cassandra/cqlshrc'.format(owner))
    details = self.details
    if not details:
        if os.path.exists(cqlshrc_path):
            os.remove(cqlshrc_path)
        return

    first = details[0]
    cqlshrc = configparser.ConfigParser(interpolation=None)
    cqlshrc.read([cqlshrc_path])
    cqlshrc.setdefault('authentication', {})
    if first.username:
        cqlshrc['authentication']['username'] = first.username
        cqlshrc['authentication']['password'] = first.password
    cqlshrc.setdefault('connection', {})
    cqlshrc['connection']['hostname'] = first.hosts.pop()
    cqlshrc['connection']['port'] = str(first.native_transport_port)

    ini = io.StringIO()
    cqlshrc.write(ini)
    host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700, owner=owner)
    host.write_file(cqlshrc_path, ini.getvalue().encode('UTF-8'),
                    perms=0o600, owner=owner)
def configure_cert(self, path, cert, key, cn=None):
    """Write out TLS certificate and key to disk.

    :param path: Directory to place files in
    :type path: str
    :param cert: TLS Certificate
    :type cert: str
    :param key: TLS Key
    :type key: str
    :param cn: Canonical name for service
    :type cn: Optional[str]
    """
    if not cn:
        cn = self.get_default_cn()
    ch_host.mkdir(path=path)
    if cn:
        cert_filename = 'cert_{}'.format(cn)
        key_filename = 'key_{}'.format(cn)
    else:
        cert_filename = 'cert'
        key_filename = 'key'
    ch_host.write_file(path=os.path.join(path, cert_filename),
                       content=cert.encode('utf-8'), group=self.group,
                       perms=0o640)
    ch_host.write_file(path=os.path.join(path, key_filename),
                       content=key.encode('utf-8'), group=self.group,
                       perms=0o640)
def reset_limits():
    '''Set /etc/security/limits.d correctly for Ubuntu, so the
    startup scripts don't emit a spurious warning.

    Per Cassandra documentation, Ubuntu needs some extra
    twiddling in /etc/security/limits.d. I have no idea why the
    packages don't do this, since they are already setting limits
    for the cassandra user correctly. The real bug is that the
    limits of the user running the startup script are being checked,
    rather than the limits of the user that will actually run the
    process.
    '''
    contents = dedent('''\
        # Maintained by Juju
        root - memlock unlimited
        root - nofile 100000
        root - nproc 32768
        root - as unlimited
        ubuntu - memlock unlimited
        ubuntu - nofile 100000
        ubuntu - nproc 32768
        ubuntu - as unlimited
        ''')
    host.write_file('/etc/security/limits.d/cassandra-charm.conf',
                    contents.encode('US-ASCII'))
def mongodb_input(mongodb):
    template = """
[[inputs.mongodb]]
  servers = {{ servers }}
"""
    rels = hookenv.relations_of_type('mongodb')
    mongo_addresses = []
    for rel in rels:
        addr = rel['private-address']
        port = rel.get('port', None)
        if port:
            mongo_address = '{}:{}'.format(addr, port)
        else:
            mongo_address = addr
        mongo_addresses.append(mongo_address)
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'mongodb')
    if mongo_addresses:
        context = {"servers": json.dumps(mongo_addresses)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "mongodb")
        hookenv.log("Updating {} plugin config file".format('mongodb'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.mongodb.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
def configure_cert(self, cert, key, cn=None):
    """Configure service SSL cert and key

    Write out service SSL certificate and key for Apache.

    @param cert string SSL Certificate
    @param key string SSL Key
    @param cn string Canonical name for service
    """
    if os_utils.snap_install_requested():
        ssl_dir = '/var/snap/{snap_name}/common/etc/nginx/ssl'.format(
            snap_name=self.primary_snap)
    else:
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.name)
    if not cn:
        cn = self.get_default_cn()
    ch_host.mkdir(path=ssl_dir)
    if cn:
        cert_filename = 'cert_{}'.format(cn)
        key_filename = 'key_{}'.format(cn)
    else:
        cert_filename = 'cert'
        key_filename = 'key'
    ch_host.write_file(path=os.path.join(ssl_dir, cert_filename),
                       content=cert.encode('utf-8'))
    ch_host.write_file(path=os.path.join(ssl_dir, key_filename),
                       content=key.encode('utf-8'))
def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
    """Install the certs passed into the ssl dir and append the chain if
    provided.

    :param ssl_dir: str Directory to create symlinks in
    :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
    :param chain: str Chain to be appended to certs
    :param user: (Optional) Owner of certificate files. Defaults to 'root'
    :type user: str
    :param group: (Optional) Group of certificate files. Defaults to 'root'
    :type group: str
    """
    for cn, bundle in certs.items():
        cert_filename = 'cert_{}'.format(cn)
        key_filename = 'key_{}'.format(cn)
        cert_data = bundle['cert']
        if chain:
            # Append chain file so that clients that trust the root CA will
            # trust certs signed by an intermediate in the chain
            cert_data = cert_data + os.linesep + chain
        write_file(
            path=os.path.join(ssl_dir, cert_filename), owner=user,
            group=group, content=cert_data, perms=0o640)
        write_file(
            path=os.path.join(ssl_dir, key_filename), owner=user,
            group=group, content=bundle['key'], perms=0o640)
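# Hypothetical usage sketch for install_certs() above; the directory and
# certificate payloads are illustrative placeholders, not real material.
certs = {
    'keystone.example.com': {'cert': '-----BEGIN CERTIFICATE-----\n...',
                             'key': '-----BEGIN PRIVATE KEY-----\n...'},
}
install_certs('/etc/apache2/ssl/keystone', certs, chain=None)
# Writes cert_keystone.example.com and key_keystone.example.com into the
# ssl dir, each owned root:root with mode 0o640.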
def influxdb_api_output(influxdb):
    required_keys = ['hostname', 'port', 'user', 'password']
    rels = hookenv.relations_of_type('influxdb-api')
    endpoints = []
    user = None
    password = None
    for rel in rels:
        if all([rel.get(key) for key in required_keys]):
            endpoints.append(
                "http://{}:{}".format(rel['hostname'], rel['port']))
            if user is None:
                user = rel['user']
            if password is None:
                password = rel['password']
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'influxdb-api')
    if endpoints:
        hookenv.log("Updating {} plugin config file".format('influxdb-api'))
        content = render(source='influxdb-api.conf.tmpl', target=None,
                         templates_dir=get_templates_dir(),
                         context={'urls': json.dumps(endpoints),
                                  'username': '******'.format(user),
                                  'password': '******'.format(password)})
        extra_opts = render_extra_options("outputs", "influxdb")
        host.write_file(config_path,
                        '\n'.join([content, extra_opts]).encode('utf-8'))
        set_state('plugins.influxdb-api.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
def emit_systemd_overrides_file():
    """Generate the systemd overrides file
    With Start and Stop timeout values
    Note: (David Ames) Bug#1654403 Work around
    May be removed if bug is resolved
    If timeout value is set to -1 pass infinity
    """
    if not init_is_systemd():
        return

    stop_timeout = int(config('service_stop_timeout'))
    if stop_timeout < 0:
        stop_timeout = 'infinity'
    start_timeout = int(config('service_start_timeout'))
    if start_timeout < 0:
        start_timeout = 'infinity'

    systemd_overrides_context = {'service_stop_timeout': stop_timeout,
                                 'service_start_timeout': start_timeout,
                                 }
    for service in ['corosync', 'pacemaker']:
        overrides_dir = SYSTEMD_OVERRIDES_DIR.format(service)
        overrides_file = SYSTEMD_OVERRIDES_FILE.format(overrides_dir)
        if not os.path.isdir(overrides_dir):
            os.mkdir(overrides_dir)

        write_file(path=overrides_file,
                   content=render_template('systemd-overrides.conf',
                                           systemd_overrides_context))

    # Update systemd with the new information
    subprocess.check_call(['systemctl', 'daemon-reload'])
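# The systemd-overrides.conf template is not included in this source; a
# plausible rendering under the context built above would be a drop-in like:
#
#   [Service]
#   TimeoutStartSec={{ service_start_timeout }}
#   TimeoutStopSec={{ service_stop_timeout }}
#
# systemd accepts the literal value "infinity" for these settings, which is
# why a configured -1 is translated to 'infinity' before rendering.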
def configure_admin(self):
    """Configure the admin user."""
    hookenv.log("Configuring user for jenkins")

    admin = self._admin_data()
    api = Api(packages=self._packages)
    api.update_password(admin.username, admin.password)

    # Save the password to a file. It's not used directly by this charm
    # but it's convenient for integration with third-party tools.
    host.write_file(
        paths.ADMIN_PASSWORD, admin.password.encode("utf-8"),
        owner="root", group="root", perms=0o0600)

    if not os.path.exists(paths.LAST_EXEC):
        # This means it's the very first time we configure the user,
        # and we want to create this file in order to avoid Jenkins
        # presenting the setup wizard.
        host.write_file(
            paths.LAST_EXEC, "{}\n".format(api.version()).encode("utf-8"),
            owner="jenkins", group="nogroup", perms=0o0600)
def elasticsearch_input(es):
    template = """
[[inputs.elasticsearch]]
  servers = {{ servers }}
"""
    hosts = []
    rels = hookenv.relations_of_type('elasticsearch')
    for rel in rels:
        es_host = rel.get('host')
        port = rel.get('port')
        if not es_host or not port:
            hookenv.log('No host received for relation: {}.'.format(rel))
            continue
        hosts.append("http://{}:{}".format(es_host, port))
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'elasticsearch')
    if hosts:
        context = {"servers": json.dumps(hosts)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "elasticsearch")
        hookenv.log("Updating {} plugin config file".format('elasticsearch'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.elasticsearch.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
        remove_state('plugins.elasticsearch.configured')
def configure_cert(self, cert, key, cn=None):
    """Configure service SSL cert and key

    Write out service SSL certificate and key for Apache.

    @param cert string SSL Certificate
    @param key string SSL Key
    @param cn string Canonical name for service
    """
    if os_utils.snap_install_requested():
        ssl_dir = '/var/snap/{snap_name}/common/etc/nginx/ssl'.format(
            snap_name=self.primary_snap)
    else:
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.name)
    if not cn:
        cn = self.get_default_cn()
    ch_host.mkdir(path=ssl_dir)
    if cn:
        cert_filename = 'cert_{}'.format(cn)
        key_filename = 'key_{}'.format(cn)
    else:
        cert_filename = 'cert'
        key_filename = 'key'
    ch_host.write_file(path=os.path.join(ssl_dir, cert_filename),
                       content=cert.encode('utf-8'), group=self.group,
                       perms=0o640)
    ch_host.write_file(path=os.path.join(ssl_dir, key_filename),
                       content=key.encode('utf-8'), group=self.group,
                       perms=0o640)
def configure_extra_plugins():
    config = hookenv.config()
    plugins = config['extra_plugins']
    if plugins:
        config_path = '{}/extra_plugins.conf'.format(get_configs_dir())
        host.write_file(config_path, plugins.encode('utf-8'))
        set_state('extra_plugins.configured')
def update_nrpe_config():
    # python-dbus is used by check_upstart_job
    apt_install(['python-dbus', 'lockfile-progs'])
    log('Refreshing nagios checks')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'check_ceph_status.py'),
            os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))
    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                     'collect_ceph_status.sh'),
        script)
    cronjob = "{} root {}\n".format('*/5 * * * *', script)
    write_file(STATUS_CRONFILE, cronjob)

    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    check_cmd = 'check_ceph_status.py -f {} --degraded_thresh {}' \
                ' --misplaced_thresh {}' \
                ' --recovery_rate {}'.format(STATUS_FILE,
                                             config('nagios_degraded_thresh'),
                                             config('nagios_misplaced_thresh'),
                                             config('nagios_recovery_rate'))
    if config('nagios_raise_nodeepscrub'):
        check_cmd = check_cmd + ' --raise_nodeepscrub'
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {{{}}}'.format(current_unit),
        check_cmd=check_cmd)
    nrpe_setup.write()
def copy_conf_to_policyd():
    """Walk the conf_dir and copy everything into the policy_dir.

    This is used after processing the policy.d resource file to put the
    package and templated policy files in DASHBOARD_PKG_DIR/conf/ into the
    /etc/openstack-dashboard/policy.d/
    """
    log("policyd: copy files from conf to /etc/openstack-dashboard/policy.d",
        level=INFO)
    conf_dir = os.path.join(DASHBOARD_PKG_DIR, 'conf')
    conf_parts_count = len(conf_dir.split(os.path.sep))
    policy_dir = policyd.policyd_dir_for('openstack-dashboard')
    for root, dirs, files in os.walk(conf_dir):
        # make _root relative to the conf_dir
        _root = os.path.sep.join(root.split(os.path.sep)[conf_parts_count:])
        # make any dirs necessary
        for d in dirs:
            _dir = os.path.join(policy_dir, _root, d)
            if not os.path.exists(_dir):
                mkdir(_dir, owner='horizon', group='horizon', perms=0o775)
        # now copy the files.
        for f in files:
            source = os.path.join(conf_dir, _root, f)
            dest = os.path.join(policy_dir, _root, f)
            with open(source, 'r') as fh:
                content = fh.read()
            write_file(dest, content, 'horizon', 'horizon')
    log("...done.", level=INFO)
def render_config(self, restart_trigger):
    """Render the domain specific LDAP configuration for the application
    """
    checksum = ch_host.file_hash(self.configuration_file)
    core.templating.render(
        source=KEYSTONE_CONF_TEMPLATE,
        template_loader=os_templating.get_loader(
            'templates/', self.release),
        target=self.configuration_file,
        context=self.adapters_instance)
    tmpl_changed = (checksum != ch_host.file_hash(self.configuration_file))

    cert = hookenv.config('tls-ca-ldap')

    cert_changed = False
    if cert:
        ca_file = self.options.backend_ca_file
        old_cert_csum = ch_host.file_hash(ca_file)
        ch_host.write_file(ca_file, cert, owner='root', group='root',
                           perms=0o644)
        cert_csum = ch_host.file_hash(ca_file)
        cert_changed = (old_cert_csum != cert_csum)

    if tmpl_changed or cert_changed:
        restart_trigger()
def update_nrpe_config():
    # python-dbus is used by check_upstart_job
    apt_install(['python-dbus', 'lockfile-progs'])
    log('Refreshing nagios checks')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'check_ceph_status.py'),
            os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))
    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                     'collect_ceph_status.sh'),
        script)
    cronjob = "{} root {}\n".format('*/5 * * * *', script)
    write_file(STATUS_CRONFILE, cronjob)

    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    check_cmd = 'check_ceph_status.py -f {} --degraded_thresh {}' \
                ' --misplaced_thresh {}' \
                ' --recovery_rate {}'.format(STATUS_FILE,
                                             config('nagios_degraded_thresh'),
                                             config('nagios_misplaced_thresh'),
                                             config('nagios_recovery_rate'))
    if config('nagios_raise_nodeepscrub'):
        check_cmd = check_cmd + ' --raise_nodeepscrub'
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {{{}}}'.format(current_unit),
        check_cmd=check_cmd)

    if config('nagios_additional_checks'):
        additional_critical = config('nagios_additional_checks_critical')
        x = ast.literal_eval(config('nagios_additional_checks'))
        for key, value in x.items():
            name = "ceph-{}".format(key.replace(" ", ""))
            log("Adding check {}".format(name))
            check_cmd = 'check_ceph_status.py -f {}' \
                        ' --additional_check \\\"{}\\\"' \
                        ' {}'.format(STATUS_FILE, value,
                                     "--additional_check_critical"
                                     if additional_critical is True else "")
            nrpe_setup.add_check(
                shortname=name,
                description='Additional Ceph checks {{{}}}'.format(
                    current_unit),
                check_cmd=check_cmd)

    if config('nagios_check_num_osds'):
        check_cmd = 'check_ceph_status.py -f {} --check_num_osds'.format(
            STATUS_FILE)
        nrpe_setup.add_check(
            shortname='ceph_num_osds',
            description='Check whether all OSDs are up and in',
            check_cmd=check_cmd)

    nrpe_setup.write()
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8',
           template_loader=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute. It can also be `None`, in which
    case no file will be written.

    The context should be a dict containing the values to be replaced in
    the template.

    The `owner`, `group`, and `perms` options will be passed to
    `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the
    charm.

    The rendered template will be written to the file as well as being
    returned as a string.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))

    try:
        template = template_env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group, perms)
    return content
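# Hypothetical usage sketch for render() above; the template name and
# context keys are illustrative, not taken from a real charm.
render(
    source='haproxy.cfg.j2',           # looked up under <charm>/templates/
    target='/etc/haproxy/haproxy.cfg',
    context={'backends': ['10.0.0.2:80', '10.0.0.3:80']},
    perms=0o444,
)
# With target=None the rendered string is returned without touching disk,
# which is how apache_input() above builds its vhost snippet.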
def secrets_storage_changed():
    vault_ca = relation_get('vault_ca')
    if vault_ca:
        vault_ca = base64.decodebytes(json.loads(vault_ca).encode())
        write_file('/usr/local/share/ca-certificates/vault-ca.crt',
                   vault_ca, perms=0o644)
        subprocess.check_call(['update-ca-certificates', '--fresh'])
    prepare_disks_and_activate()
def ensure_files():
    '''
    Ensures PLUMgrid specific files exist before templates are written.
    '''
    write_file(SUDOERS_CONF,
               "\nnova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
               owner='root', group='root', perms=0o644)
    _exec_cmd(cmd=['rm', '-f', IFC_LIST_GW])
def secrets_storage_changed():
    vault_ca = relation_get('vault_ca')
    if vault_ca:
        vault_ca = base64.decodebytes(json.loads(vault_ca).encode())
        write_file('/usr/local/share/ca-certificates/vault-ca.crt',
                   vault_ca, perms=0o644)
        subprocess.check_call(['update-ca-certificates', '--fresh'])
    configure_local_ephemeral_storage()
def save_file(path, data, perms=0o400):
    if data:
        fdir = os.path.dirname(path)
        if not os.path.exists(fdir):
            os.makedirs(fdir)
        write_file(path, data, perms=perms)
    elif os.path.exists(path):
        os.remove(path)
def emit_corosync_conf():
    corosync_conf_context = get_corosync_conf()
    if corosync_conf_context:
        write_file(path=COROSYNC_CONF,
                   content=render_template('corosync.conf',
                                           corosync_conf_context))
        return True
    return False
def create_snap_proxy_conf(path, proxy):
    host.mkdir(os.path.dirname(path))
    content = dedent('''\
        # Managed by Juju
        [Service]
        Environment=http_proxy={}
        Environment=https_proxy={}
        ''').format(proxy, proxy)
    host.write_file(path, content.encode())
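# Hypothetical usage sketch for create_snap_proxy_conf() above; the drop-in
# path is an assumption based on the usual systemd override location for
# snapd, not something this source specifies.
create_snap_proxy_conf('/etc/systemd/system/snapd.service.d/proxy.conf',
                       'http://squid.internal:3128')
# A `systemctl daemon-reload` and a snapd restart would be needed before
# the new Environment= lines take effect.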
def create_snap_proxy_conf(path, proxy):
    host.mkdir(os.path.dirname(path))
    content = dedent('''\
        # Managed by Juju
        [Service]
        ''')
    for proxy_key, proxy_value in proxy.items():
        content += 'Environment={}={}\n'.format(proxy_key, proxy_value)
    host.write_file(path, content.encode())
def install_maintenance_crontab():
    # Every unit should run repair once per week (at least once per
    # GCGraceSeconds, which defaults to 10 days but can be changed per
    # keyspace).
    # Distribute the repair time evenly over the week.
    unit_num = int(hookenv.local_unit().split('/')[-1])
    dow, hour, minute = helpers.week_spread(unit_num)
    contents = jinja.render('cassandra_maintenance_cron.tmpl', vars())
    cron_path = "/etc/cron.d/cassandra-maintenance"
    host.write_file(cron_path, contents.encode('US-ASCII'))
def configure_cassandra_rackdc():
    config = hookenv.config()
    datacenter = config['datacenter'].strip()
    rack = config['rack'].strip() or hookenv.service_name()
    rackdc_properties = dedent('''\
        dc={}
        rack={}
        ''').format(datacenter, rack)
    rackdc_path = helpers.get_cassandra_rackdc_file()
    host.write_file(rackdc_path, rackdc_properties.encode('UTF-8'))
def get_shared_secret():
    secret = None
    if not os.path.exists(SHARED_SECRET):
        secret = str(uuid.uuid4())
        write_file(SHARED_SECRET, secret, perms=0o400)
    else:
        os.chmod(SHARED_SECRET, 0o400)
        with open(SHARED_SECRET, 'r') as secret_file:
            secret = secret_file.read().strip()
    return secret
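# Hypothetical usage sketch for get_shared_secret() above: the secret is
# generated once as a UUID4 string, persisted at SHARED_SECRET, and pinned
# to mode 0o400 on every subsequent read.
token = get_shared_secret()   # creates SHARED_SECRET on first call
print(len(token))             # 36, the length of a canonical UUID string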
def configure_job_environment():
    template_path = "{0}/templates/etc-default-reddit.tmpl".format(
        hookenv.charm_dir())
    host.write_file(
        '/etc/default/reddit',
        Template(open(template_path).read()).render(CONFIG)
    )
    return
def write_config_file(path, contents):
    '''Write out the config file to path

    Encodes contents to UTF-8 first.

    If using a snap edition, write to the snap config directory.
    '''
    if get_edition() == 'apache-snap':
        set_snap_config_file(os.path.basename(path), contents)
    else:
        contents = contents.encode('UTF-8')
        host.write_file(path, contents)
def write_maas_dns_address(resource_name, resource_addr):
    """Writes the specified IP address to the resource file for MAAS dns.

    :param resource_name: the name of the resource the address belongs to.
        This is the name of the file that will be written in /etc/maas_dns.
    :param resource_addr: the IP address for the resource. This will be
        written to the resource_name file.
    """
    mkdir(MAAS_DNS_CONF_DIR)
    write_file(os.path.join(MAAS_DNS_CONF_DIR, resource_name),
               content=resource_addr)
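# Hypothetical usage sketch for write_maas_dns_address() above; the resource
# name and address are illustrative placeholders.
write_maas_dns_address('maas_dns_ha_vip', '10.0.1.100')
# Leaves a file named maas_dns_ha_vip under /etc/maas_dns (per the
# docstring) containing "10.0.1.100" for MAAS DNS integration to pick up.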
def emit_cephconf(upgrading=False):
    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    context = get_ceph_context(upgrading)
    write_file(charm_ceph_conf, render_template('ceph.conf', context),
               ceph.ceph_user(), ceph.ceph_user(), 0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 90)
def setup_ssh_key():
    key = hookenv.config('private_ssh_key')
    if key:
        key = b64decode(key).decode('ascii')
        with ensure_user_and_perms(PATHS):
            key_type = 'rsa' if 'RSA' in key else 'dsa'
            key_path = path.join(SSH_HOME_PATH, 'id_{}'.format(key_type))
            write_file(key_path, key.encode('ascii'),
                       owner='ubunet', perms=0o500)
    elif path.exists(SSH_HOME_PATH):
        rmtree(SSH_HOME_PATH)
def ensure_files():
    '''
    Ensures PLUMgrid specific files exist before templates are written.
    '''
    release = os_release('nova-compute', base='kilo')
    if release == 'kilo':
        disable_apparmor_libvirt()
    write_file(SUDOERS_CONF,
               "\nnova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
               owner='root', group='root', perms=0o644)
    _exec_cmd(cmd=['mkdir', '-p', FILTERS_CONF_DIR])
    _exec_cmd(cmd=['touch', FILTERS_CONF])