def configure_vsm_conf():
    """Render the VSM dashboard configuration and flag completion.

    Collects keystone and rabbitmq connection data previously cached in
    charm state, renders the ``vsm.conf`` template with it, and sets the
    ``vsm.configured`` state.
    """
    vsm_dashboard_context = {
        'keystone_private': get_state('vsm.keystone.private-address'),
        'keystone_credentials_host': get_state('vsm.keystone.credentials_host'),
        'keystone_credentials_protocol':
            get_state('vsm.keystone.credentials_protocol'),
        'keystone_credentials_port':
            get_state('vsm.keystone.credentials_port'),
        'keystone_api_version': get_state('vsm.keystone.api_version'),
        'keystone_auth_host': get_state('vsm.keystone.auth_host'),
        'keystone_auth_protocol': get_state('vsm.keystone.auth_protocol'),
        'keystone_auth_port': get_state('vsm.keystone.auth_port'),
        # Auth Data
        'keystone_project': get_state('vsm.keystone.project'),
        'keystone_username': get_state('vsm.keystone.username'),
        'keystone_password': get_state('vsm.keystone.password'),
        'keystone_project_id': get_state('vsm.keystone.project_id'),
        'rabbit_hostname': get_state('vsm.rabbit.hostname'),
        'rabbit_vhost': get_state('vsm.rabbit.vhost'),
        'rabbit_username': get_state('vsm.rabbit.username'),
        'rabbit_password': get_state('vsm.rabbit.password'),
        'rabbit_port': get_state('vsm.rabbit.port'),
    }
    # TODO: Where do we put this file?  /tmp is a placeholder location.
    # BUG FIX: the original rendered to "/tmp" itself, which is a
    # directory - write_file() inside render() would fail.  Target an
    # actual file path instead.
    vsm_conf_path = "/tmp/vsm.conf"
    mkdir(os.path.dirname(vsm_conf_path), owner="vsm", group="vsm")
    render('vsm.conf', vsm_conf_path, vsm_dashboard_context, perms=0o644)
    set_state('vsm.configured')
def emit_cephconf():
    """Render the charm-private ceph.conf and register it as the
    system-wide /etc/ceph/ceph.conf via update-alternatives."""
    public_net = ', '.join(get_networks('ceph-public-network'))
    cluster_net = ', '.join(get_networks('ceph-cluster-network'))
    context = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_net,
        'ceph_cluster_network': cluster_net,
        'loglevel': config('loglevel'),
    }
    if config('prefer-ipv6'):
        # Fall back to a dynamic ipv6 address for any network that was
        # not explicitly configured.
        addr = get_ipv6_addr()[0]
        if not public_net:
            context['public_addr'] = addr
        if not cluster_net:
            context['cluster_addr'] = addr
    # Install ceph.conf as an alternative to support co-existence with
    # other charms that write this file.
    charm_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_conf),
          owner=ceph.ceph_user(), group=ceph.ceph_user())
    render('ceph.conf', charm_conf, context, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_conf, 100)
def _install_oracle_jre_tarball(tarball):
    """Unpack an Oracle JRE tarball and point the java alternatives at it.

    Skips the unpack when the same tarball was already installed.
    """
    # Same directory as webupd8 to avoid surprising people, but it could
    # be anything.
    dest = ('/usr/lib/jvm/java-7-oracle' if 'jre-7u' in str(tarball)
            else '/usr/lib/jvm/java-8-oracle')
    if not os.path.isdir(dest):
        host.mkdir(dest)
    jre_exists = os.path.exists(os.path.join(dest, 'bin', 'java'))
    config = hookenv.config()
    # Unpack the latest tarball if necessary.
    if jre_exists and config.get('oracle_jre_tarball', '') == tarball:
        hookenv.log('Already installed {}'.format(tarball))
    else:
        hookenv.log('Unpacking {}'.format(tarball))
        subprocess.check_call(['tar', '-xz', '-C', dest,
                               '--strip-components=1', '-f', tarball])
        config['oracle_jre_tarball'] = tarball
    # Set alternatives, so /usr/bin/java does what we want.
    for tool in ('java', 'javac'):
        tool_path = os.path.join(dest, 'bin', tool)
        subprocess.check_call(['update-alternatives', '--install',
                               os.path.join('/usr/bin', tool),
                               tool, tool_path, '1'])
        subprocess.check_call(['update-alternatives', '--set',
                               tool, tool_path])
def install(self, plugins):
    """Install the given plugins, optionally removing unlisted ones.

    @params plugins: A whitespace-separated list of plugins to install.
    """
    wanted = (plugins or "").split()
    hookenv.log("Stopping jenkins for plugin update(s)")
    host.service_stop("jenkins")
    hookenv.log("Installing plugins (%s)" % " ".join(wanted))
    host.mkdir(paths.PLUGINS, owner="jenkins", group="jenkins",
               perms=0o0755)
    # Anything on disk that the install pass did not cover is unlisted.
    present = set(glob.glob("%s/*.hpi" % paths.PLUGINS))
    installed = self._install_plugins(wanted)
    leftovers = present - installed
    if leftovers:
        if hookenv.config()["remove-unlisted-plugins"] == "yes":
            self._remove_plugins(leftovers)
        else:
            hookenv.log(
                "Unlisted plugins: (%s) Not removed. Set "
                "remove-unlisted-plugins to 'yes' to clear them "
                "away." % ", ".join(leftovers))
    hookenv.log("Starting jenkins to pickup configuration changes")
    host.service_start("jenkins")
def install():
    """Install Meteor and its dependencies, then bootstrap the app."""
    cfg = hookenv.config()
    host.adduser(USER, password='')
    host.mkdir(BASE_DIR, owner=USER, group=USER)
    # Meteor install script needs this
    os.environ['HOME'] = os.path.expanduser('~' + USER)
    hookenv.log('Installing dependencies')
    fetch.add_source(NODEJS_REPO)
    fetch.apt_update()
    fetch.apt_install(PACKAGES)
    hookenv.log('Installing Meteor')
    for command in (DOWNLOAD_CMD, INSTALL_CMD,
                    'npm install -g meteorite'):
        subprocess.check_call(command.split())
    init_code(cfg)
    init_bundle(cfg)
    init_dependencies(cfg)
    hookenv.open_port(cfg['port'])
    subprocess.check_call(
        ['chown', '-R', '{user}:{user}'.format(user=USER), BASE_DIR])
    cfg['mongo_url'] = ''
    write_upstart(cfg)
def register_configs():
    """Register config files with their respective contexts.

    Registration of some configs may not be required depending on
    existence of certain relations.
    """
    release = get_os_codename_package('glance-common', fatal=False) or 'essex'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    confs = [GLANCE_REGISTRY_CONF,
             GLANCE_API_CONF,
             GLANCE_API_PASTE_INI,
             GLANCE_REGISTRY_PASTE_INI,
             HAPROXY_CONF]
    if relation_ids('ceph'):
        mkdir('/etc/ceph')
        confs.append(CEPH_CONF)
    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
    # Apache 2.4 keeps config under conf-available; pick the matching
    # HTTPS site template.
    if os.path.exists('/etc/apache2/conf-available'):
        https_conf = HTTPS_APACHE_24_CONF
    else:
        https_conf = HTTPS_APACHE_CONF
    configs.register(https_conf, CONFIG_FILES[https_conf]['hook_contexts'])
    return configs
def add_dirs(self):
    """Create every directory described in ``self.dirs`` with its
    configured ownership and mode (root:root 0o755 by default)."""
    for name, details in self.dirs.items():
        owner = details.get('owner', 'root')
        group = details.get('group', 'root')
        perms = details.get('perms', 0o755)
        host.mkdir(self.path(name), owner=owner, group=group,
                   perms=perms)
def git_pre_install():
    """Perform pre-install setup.

    Creates the nova/neutron system users and groups and the state,
    log and plugin directory tree owned by nova.
    """
    dirs = [
        '/var/lib/nova',
        '/var/lib/nova/buckets',
        '/var/lib/nova/CA',
        '/var/lib/nova/CA/INTER',
        '/var/lib/nova/CA/newcerts',
        '/var/lib/nova/CA/private',
        '/var/lib/nova/CA/reqs',
        '/var/lib/nova/images',
        '/var/lib/nova/instances',
        '/var/lib/nova/keys',
        '/var/lib/nova/networks',
        '/var/lib/nova/tmp',
        '/var/lib/neutron',
        '/var/lib/neutron/lock',
        '/var/log/nova',
        '/etc/neutron',
        '/etc/neutron/plugins',
        '/etc/neutron/plugins/ml2',
    ]
    adduser('nova', shell='/bin/bash', system_user=True)
    subprocess.check_call(['usermod', '--home', '/var/lib/nova', 'nova'])
    add_group('nova', system_group=True)
    add_user_to_group('nova', 'nova')
    adduser('neutron', shell='/bin/bash', system_user=True)
    add_group('neutron', system_group=True)
    add_user_to_group('neutron', 'neutron')
    for d in dirs:
        # BUG FIX: 0o755 instead of the Python-2-only literal 0755,
        # which is a syntax error under Python 3; the value is identical.
        mkdir(d, owner='nova', group='nova', perms=0o755, force=False)
def create_repo(git):
    """Create a bare, group-shared git repository for the remote service
    and a dedicated git-shell user authorized by the client's SSH key."""
    username = git.get_remote('username')
    service = remote_service_name()
    repo_path = os.path.join(repo_root(), service + '.git')
    # Dedicated account restricted to git-shell.
    host.add_group(username)
    host.adduser(username, password=host.pwgen(32),
                 shell='/usr/bin/git-shell')
    # Authorize the client's public key for that account.
    ssh_public_key = git.get_remote('ssh-public-key')
    dotssh_dir = '/home/{}/.ssh/'.format(username)
    host.mkdir(dotssh_dir, username, username, 0o700)
    host.write_file(dotssh_dir + 'authorized_keys',
                    ssh_public_key.encode('utf-8'),
                    username, username, 0o400)
    host.mkdir(repo_path, group=username, perms=0o770)
    subprocess.check_call(
        ['git', 'init', '--bare', '--shared=group', repo_path])
    # Create server-side hook that will inform
    # clients whenever changes are committed.
    create_git_hooks(repo_path, username)
    # Make the repo owned by <username>.
    chown_repo(repo_path, username)
    # TODO(axw) read and publish all host keys.
    ssh_host_keys = [open(SSH_HOST_RSA_KEY).read()]
    git.configure(repo_path, ssh_host_keys)
    set_state('git.repo.created')
    status_set('active', '')
def prepare_env():
    """Stage an Oracle keystone install: create the keystone system user,
    the log/config directories, and copy sample configuration from the
    unpacked source tree into /etc/keystone and the Apache site dirs.
    """
    # Work from the unpacked oracle_keystone source; restore the
    # caller's cwd at the end.
    saved_path = os.getcwd()
    os.chdir('{}'.format(CHARM_LIB_DIR + "oracle_keystone"))
    # System account that owns all keystone state (no login shell).
    cmd = "useradd --home-dir /var/lib/keystone --create-home \
--system --shell /bin/false keystone"
    os.system(cmd)
    mkdir("/var/log/keystone")
    mkdir("/etc/keystone")
    chownr("/var/log/keystone", 'keystone', 'keystone')
    chownr("/var/lib/keystone", 'keystone', 'keystone')
    chownr("/etc/keystone", 'keystone', 'keystone')
    # NOTE(review): the backslash continuations below are written
    # flush-left so no stray whitespace enters the shell commands;
    # reconstructed from a whitespace-collapsed source - confirm
    # against the upstream charm.
    cmd = "cp ./etc/keystone.conf.sample /etc/keystone/keystone.conf"
    os.system(cmd)
    cmd = "cp ./etc/keystone-paste.ini /etc/keystone/keystone-paste.ini"
    os.system(cmd)
    cmd = "cp ./etc/default_catalog.templates /etc/keystone/\
default_catalog.templates"
    os.system(cmd)
    cmd = "cp ./etc/logging.conf.sample /etc/keystone/logging.conf"
    os.system(cmd)
    # The v3cloudsample policy becomes the active policy.json.
    cmd = "cp ./etc/policy.v3cloudsample.json /etc/keystone/policy.json"
    os.system(cmd)
    cmd = "cp ./etc/sso_callback_template.html /etc/keystone/\
sso_callback_template.html"
    os.system(cmd)
    # Same wsgi config copied to both sites-available and sites-enabled
    # (a2ensite is not used).
    cmd = "cp ./httpd/wsgi-keystone.conf /etc/apache2/sites-available/\
keystone.conf"
    os.system(cmd)
    cmd = "cp ./httpd/wsgi-keystone.conf /etc/apache2/sites-enabled/\
keystone.conf"
    os.system(cmd)
    os.chdir(saved_path)
def register_configs():
    """Register config files with their respective contexts.

    Registration of some configs may not be required depending on
    existence of certain relations.
    """
    release = os_release("glance-common")
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    confs = [GLANCE_REGISTRY_CONF,
             GLANCE_API_CONF,
             GLANCE_API_PASTE_INI,
             GLANCE_REGISTRY_PASTE_INI,
             HAPROXY_CONF]
    if relation_ids("ceph"):
        mkdir(os.path.dirname(ceph_config_file()))
        mkdir(os.path.dirname(CEPH_CONF))
        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - glance ceph.conf will be
        # lower priority that both of these but thats OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), "w").close()
        install_alternative(os.path.basename(CEPH_CONF),
                            CEPH_CONF, ceph_config_file())
        confs.append(ceph_config_file())
    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]["hook_contexts"])
    # Apache 2.4 uses conf-available; select the matching HTTPS template.
    if os.path.exists("/etc/apache2/conf-available"):
        https_conf = HTTPS_APACHE_24_CONF
    else:
        https_conf = HTTPS_APACHE_CONF
    configs.register(https_conf, CONFIG_FILES[https_conf]["hook_contexts"])
    return configs
def git_pre_install():
    """Perform glance pre-install setup.

    Creates the glance system user/group, the image-cache directory
    tree and empty log files, all owned by glance.
    """
    dirs = [
        '/var/lib/glance',
        '/var/lib/glance/images',
        '/var/lib/glance/image-cache',
        '/var/lib/glance/image-cache/incomplete',
        '/var/lib/glance/image-cache/invalid',
        '/var/lib/glance/image-cache/queue',
        '/var/log/glance',
    ]
    logs = [
        '/var/log/glance/glance-api.log',
        '/var/log/glance/glance-registry.log',
    ]
    adduser('glance', shell='/bin/bash', system_user=True)
    add_group('glance', system_group=True)
    add_user_to_group('glance', 'glance')
    for d in dirs:
        # BUG FIX: 0o755/0o600 instead of the Python-2-only literals
        # 0755/0600, which are syntax errors under Python 3; the values
        # are identical.
        mkdir(d, owner='glance', group='glance', perms=0o755, force=False)
    for l in logs:
        write_file(l, '', owner='glance', group='glance', perms=0o600)
def install():
    """ Install Hook """
    log('ftb-infinity: install')
    status_set('maintenance', 'installing FTB modpack')
    # Service account with a proper shell and home.
    adduser(FTB_USER)
    mkdir(FTB_HOME, owner=FTB_USER, group=FTB_USER, perms=0o750)
    check_call(['usermod', '-s', '/bin/bash', '-d', FTB_HOME, FTB_USER])
    # Download ftb
    ArchiveUrlFetchHandler().install(FTB_DL_URL, FTB_HOME)
    # Sanitize permissions and make the installer executable.
    chownr(FTB_HOME, FTB_USER, FTB_USER)
    installer = os.path.join(FTB_HOME, 'FTBInstall.sh')
    mode = os.stat(installer).st_mode
    os.chmod(installer, mode | stat.S_IXUSR | stat.S_IXGRP)
    # Accept EULA
    sed(os.path.join(FTB_HOME, 'eula.txt'), 'eula=false', 'eula=true')
    # Download minecraft jars
    with chdir(FTB_HOME):
        check_call(['sudo', '-u', FTB_USER, '-H',
                    os.path.join(FTB_HOME, 'FTBInstall.sh')])
    # Render server.properties
    ftb_config_server()
    # Deploy systemd service
    ftb_systemd_install()
    set_state(CHARM_STATE_AVAILABLE)
    status_set('waiting', 'ftb downloaded')
def setup_images_folder():
    """Expose the libvirt images directory under /opt/VNF for openvim.

    Creates /opt/VNF, symlinks the libvirt image store into it, and
    adjusts ownership/permissions so the openvim group can write images.
    """
    status_set("maintenance", "Setting up VM images folder")
    mkdir('/opt/VNF', owner='openvim', group='openvim',
          perms=0o775, force=False)
    symlink('/var/lib/libvirt/images', '/opt/VNF/images')
    chownr('/opt/VNF', owner='openvim', group='openvim',
           follow_links=False, chowntopdir=True)
    chownr('/var/lib/libvirt/images', owner='root', group='openvim',
           follow_links=False, chowntopdir=True)
    chmod('/var/lib/libvirt/images', 0o775)
def bootstrap_monitor_cluster(secret):
    """Initialise the local ceph-mon filesystem with the given mon secret.

    No-op when the 'done' marker already exists.  The temporary keyring
    is always removed, whether or not bootstrap succeeds.

    :param secret: mon. key used to seed the monitor keyring.
    """
    hostname = get_unit_hostname()
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    done = '{}/done'.format(path)
    upstart = '{}/upstart'.format(path)
    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
    if os.path.exists(done):
        log('bootstrap_monitor_cluster: mon already initialized.')
        return
    # Ceph >= 0.61.3 needs this for ceph-mon fs creation
    mkdir('/var/run/ceph', perms=0o755)
    mkdir(path)
    # BUG FIX: the original used a bare `except: raise` before the
    # finally-clause, which adds nothing; try/finally alone gives the
    # same semantics without the lint-flagged bare except.
    try:
        subprocess.check_call(['ceph-authtool', keyring,
                               '--create-keyring', '--name=mon.',
                               '--add-key={}'.format(secret),
                               '--cap', 'mon', 'allow *'])
        subprocess.check_call(['ceph-mon', '--mkfs', '-i', hostname,
                               '--keyring', keyring])
        # Marker files: 'done' prevents re-bootstrap, 'upstart' selects
        # the upstart init flavour.
        with open(done, 'w'):
            pass
        with open(upstart, 'w'):
            pass
        service_restart('ceph-mon-all')
    finally:
        os.unlink(keyring)
def remember_devices(devs):
    """Add device to local store of ringed devices.

    Each device is keyed by "<dev>@<JUJU_ENV_UUID>" and stored with its
    blkid so re-runs and devices recycled from other environments can be
    recognised.
    """
    d = os.path.dirname(KV_DB_PATH)
    if not os.path.isdir(d):
        mkdir(d)
    kvstore = KVStore(KV_DB_PATH)
    devstore = devstore_safe_load(kvstore.get(key='devices')) or {}
    env_uuid = os.environ['JUJU_ENV_UUID']
    for dev in devs:
        blk_uuid = get_device_blkid("/dev/%s" % (dev))
        key = "%s@%s" % (dev, env_uuid)
        if key in devstore and devstore[key].get('blkid') == blk_uuid:
            log("Device '%s' already in devstore (status:%s)" %
                (dev, devstore[key].get('status')), level=DEBUG)
        else:
            # Same blkid under a different environment uuid means the
            # device was ringed by another Juju environment.
            existing = [(k, v) for k, v in devstore.iteritems()
                        if v.get('blkid') == blk_uuid and
                        re.match("^(.+)@(.+)$", k).group(1) == dev]
            if existing:
                log("Device '%s' already in devstore but has a different "
                    "JUJU_ENV_UUID (%s)" %
                    (dev, re.match(".+@(.+)$", existing[0][0]).group(1)),
                    level=WARNING)
            else:
                # BUG FIX: the original interpolated (blk_uuid, dev),
                # swapping the device name and blkid in the log message.
                log("Adding device '%s' with blkid='%s' to devstore" %
                    (dev, blk_uuid), level=DEBUG)
                devstore[key] = {'blkid': blk_uuid, 'status': 'active'}
    if devstore:
        kvstore.set(key='devices', value=json.dumps(devstore))
    kvstore.flush()
    kvstore.close()
def configure_jupyter_notebook():
    """Render the Jupyter notebook server config (port + hashed
    password), (re)generate the init service file, and restart the
    notebook when the configuration changed.
    """
    conf = hookenv.config()
    jupyter_dir = '/opt/jupyter'
    port = conf['open-port']
    # Get or create and get password
    kv_store = unitdata.kv()
    password = kv_store.get('password')
    if not password:
        password = generate_password()
        kv_store.set('password', password)
    # Convert to string because some functions can't handle kv object type.
    password = str(password)
    password_hash = generate_hash(password)
    context = {
        'port': port,
        'password_hash': password_hash,
    }
    if data_changed('jupyter-conf', context):
        # Create config directory and render config file
        host.mkdir(jupyter_dir)
        templating.render(
            source='jupyter_notebook_config.py.jinja2',
            target=jupyter_dir + '/jupyter_notebook_config.py',
            context=context
        )
        # Generate upstart template / service file
        # (upstart on trusty, systemd elsewhere).
        context = {}
        if lsb_release.get_lsb_information()['RELEASE'] == "14.04":
            render_api_upstart_template(context)
        else:
            render_api_systemd_template(context)
        restart_notebook()
    # NOTE(review): nesting reconstructed from a whitespace-collapsed
    # source - the service rendering/restart are assumed to run only on
    # config change while the chown always runs; confirm upstream.
    chownr(jupyter_dir, 'ubuntu', 'ubuntu', chowntopdir=True)
def emit_cephconf():
    """Write the charm's private ceph.conf and register it as the
    system /etc/ceph/ceph.conf alternative."""
    context = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': config('ceph-public-network'),
        'ceph_cluster_network': config('ceph-cluster-network'),
    }
    if config('prefer-ipv6'):
        # Fall back to a dynamic ipv6 address for any network that was
        # not explicitly configured.
        addr = get_ipv6_addr()[0]
        if not config('ceph-public-network'):
            context['public_addr'] = addr
        if not config('ceph-cluster-network'):
            context['cluster_addr'] = addr
    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf))
    with open(charm_ceph_conf, 'w') as fp:
        fp.write(render_template('ceph.conf', context))
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
def config_changed():
    """Keystone config-changed hook: ipv6 setup, unison SSH user
    provisioning, then either a git-based install or a package-based
    OpenStack upgrade before the post-upgrade config pass.
    """
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        # DB hosts must be re-synced with all ipv6 addresses.
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    # Unison peer-sync user; group differs for snap installs.
    unison.ensure_user(user=SSH_USER, group='juju_keystone')
    # NOTE(coreycb): can just use group='keystone' once snap has drop privs support
    if snap_install_requested():
        unison.ensure_user(user=SSH_USER, group='root')
    else:
        unison.ensure_user(user=SSH_USER, group='keystone')
    homedir = unison.get_homedir(SSH_USER)
    if not os.path.isdir(homedir):
        mkdir(homedir, SSH_USER, 'juju_keystone', 0o775)

    # Git installs and package upgrades are mutually exclusive paths;
    # 'action-managed-upgrade' defers upgrades to an operator action.
    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('keystone'):
            status_set('maintenance', 'Running openstack upgrade')
            # Re-exec variant: the upgrade may replace this very hook.
            do_openstack_upgrade_reexec(configs=CONFIGS)

    config_changed_postupgrade()
def get_mysql_password_on_disk(self, username=None, password=None):
    """Retrieve, generate or store a mysql password for the provided
    username on disk."""
    if username:
        passwd_file = self.user_passwd_file_template.format(username)
    else:
        passwd_file = self.root_passwd_file_template

    if os.path.exists(passwd_file):
        log("Using existing password file '%s'" % passwd_file, level=DEBUG)
        with open(passwd_file, 'r') as fp:
            return fp.read().strip()

    log("Generating new password file '%s'" % passwd_file, level=DEBUG)
    passwd_dir = os.path.dirname(passwd_file)
    if not os.path.isdir(passwd_dir):
        # NOTE: need to ensure this is not mysql root dir (which needs
        # to be mysql readable)
        mkdir(passwd_dir, owner='root', group='root', perms=0o770)
        # Force permissions - for some reason the chmod in makedirs
        # fails
        os.chmod(passwd_dir, 0o770)
    new_password = password or pwgen(length=32)
    write_file(passwd_file, new_password, owner='root', group='root',
               perms=0o660)
    return new_password
def register_configs():
    '''
    Returns an OSTemplateRenderer object with all required configs
    registered.
    '''
    release = os_release('nova-common')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    if relation_ids('ceph'):
        # Add charm ceph configuration to resources and
        # ensure directory actually exists
        mkdir(os.path.dirname(ceph_config_file()))
        mkdir(os.path.dirname(CEPH_CONF))
        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - nova-compute ceph.conf will be
        # lower priority that both of these but thats OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'w').close()
        install_alternative(os.path.basename(CEPH_CONF),
                            CEPH_CONF, ceph_config_file())
    for cfg, details in resource_map().iteritems():
        configs.register(cfg, details['contexts'])
    return configs
def install():
    """Install hook: configure apt sources, install packages, run the
    optional git install, open ports and stage plugin extras (midonet
    directory, etcd package)."""
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    additional_install_locations(neutron_plugin, openstack_origin)
    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install(determine_packages(openstack_origin), fatal=True)
    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))
    for port in determine_ports():
        open_port(port)
    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron',
              group='neutron', perms=0o755, force=False)
    # Optionally fetch and install a standalone etcd package.
    etcd_package_url = config('etcd-package-url')
    if etcd_package_url and etcd_package_url.startswith('http'):
        check_call(["wget", etcd_package_url])
        check_call(["dpkg", "-i", etcd_package_url.split('/')[-1]])
def store_cqlshrc_credentials(owner, username, password):
    """Persist cqlsh credentials and connection details to the owner's
    cqlshrc, readable only by that user."""
    cqlshrc_path = get_cqlshrc_path(owner)
    hookenv.log('Storing credentials for {} in {}'.format(owner,
                                                          cqlshrc_path))
    c = config()
    cqlshrc = configparser.ConfigParser(interpolation=None)
    cqlshrc.read([cqlshrc_path])
    # We set items separately, rather than together, so that we have a
    # defined order for the ConfigParser to preserve and the tests to
    # rely on.
    cqlshrc.setdefault('authentication', {})
    cqlshrc['authentication']['username'] = username
    cqlshrc['authentication']['password'] = password
    cqlshrc.setdefault('connection', {})
    cqlshrc['connection']['hostname'] = rpc_broadcast_ip_address()
    # Cassandra 2.0 still speaks thrift rpc; later versions use the
    # native transport.
    if get_cassandra_version().startswith('2.0'):
        port_key = 'rpc_port'
    else:
        port_key = 'native_transport_port'
    cqlshrc['connection']['port'] = str(c[port_key])
    buf = io.StringIO()
    cqlshrc.write(buf)
    host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700, owner=owner)
    host.write_file(cqlshrc_path, buf.getvalue().encode('UTF-8'),
                    perms=0o400, owner=owner)
def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    install_dir = layer.options.get('ntpmon', 'install-dir')
    service_name = layer.options.get('ntpmon', 'service-name')
    using_systemd = host.init_is_systemd()
    if install_dir:
        log('installing ntpmon')
        host.mkdir(os.path.dirname(install_dir))
        host.rsync('src/', '{}/'.format(install_dir))
    if service_name:
        if using_systemd:
            systemd_config = ('/etc/systemd/system/' + service_name +
                              '.service')
            log('installing systemd service: {}'.format(service_name))
            with open(systemd_config, 'w') as conffile:
                conffile.write(templating.render(
                    'src/' + service_name + '.systemd',
                    layer.options.get('ntpmon')))
            # BUG FIX: reloading unit files is `systemctl daemon-reload`;
            # there is no `systemd` CLI subcommand, so the original
            # `subprocess.call(['systemd', 'daemon-reload'])` always
            # failed and the new unit file was never picked up.
            subprocess.call(['systemctl', 'daemon-reload'])
        else:
            upstart_config = '/etc/init/' + service_name + '.conf'
            log('installing upstart service: {}'.format(service_name))
            with open(upstart_config, 'w') as conffile:
                conffile.write(templating.render(
                    'src/' + service_name + '.upstart',
                    layer.options.get('ntpmon')))
    set_flag('ntpmon.installed')
    clear_flag('ntpmon.configured')
def emit_cephconf():
    """Write ceph.conf plus the admin and mon keyrings, then notify
    radosgw units and clients."""
    context = {
        'mon_hosts': config('monitor-hosts'),
        'fsid': config('fsid'),
        'use_syslog': str(config('use-syslog')).lower(),
        'loglevel': config('loglevel'),
    }
    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf),
          owner=ceph.ceph_user(), group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, context, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
    # Admin keyring for cluster clients.
    admin_keyring = 'ceph.client.admin.keyring'
    render(admin_keyring, '/etc/ceph/' + admin_keyring,
           {'admin_key': config('admin-key')},
           owner=ceph.ceph_user(), perms=0o600)
    # Keyring for the local monitor daemon.
    mon_keyring_path = ('/var/lib/ceph/mon/ceph-' + get_unit_hostname() +
                        '/' + 'keyring')
    render('mon.keyring', mon_keyring_path,
           {'admin_key': config('admin-key')},
           owner=ceph.ceph_user(), perms=0o600)
    notify_radosgws()
    notify_client()
def configure_cert(self, cert, key, cn=None):
    """Configure service SSL cert and key

    Write out service SSL certificate and key for Apache.

    @param cert string SSL Certificate
    @param key string SSL Key
    @param cn string Canonical name for service
    """
    if os_utils.snap_install_requested():
        ssl_dir = '/var/snap/{snap_name}/common/etc/nginx/ssl'.format(
            snap_name=self.primary_snap)
    else:
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.name)
    cn = cn or self.get_default_cn()
    ch_host.mkdir(path=ssl_dir)
    # Per-CN filenames when a CN is known, bare names otherwise.
    suffix = '_{}'.format(cn) if cn else ''
    for filename, payload in (('cert' + suffix, cert),
                              ('key' + suffix, key)):
        ch_host.write_file(path=os.path.join(ssl_dir, filename),
                           content=payload.encode('utf-8'),
                           group=self.group,
                           perms=0o640)
def install():
    """Install hook: fetch, build and stage the CloudFoundry gorouter
    from source under a dedicated vcap user."""
    execd_preinstall()
    #add_source(config_data['source'], config_data['key'])
    #apt_update(fatal=True)
    apt_install(packages=ROUTER_PACKAGES, fatal=True)
    host.adduser('vcap')
    dirs = [CF_DIR + '/src/github.com/cloudfoundry',
            CF_DIR + '/config',
            CF_DIR + '/src/github.com/stretchr',
            '/var/vcap/sys/run/gorouter',
            '/var/vcap/sys/log/gorouter']
    for dir in dirs:
        # BUG FIX: 0o775 instead of the Python-2-only literal 0775,
        # which is a syntax error under Python 3; the value is identical.
        host.mkdir(dir, owner='vcap', group='vcap', perms=0o775)
    emit_routerconf()
    install_upstart_scripts()
    # Build gorouter inside a GOPATH rooted at CF_DIR.
    os.chdir(CF_DIR)
    os.environ['GOPATH'] = CF_DIR
    os.environ["PATH"] = CF_DIR + os.pathsep + os.environ["PATH"]
    os.chdir(CF_DIR + '/src/github.com/cloudfoundry')
    run(['git', 'clone', 'https://github.com/cloudfoundry/gorouter.git'])
    os.chdir(CF_DIR + '/src/github.com/stretchr/')
    run(['git', 'clone', 'https://github.com/stretchr/objx.git'])
    os.chdir(CF_DIR)
    run(['go', 'get', '-v', './src/github.com/cloudfoundry/gorouter/...'])
    run(['go', 'get', '-v', './...'])
    run(['go', 'build', '-v', './...'])
    chownr('/var/lib/cloudfoundry', owner='vcap', group='vcap')
    chownr('/var/vcap', owner='vcap', group='vcap')
def process_certificates(service_name, relation_id, unit,
                         custom_hostname_link=None, user='******',
                         group='root'):
    """Process the certificates supplied down the relation

    :param service_name: str Name of service the certifcates are for.
    :param relation_id: str Relation id providing the certs
    :param unit: str Unit providing the certs
    :param custom_hostname_link: str Name of custom link to create
    :param user: (Optional) Owner of certificate files. Defaults to 'root'
    :type user: str
    :param group: (Optional) Group of certificate files. Defaults to 'root'
    :type group: str
    """
    data = relation_get(rid=relation_id, unit=unit)
    ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
    mkdir(path=ssl_dir)
    name = local_unit().replace('/', '_')
    certs = data.get('{}.processed_requests'.format(name))
    if not certs:
        # Nothing processed for this unit yet.
        return
    install_ca_cert(data.get('ca').encode())
    install_certs(ssl_dir, json.loads(certs), data.get('chain'),
                  user=user, group=group)
    create_ip_cert_links(ssl_dir,
                         custom_hostname_link=custom_hostname_link)
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
    """
    Download and install an archive file, with optional checksum validation.

    The checksum can also be given on the :param:`source` URL's fragment.
    For example::

        handler.install('http://example.com/file.tgz#sha1=deadbeef')

    :param str source: URL pointing to an archive file.
    :param str dest: Local destination path to install to. If not given,
        installs to `$CHARM_DIR/archives/archive_file_name`.
    :param str checksum: If given, validate the archive file after download.
    :param str hash_type: Algorithm used to generate :param:`checksum`.
        Can be any hash alrgorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    """
    url_parts = self.parse_url(source)
    dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
    if not os.path.exists(dest_dir):
        # BUG FIX: 0o755 instead of the Python-2-only literal 0755,
        # which is a syntax error under Python 3; the value is identical.
        mkdir(dest_dir, perms=0o755)
    dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
    try:
        self.download(source, dld_file)
    except urllib2.URLError as e:
        raise UnhandledSource(e.reason)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    # Checksums may also arrive via the URL fragment, e.g. #sha1=...
    options = urlparse.parse_qs(url_parts.fragment)
    for key, value in options.items():
        if key in hashlib.algorithms:
            check_hash(dld_file, value, key)
    if checksum:
        check_hash(dld_file, checksum, hash_type)
    return extract(dld_file, dest)
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8',
           template_loader=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the
    `templates_dir`.

    The `target` path should be absolute.  It can also be `None`, in
    which case no file will be written.

    The context should be a dict containing the values to be replaced
    in the template.

    The `owner`, `group`, and `perms` options will be passed to
    `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in
    the charm.

    The rendered template will be written to the file as well as being
    returned as a string.

    Note: Using this requires python-jinja2; if it is not installed,
    calling this will attempt to use charmhelpers.fetch.apt_install to
    install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    # A caller-supplied loader wins; otherwise load from templates_dir.
    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))

    try:
        template = template_env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e

    content = template.render(context)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group,
                        perms)
    return content
def install():
    """Install hook for the Nuage VSP flavour of neutron-api: apt
    packages plus an optional nuage-neutron tarball install."""
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    additional_install_locations(neutron_plugin, openstack_origin)
    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install(determine_packages(openstack_origin), fatal=True)
    if neutron_plugin == 'vsp':
        source = config('nuage-tarball-url')
        if source is not None:
            try:
                handler = ArchiveUrlFetchHandler()
                path = handler.install(source)
                # setup.py-install each bundled package present in the
                # unpacked tarball.
                for package in ['nuage-neutron']:
                    package_path = os.path.join(path, package)
                    if os.path.exists(package_path):
                        log('install {0} from: {1}'.format(package,
                                                           package_path))
                        check_output([
                            'bash', '-c',
                            'cd {}; sudo python setup.py install'.format(
                                package_path)
                        ])
            except Exception as e:
                log('install failed with error: {}'.format(e.message))
                raise Exception(e)
    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))
    for port in determine_ports():
        open_port(port)
    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron',
              group='neutron', perms=0o755, force=False)
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8'):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the
    `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced
    in the template.

    The `owner`, `group`, and `perms` options will be passed to
    `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in
    the charm.

    Note: Using this requires python-jinja2; if it is not installed,
    calling this will attempt to use charmhelpers.fetch.apt_install to
    install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
    host.write_file(target, content.encode(encoding), owner, group, perms)
def setup_storage():
    """Discover block devices, format them as XFS and mount each one
    under /srv/node/<device> owned by swift, recording the mount in fstab.

    Devices already present in the ring are skipped; format failures are
    logged and skipped rather than aborting the whole run.
    """
    # Ensure /srv/node exists just in case no disks
    # are detected and used.
    mkdir(os.path.join('/srv', 'node'),
          owner='swift', group='swift',
          perms=0o755)
    reformat = str(config('overwrite')).lower() == "true"
    for dev in determine_block_devices():
        if is_device_in_ring(os.path.basename(dev)):
            log("Device '%s' already in the ring - ignoring" % (dev))
            continue

        if reformat:
            clean_storage(dev)

        try:
            # If not cleaned and in use, mkfs should fail.
            mkfs_xfs(dev, force=reformat)
        except subprocess.CalledProcessError as exc:
            # This is expected if a formatted device is provided and we are
            # not forcing the format - move on to the next device.
            log("Format device '%s' failed (%s) - continuing to next device"
                % (dev, exc), level=WARNING)
            continue

        basename = os.path.basename(dev)
        _mp = os.path.join('/srv', 'node', basename)
        mkdir(_mp, owner='swift', group='swift')

        options = None
        loopback_device = is_mapped_loopback_device(dev)
        if loopback_device:
            # Mount the backing file of the loop device directly so the
            # mount survives reboot; fstab needs the 'loop' option.
            dev = loopback_device
            options = "loop, defaults"

        mountpoint = '/srv/node/%s' % basename
        filesystem = "xfs"
        mount(dev, mountpoint, filesystem=filesystem)
        fstab_add(dev, mountpoint, filesystem, options=options)

        # Ownership/permissions must cover everything under the new mount.
        check_call(['chown', '-R', 'swift:swift', mountpoint])
        check_call(['chmod', '-R', '0755', mountpoint])
def install():
    """Install ceilometer packages from the configured OpenStack origin
    and prepare systemd/apache runtime prerequisites.
    """
    execd_preinstall()
    origin = config('openstack-origin')
    on_precise_distro = (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
                         origin == 'distro')
    if on_precise_distro:
        # Precise 'distro' ships too old an OpenStack; bump to grizzly UCA.
        origin = 'cloud:precise-grizzly'
    configure_installation_source(origin)
    missing = filter_installed_packages(get_packages())
    if missing:
        status_set('maintenance', 'Installing packages')
        apt_update(fatal=True)
        apt_install(missing, fatal=True)
    if init_is_systemd():
        # NOTE(jamespage): ensure systemd override folder exists prior to
        # attempting to write override.conf
        mkdir(os.path.dirname(CEILOMETER_API_SYSTEMD_CONF))
    if run_in_apache():
        disable_package_apache_site()
def write_vaultlocker_conf(context, priority=100):
    """Write vaultlocker configuration to disk and install alternative

    :param context: Dict of data from vault-kv relation
    :ptype: context: dict
    :param priority: Priority of alternative configuration
    :ptype: priority: int"""
    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
        hookenv.service_name()
    )
    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
    # BUG FIX: the original statement ended with a stray trailing comma,
    # silently turning the render() call into a one-element tuple
    # expression; harmless at runtime but misleading to readers.
    templating.render(source='vaultlocker.conf.j2',
                      target=charm_vl_path,
                      context=context, perms=0o600)
    alternatives.install_alternative('vaultlocker.conf',
                                     '/etc/vaultlocker/vaultlocker.conf',
                                     charm_vl_path, priority)
def install(self, source, dest=None, revno=None):
    """Fetch a bzr branch into a local checkout directory.

    The branch is checked out under ``dest`` when given, otherwise under
    ``$CHARM_DIR/fetched``; the leaf directory is named after the last
    path segment of the source URL.  Returns the checkout path.
    """
    parsed = self.parse_url(source)
    branch_name = parsed.path.strip("/").split("/")[-1]
    base_dir = dest if dest else os.path.join(
        os.environ.get('CHARM_DIR'), "fetched")
    dest_dir = os.path.join(base_dir, branch_name)
    if dest and not os.path.exists(dest):
        mkdir(dest, perms=0o755)
    try:
        self.branch(source, dest_dir, revno)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    return dest_dir
def osdize_dir(path):
    """Prepare the directory at ``path`` for use as a ceph OSD data dir.

    No-op when the path is already configured as an OSD.  Raises when the
    installed ceph is too old to support directory-backed OSDs.
    """
    if os.path.exists(os.path.join(path, 'upstart')):
        log('Path {} is already configured as an OSD - bailing'.format(path))
        return

    if cmp_pkgrevno('ceph', "0.56.6") < 0:
        # BUG FIX: the original used a bare `raise` with no active
        # exception, which produces RuntimeError('No active exception to
        # re-raise') instead of a meaningful error.
        msg = 'Unable to use directories for OSDs with ceph < 0.56.6'
        log(msg, level=ERROR)
        raise Exception(msg)

    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
    chownr('/var/lib/ceph', ceph_user(), ceph_user())
    cmd = [
        'sudo', '-u', ceph_user(),
        'ceph-disk',
        'prepare',
        '--data-dir',
        path
    ]
    subprocess.check_call(cmd)
def process_certificates(service_name, relation_id, unit,
                         custom_hostname_link=None, user='******',
                         group='root',
                         bindings=None):
    """Process the certificates supplied down the relation

    :param service_name: str Name of service the certificates are for.
    :param relation_id: str Relation id providing the certs
    :param unit: str Unit providing the certs
    :param custom_hostname_link: str Name of custom link to create
    :param user: (Optional) Owner of certificate files. Defaults to 'root'
    :type user: str
    :param group: (Optional) Group of certificate files. Defaults to 'root'
    :type group: str
    :param bindings: List of bindings to check in addition to default api
                     bindings.
    :type bindings: list of strings
    :returns: True if certificates processed for local unit or False
    :rtype: bool
    """
    if bindings:
        # Add default API bindings to bindings list
        bindings = list(bindings + get_default_api_bindings())
    else:
        # Use default API bindings
        bindings = get_default_api_bindings()
    data = relation_get(rid=relation_id, unit=unit)
    ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
    mkdir(path=ssl_dir)
    # Certs for this unit are published under a key derived from the local
    # unit name ('app/0' -> 'app_0.processed_requests').
    name = local_unit().replace('/', '_')
    certs = data.get('{}.processed_requests'.format(name))
    chain = data.get('chain')
    ca = data.get('ca')
    if certs:
        certs = json.loads(certs)
        _manage_ca_certs(ca, relation_id)
        install_certs(ssl_dir, certs, chain, user=user, group=group)
        create_ip_cert_links(
            ssl_dir, custom_hostname_link=custom_hostname_link,
            bindings=bindings)
        return True
    # No certificates published for this unit yet.
    return False
def _add_hp_fstab_mount(pagesize, mount=True):
    """Record a hugetlbfs mount for ``pagesize`` in fstab, replacing any
    existing entry for the same mountpoint, and optionally mount it now.
    """
    log("Add {} mountpoint from fstab".format(pagesize))
    mnt_point = '/dev/hugepages{}'.format(pagesize)
    mkdir(mnt_point, owner='root', group='root', perms=0o755)
    table = fstab.Fstab()
    existing = table.get_entry_by_attr('mountpoint', mnt_point)
    if existing:
        table.remove_entry(existing)
    # use different device name for 1G and 2M.
    # this name actually is not used by the system
    # but add_antry filter by device name.
    device = 'hugetlbfs{}'.format(pagesize)
    table.add_entry(
        table.Entry(device, mnt_point, 'hugetlbfs',
                    'pagesize={}'.format(pagesize), 0, 0))
    if mount:
        fstab_mount(mnt_point)
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Args:
    user (str)  -- Username to allow access to hugepages to
    group (str) -- Group name to own hugepages
    nr_hugepages (int) -- Number of pages to reserve
    max_map_count (int) -- Number of Virtual Memory Areas a process can own
    mnt_point (str) -- Directory to mount hugepages on
    pagesize (str) -- Size of hugepages
    mount (bool) -- Whether to Mount hugepages
    set_shmmax (bool) -- Whether to raise kernel.shmmax so the reserved
        pages can fit in a single shared-memory segment
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    # Ensure the VMA limit scales with the reservation (presumably to
    # leave headroom for mapping each page -- TODO confirm rationale).
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        # Only ever raise shmmax; never lower an operator-tuned value.
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    # Replace any stale fstab entry for this mountpoint before adding ours.
    lfstab = fstab.Fstab()
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize),
                         0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
def __call__(self):
    """Build the template context for the ``self.rel_name`` relation.

    Returns a dict of keystone auth/service endpoint settings gathered
    from the first related unit whose data is complete, or {} when no
    complete data is available yet.
    """
    log('Generating template context for ' + self.rel_name, level=DEBUG)
    ctxt = {}

    if self.service and self.service_user:
        # This is required for pki token signing if we don't want /tmp to
        # be used.
        cachedir = '/var/cache/%s' % (self.service)
        if not os.path.isdir(cachedir):
            log("Creating service cache dir %s" % (cachedir), level=DEBUG)
            mkdir(path=cachedir, owner=self.service_user,
                  group=self.service_user, perms=0o700)

        ctxt['signing_dir'] = cachedir

    for rid in relation_ids(self.rel_name):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            # Hosts may be IPv6; normalise to bracketed form when so.
            serv_host = rdata.get('service_host')
            serv_host = format_ipv6_addr(serv_host) or serv_host
            auth_host = rdata.get('auth_host')
            auth_host = format_ipv6_addr(auth_host) or auth_host
            # Default protocols to plain http when not published.
            svc_protocol = rdata.get('service_protocol') or 'http'
            auth_protocol = rdata.get('auth_protocol') or 'http'
            ctxt.update({'service_port': rdata.get('service_port'),
                         'service_host': serv_host,
                         'auth_host': auth_host,
                         'auth_port': rdata.get('auth_port'),
                         'admin_tenant_name': rdata.get('service_tenant'),
                         'admin_user': rdata.get('service_username'),
                         'admin_password': rdata.get('service_password'),
                         'service_protocol': svc_protocol,
                         'auth_protocol': auth_protocol})

            if context_complete(ctxt):
                # NOTE(jamespage) this is required for >= icehouse
                # so a missing value just indicates keystone needs
                # upgrading
                ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                return ctxt

    return {}
def test_creates_a_directory_with_defaults(self, os_, log):
    """host.mkdir() with no owner/group/perms should resolve the path via
    abspath, check existence, create it via makedirs with default mode
    0o555 and chown it to uid 0 / gid 0 (root:root).
    """
    uid = 0
    gid = 0
    path = '/some/other/path/from/link'
    realpath = '/some/path'
    path_exists = False  # force the creation branch
    perms = 0o555

    os_.path.abspath.return_value = realpath
    os_.path.exists.return_value = path_exists

    host.mkdir(path)

    # mkdir must operate on the resolved path, not the raw argument.
    os_.path.abspath.assert_called_with(path)
    os_.path.exists.assert_called_with(realpath)
    os_.makedirs.assert_called_with(realpath, perms)
    os_.chown.assert_called_with(realpath, uid, gid)
def install(self, source, branch="master", dest=None, depth=None):
    """Clone a git repository into a local directory and return its path.

    The repo is cloned under ``dest`` when given, otherwise under
    ``$CHARM_DIR/fetched``; the leaf directory is named after the last
    path segment of the source URL.

    :raises UnhandledSource: when the clone fails.
    """
    url_parts = self.parse_url(source)
    branch_name = url_parts.path.strip("/").split("/")[-1]
    if dest:
        dest_dir = os.path.join(dest, branch_name)
    else:
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
    if not os.path.exists(dest_dir):
        mkdir(dest_dir, perms=0o755)
    try:
        self.clone(source, dest_dir, branch, depth)
    except GitCommandError as e:
        # BUG FIX: exception objects have no .message attribute on
        # Python 3; str(e) works on both major versions.
        raise UnhandledSource(str(e))
    except OSError as e:
        raise UnhandledSource(e.strerror)
    return dest_dir
def git_pre_install():
    """Perform cinder pre-install setup.

    Creates the cinder system user/group, the runtime directories and
    empty log files with the expected ownership and modes.
    """
    # BUG FIX: bare octal literals (0750 etc.) are a SyntaxError on
    # Python 3 - the 0o prefix is required.
    dirs = [{'path': '/etc/tgt',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o750,
             },
            {'path': '/var/lib/cinder',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o755,
             },
            {'path': '/var/lib/cinder/volumes',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o750,
             },
            {'path': '/var/lock/cinder',
             'owner': 'cinder',
             'group': 'root',
             'perms': 0o750,
             },
            {'path': '/var/log/cinder',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o750,
             }]

    logs = [
        '/var/log/cinder/cinder-api.log',
        '/var/log/cinder/cinder-backup.log',
        '/var/log/cinder/cinder-scheduler.log',
        '/var/log/cinder/cinder-volume.log',
    ]

    adduser('cinder', shell='/bin/bash', system_user=True)
    add_group('cinder', system_group=True)
    add_user_to_group('cinder', 'cinder')

    for d in dirs:
        mkdir(d['path'], owner=d['owner'], group=d['group'],
              perms=d['perms'], force=False)

    for l in logs:
        write_file(l, '', owner='cinder', group='cinder', perms=0o600)
def copy_file(src, dst, perms=None, force=False):
    """Copy ``src`` into the directory ``dst``, optionally setting perms.

    The destination directory is created when missing.  An existing copy
    is left untouched unless ``force`` is true.  IOError from the copy is
    logged and re-raised.
    """
    if not os.path.isdir(dst):
        log('Creating directory %s' % dst, level=DEBUG)
        mkdir(dst)

    target = os.path.join(dst, os.path.basename(src))
    if force or not os.path.isfile(target):
        try:
            copy2(src, target)
            if perms:
                os.chmod(target, perms)
        except IOError:
            log('Failed to copy file from %s to %s.' % (src, dst),
                level=ERROR)
            raise
def render_web_override():
    """Render override.conf for the sentry.web systemd service.

    Any stale override file is removed first; when the 'web-override'
    config option is empty, nothing further is written.
    """
    if os.path.exists(SENTRY_WEB_SERVICE_OVERRIDE):
        os.remove(SENTRY_WEB_SERVICE_OVERRIDE)

    env = config()['web-override']
    if not env:
        return

    mkdir(os.path.dirname(SENTRY_WEB_SERVICE_OVERRIDE))
    rendered = load_template('web.override.conf.j2').render(environment=env)
    spew(SENTRY_WEB_SERVICE_OVERRIDE, rendered)
def ensure_database_directory(config_path):
    '''Create the database directory if it doesn't exist, resetting
    ownership and other settings while we are at it.

    Returns the absolute path.
    '''
    absdir = get_database_directory(config_path)

    # Work around Bug #1427150 by ensuring components of the path are
    # created with the required permissions, if necessary.
    component = os.sep
    for p in absdir.split(os.sep)[1:-1]:
        component = os.path.join(component, p)
        # BUG FIX: the original tested os.path.exists(p) - the bare
        # component name relative to the current working directory -
        # instead of the accumulated absolute path, so intermediate
        # directories could be skipped or created spuriously.
        if not os.path.exists(component):
            host.mkdir(component)
    assert component == os.path.split(absdir)[0]
    host.mkdir(absdir, owner='cassandra', group='cassandra', perms=0o750)
    return absdir
def install_layer_telegraf():
    """Installs the Telegraf software if it is not already installed."""
    if is_telegraf_installed():
        increment_number_telegrafs()
    else:
        status_set('maintenance', 'Installing Telegraf...')
        fetcher = ArchiveUrlFetchHandler()
        if not os.path.isdir('/opt/telegraf'):
            mkdir('/opt/telegraf')
        deb = '/opt/telegraf/telegraf_1.4.5-1_amd64.deb'
        fetcher.download(
            'https://dl.influxdata.com/telegraf/releases/telegraf_1.4.5-1_amd64.deb',
            deb)
        subprocess.check_call(['dpkg', '--force-confdef', '-i', deb])
        for name in ('plugins.json', 'telegraf.json'):
            shutil.copyfile('files/' + name, '/opt/telegraf/' + name)
        increment_number_telegrafs()
    set_flag('layer-telegraf.installed')
def bootstrap_monitor_cluster(secret):
    """Initialise the local ceph-mon filesystem and start the monitor.

    No-op when the 'done' marker already exists.  The temporary keyring
    is always removed, whether bootstrap succeeds or fails.

    :param secret: mon. key to seed the keyring with.
    """
    hostname = get_unit_hostname()
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    done = '{}/done'.format(path)
    if systemd():
        init_marker = '{}/systemd'.format(path)
    else:
        init_marker = '{}/upstart'.format(path)

    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)

    if os.path.exists(done):
        log('bootstrap_monitor_cluster: mon already initialized.')
    else:
        # Ceph >= 0.61.3 needs this for ceph-mon fs creation
        mkdir('/var/run/ceph', owner=ceph_user(),
              group=ceph_user(), perms=0o755)
        mkdir(path, owner=ceph_user(), group=ceph_user())
        # end changes for Ceph >= 0.61.3
        # CLEANUP: the original wrapped this in a pointless bare
        # `except: raise`; try/finally alone guarantees keyring removal.
        try:
            subprocess.check_call([
                'ceph-authtool', keyring,
                '--create-keyring', '--name=mon.',
                '--add-key={}'.format(secret),
                '--cap', 'mon', 'allow *'
            ])
            subprocess.check_call(
                ['ceph-mon', '--mkfs', '-i', hostname, '--keyring', keyring])
            chownr(path, ceph_user(), ceph_user())
            # Touch markers so subsequent invocations become no-ops and
            # the correct init system is recorded.
            with open(done, 'w'):
                pass
            with open(init_marker, 'w'):
                pass
            if systemd():
                subprocess.check_call(['systemctl', 'enable', 'ceph-mon'])
                service_restart('ceph-mon')
            else:
                service_restart('ceph-mon-all')
        finally:
            os.unlink(keyring)
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
    """
    Download and install an archive file, with optional checksum validation.

    The checksum can also be given on the `source` URL's fragment.
    For example::

        handler.install('http://example.com/file.tgz#sha1=deadbeef')

    :param str source: URL pointing to an archive file.
    :param str dest: Local destination path to install to. If not given,
        installs to `$CHARM_DIR/archives/archive_file_name`.
    :param str checksum: If given, validate the archive file after download.
    :param str hash_type: Algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises UnhandledSource: if the download fails.
    """
    url_parts = self.parse_url(source)
    dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
    if not os.path.exists(dest_dir):
        mkdir(dest_dir, perms=0o755)
    dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
    try:
        self.download(source, dld_file)
    except URLError as e:
        raise UnhandledSource(e.reason)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    # Checksums may also arrive via the URL fragment (e.g. '#sha1=...');
    # validate against every recognised hash algorithm key found there.
    options = parse_qs(url_parts.fragment)
    for key, value in options.items():
        # hashlib renamed the algorithm listing attribute in Python 3.
        if not six.PY3:
            algorithms = hashlib.algorithms
        else:
            algorithms = hashlib.algorithms_available
        if key in algorithms:
            if len(value) != 1:
                raise TypeError(
                    "Expected 1 hash value, not %d" % len(value))
            expected = value[0]
            check_hash(dld_file, expected, key)
    if checksum:
        check_hash(dld_file, checksum, hash_type)
    return extract(dld_file, dest)
def register_configs():
    """Register glance config files with their respective contexts and
    return the populated OSConfigRenderer.

    Registration of some configs may not be required depending on
    existence of certain relations (ceph, https, memcache) and the
    detected OpenStack release.
    """
    release = os_release('glance-common')
    cmp_release = CompareOpenStackReleases(release)
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [GLANCE_REGISTRY_CONF,
             GLANCE_API_CONF,
             HAPROXY_CONF]

    if relation_ids('ceph'):
        mkdir(os.path.dirname(ceph_config_file()))
        mkdir(os.path.dirname(CEPH_CONF))

        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - glance ceph.conf will be
        # lower priority than both of these but that's OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'w').close()
        install_alternative(os.path.basename(CEPH_CONF),
                            CEPH_CONF, ceph_config_file())
        confs.append(ceph_config_file())

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    # Apache >= 2.4 uses conf-available; pick the matching vhost template.
    if os.path.exists('/etc/apache2/conf-available'):
        configs.register(HTTPS_APACHE_24_CONF,
                         CONFIG_FILES[HTTPS_APACHE_24_CONF]['hook_contexts'])
    else:
        configs.register(HTTPS_APACHE_CONF,
                         CONFIG_FILES[HTTPS_APACHE_CONF]['hook_contexts'])

    if enable_memcache(release=release):
        configs.register(MEMCACHED_CONF, [context.MemcacheContext()])

    # Swift store config only exists from mitaka onwards.
    if cmp_release >= 'mitaka':
        configs.register(GLANCE_SWIFT_CONF,
                         CONFIG_FILES[GLANCE_SWIFT_CONF]['hook_contexts'])

    return configs
def install():
    """Configure apt sources, install the charm's packages, open service
    ports and prepare the midonet plugin directory when selected.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    origin = config('openstack-origin')
    configure_installation_source(origin)
    plugin = config('neutron-plugin')
    additional_install_locations(plugin, origin)
    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install(determine_packages(origin), fatal=True)

    for port in determine_ports():
        open_port(port)

    if plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron',
              group='neutron', perms=0o755, force=False)
def install():
    """Install the OpenDaylight controller (tarball or karaf package),
    register its init service, create the opendaylight user and
    directories, write the mvn config and start the service.
    """
    if config.get("install-sources"):
        configure_sources(update=True, sources_var="install-sources",
                          keys_var="install-keys")

    # install packages
    apt_install(PACKAGES, fatal=True)

    install_url = config["install-url"]
    if install_url:
        # install opendaylight from tarball
        # this extracts the archive too
        install_remote(install_url, dest="/opt")
        # The extracted dirname. Look at what's on disk instead of mangling,
        # so the distribution tar.gz's name doesn't matter.
        install_dir_name = [
            f for f in os.listdir("/opt")
            if f.startswith("distribution-karaf")][0]
        if not os.path.exists("/opt/opendaylight-karaf"):
            os.symlink(install_dir_name, "/opt/opendaylight-karaf")
    else:
        apt_install([KARAF_PACKAGE], fatal=True)
        install_dir_name = "opendaylight-karaf"

    if init_is_systemd():
        shutil.copy("files/odl-controller.service", "/lib/systemd/system")
        service('enable', 'odl-controller')
    else:
        shutil.copy("files/odl-controller.conf", "/etc/init")

    adduser("opendaylight", system_user=True)
    # BUG FIX: bare octal literals (0755) are a SyntaxError on Python 3;
    # the 0o prefix is required.
    mkdir("/home/opendaylight", owner="opendaylight", group="opendaylight",
          perms=0o755)
    check_call(
        ["chown", "-R", "opendaylight:opendaylight",
         os.path.join("/opt", install_dir_name)])
    mkdir("/var/log/opendaylight", owner="opendaylight",
          group="opendaylight", perms=0o755)

    # install features
    write_mvn_config()
    service_start("odl-controller")
def superuser_credentials():
    '''Return (username, password) to connect to the Cassandra superuser.

    The credentials are persisted in the root user's cqlshrc file,
    making them easily accessible to the command line tools.
    '''
    cqlshrc_path = get_cqlshrc_path()
    cqlshrc = configparser.ConfigParser(interpolation=None)
    cqlshrc.read([cqlshrc_path])

    try:
        section = cqlshrc['authentication']
        # Existing credentials found on disk - reuse them.
        return section['username'], section['password']
    except KeyError:
        hookenv.log(
            'Generating superuser credentials into {}'.format(cqlshrc_path))

    config = hookenv.config()

    username = superuser_username()
    password = host.pwgen()

    hookenv.log('Generated username {}'.format(username))

    # We set items separately, rather than together, so that we have a
    # defined order for the ConfigParser to preserve and the tests to
    # rely on.
    cqlshrc.setdefault('authentication', {})
    cqlshrc['authentication']['username'] = username
    cqlshrc['authentication']['password'] = password
    cqlshrc.setdefault('connection', {})
    cqlshrc['connection']['hostname'] = hookenv.unit_public_ip()
    # Cassandra 2.0 cqlsh speaks thrift (rpc_port); later versions use the
    # native transport port.
    if get_cassandra_version().startswith('2.0'):
        cqlshrc['connection']['port'] = str(config['rpc_port'])
    else:
        cqlshrc['connection']['port'] = str(config['native_transport_port'])

    ini = io.StringIO()
    cqlshrc.write(ini)
    # Restrict the file tightly: it contains a plaintext password.
    host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700)
    host.write_file(cqlshrc_path, ini.getvalue().encode('UTF-8'),
                    perms=0o400)

    return username, password
def setup_images_folder():
    """Create /opt/VNF for openvim, link the libvirt images directory
    into it and fix up ownership/permissions on both trees.
    """
    status_set("maintenance", "setting up VM images folder")
    mkdir('/opt/VNF', owner='openvim', group='openvim', perms=0o775,
          force=False)
    symlink('/var/lib/libvirt/images', '/opt/VNF/images')
    for tree, tree_owner in (('/opt/VNF', 'openvim'),
                             ('/var/lib/libvirt/images', 'root')):
        chownr(tree, owner=tree_owner, group='openvim',
               follow_links=False, chowntopdir=True)
    chmod('/var/lib/libvirt/images', 0o775)
def git_pre_install():
    """Perform pre-install setup.

    Creates the neutron system user/group, runtime directories and empty
    agent log files with the expected ownership and modes.
    """
    dirs = [
        '/etc/neutron',
        '/etc/neutron/rootwrap.d',
        '/etc/neutron/plugins',
        '/etc/nova',
        '/var/lib/neutron',
        '/var/lib/neutron/lock',
        '/var/log/neutron',
    ]

    logs = [
        '/var/log/neutron/bigswitch-agent.log',
        '/var/log/neutron/dhcp-agent.log',
        '/var/log/neutron/l3-agent.log',
        '/var/log/neutron/lbaas-agent.log',
        '/var/log/neutron/ibm-agent.log',
        '/var/log/neutron/linuxbridge-agent.log',
        '/var/log/neutron/metadata-agent.log',
        '/var/log/neutron/metering_agent.log',
        '/var/log/neutron/mlnx-agent.log',
        '/var/log/neutron/nec-agent.log',
        '/var/log/neutron/nvsd-agent.log',
        '/var/log/neutron/openflow-agent.log',
        '/var/log/neutron/openvswitch-agent.log',
        '/var/log/neutron/ovs-cleanup.log',
        '/var/log/neutron/ryu-agent.log',
        '/var/log/neutron/server.log',
        '/var/log/neutron/sriov-agent.log',
        '/var/log/neutron/vpn_agent.log',
    ]

    adduser('neutron', shell='/bin/bash', system_user=True)
    add_group('neutron', system_group=True)
    add_user_to_group('neutron', 'neutron')

    # BUG FIX: bare octal literals (0755/0644) are a SyntaxError on
    # Python 3 - the 0o prefix is required.
    for d in dirs:
        mkdir(d, owner='neutron', group='neutron', perms=0o755, force=False)

    for l in logs:
        write_file(l, '', owner='neutron', group='neutron', perms=0o644)
def get_rabbit_password_on_disk(username, password=None, local=False):
    '''Retrieve, generate or store a rabbit password for
    the provided username on disk'''
    template = _local_named_passwd if local else _named_passwd
    _passwd_file = template.format(service_name(), username)

    if os.path.exists(_passwd_file):
        # Reuse the password previously persisted for this user.
        with open(_passwd_file, 'r') as passwd:
            return passwd.read().strip()

    # First request: create the password directory and persist either the
    # supplied password or a freshly generated one.
    mkdir(os.path.dirname(_passwd_file), owner=RABBIT_USER,
          group=RABBIT_USER, perms=0o775)
    os.chmod(os.path.dirname(_passwd_file), 0o775)
    _password = password or pwgen(length=64)
    write_file(_passwd_file, _password, owner=RABBIT_USER,
               group=RABBIT_USER, perms=0o660)
    return _password
def git_pre_install():
    """Perform pre-install setup.

    Creates the neutron system user/group, runtime directories and the
    empty server log file with the expected ownership and modes.
    """
    dirs = [
        '/var/lib/neutron',
        '/var/lib/neutron/lock',
        '/var/log/neutron',
    ]

    logs = [
        '/var/log/neutron/server.log',
    ]

    adduser('neutron', shell='/bin/bash', system_user=True)
    add_group('neutron', system_group=True)
    add_user_to_group('neutron', 'neutron')

    # BUG FIX: bare octal literals (0755/0600) are a SyntaxError on
    # Python 3 - the 0o prefix is required.
    for d in dirs:
        mkdir(d, owner='neutron', group='neutron', perms=0o755, force=False)

    for l in logs:
        write_file(l, '', owner='neutron', group='neutron', perms=0o600)
def git_pre_install():
    """Perform astara pre-install setup.

    Creates the astara system user/group, runtime directories and the
    empty orchestrator log file with the expected ownership and modes.
    """
    dirs = [
        '/var/lib/astara',
        '/var/log/astara',
        '/etc/astara',
    ]

    logs = [
        '/var/log/astara/astara-orchestrator.log',
    ]

    adduser('astara', shell='/bin/bash', system_user=True)
    add_group('astara', system_group=True)
    add_user_to_group('astara', 'astara')

    # BUG FIX: bare octal literals (0755/0600) are a SyntaxError on
    # Python 3 - the 0o prefix is required.
    for d in dirs:
        mkdir(d, owner='astara', group='astara', perms=0o755, force=False)

    for l in logs:
        write_file(l, '', owner='astara', group='astara', perms=0o600)
def install():
    """Install Pentaho Data Integration from the attached charm resource.

    Creates the etl user and home, extracts the PDI archive under /opt,
    fixes ownership and marks the launcher scripts executable.
    """
    status_set('maintenance', 'Installing PDI')
    adduser('etl')
    mkdir('/home/etl')
    chownr('/home/etl', 'etl', 'etl', chowntopdir=True)
    os.chmod('/home/etl', 0o755)

    pdiarchive = hookenv.resource_get('pdi-archive')
    # BUG FIX: the original never closed the TarFile handle; use a
    # context manager so it is released deterministically.
    # NOTE(review): extractall() trusts archive member paths; acceptable
    # here because the resource is operator-supplied, but worth flagging.
    with tarfile.open(pdiarchive) as tar:
        tar.extractall("/opt/")
    chownr('/opt/data-integration', 'etl', 'etl', chowntopdir=True)

    # Mark every PDI entry-point script executable (same mode for all,
    # derived from spoon.sh as in the original).
    st = os.stat('/opt/data-integration/spoon.sh')
    exec_mode = st.st_mode | stat.S_IEXEC
    for script in ('spoon.sh', 'carte.sh', 'encr.sh', 'kitchen.sh',
                   'pan.sh'):
        os.chmod('/opt/data-integration/' + script, exec_mode)

    status_set('maintenance', 'PDI Installed')
    set_state('pdi.installed')