def test_adds_a_user_to_a_group(self, log, check_call):
    """Verify add_user_to_group shells out to ``gpasswd -a <user> <group>``."""
    host.add_user_to_group('******', 'bar')
    check_call.assert_called_with(['gpasswd', '-a', '******', 'bar'])
def git_pre_install():
    """Perform glance pre-install setup.

    Creates the glance system user/group, the directory tree the service
    expects, and empty log files with restrictive permissions.
    """
    dirs = [
        '/var/lib/glance',
        '/var/lib/glance/images',
        '/var/lib/glance/image-cache',
        '/var/lib/glance/image-cache/incomplete',
        '/var/lib/glance/image-cache/invalid',
        '/var/lib/glance/image-cache/queue',
        '/var/log/glance',
    ]
    logs = [
        '/var/log/glance/glance-api.log',
        '/var/log/glance/glance-registry.log',
    ]
    adduser('glance', shell='/bin/bash', system_user=True)
    add_group('glance', system_group=True)
    add_user_to_group('glance', 'glance')
    for d in dirs:
        # 0o755: Python 3 octal syntax (bare 0755 is a SyntaxError on py3).
        mkdir(d, owner='glance', group='glance', perms=0o755, force=False)
    for log_file in logs:
        write_file(log_file, '', owner='glance', group='glance', perms=0o600)
def git_pre_install():
    """Perform pre-install setup.

    Creates the nova and neutron system users/groups and the directory
    tree both services expect when deployed from git.
    """
    dirs = [
        '/var/lib/nova',
        '/var/lib/nova/buckets',
        '/var/lib/nova/CA',
        '/var/lib/nova/CA/INTER',
        '/var/lib/nova/CA/newcerts',
        '/var/lib/nova/CA/private',
        '/var/lib/nova/CA/reqs',
        '/var/lib/nova/images',
        '/var/lib/nova/instances',
        '/var/lib/nova/keys',
        '/var/lib/nova/networks',
        '/var/lib/nova/tmp',
        '/var/lib/neutron',
        '/var/lib/neutron/lock',
        '/var/log/nova',
        '/etc/neutron',
        '/etc/neutron/plugins',
        '/etc/neutron/plugins/ml2',
    ]
    adduser('nova', shell='/bin/bash', system_user=True)
    # Relocate nova's home directory to its state directory.
    subprocess.check_call(['usermod', '--home', '/var/lib/nova', 'nova'])
    add_group('nova', system_group=True)
    add_user_to_group('nova', 'nova')
    adduser('neutron', shell='/bin/bash', system_user=True)
    add_group('neutron', system_group=True)
    add_user_to_group('neutron', 'neutron')
    for d in dirs:
        # 0o755: Python 3 octal syntax (bare 0755 is a SyntaxError on py3).
        mkdir(d, owner='nova', group='nova', perms=0o755, force=False)
def ensure_user():
    """Create the CI user, add it to the CI group, and own its home dir."""
    adduser(CI_USER)
    add_user_to_group(CI_USER, CI_GROUP)
    home_dir = os.path.join('/home', CI_USER)
    if not os.path.isdir(home_dir):
        os.mkdir(home_dir)
    ownership = '%s:%s' % (CI_USER, CI_GROUP)
    subprocess.check_call(['chown', '-R', ownership, home_dir])
def install():
    """Install the oostore workload unless it is already available."""
    if not is_state('oostore.available'):
        host.adduser('oostore', system_user=True)
        host.add_group('oostore', system_group=True)
        host.add_user_to_group('oostore', 'oostore')
        install_workload()
        set_state('oostore.available')
def ensure_user():
    """Ensure the CI user exists, is in the CI group, and owns its home."""
    adduser(CI_USER)
    add_user_to_group(CI_USER, CI_GROUP)
    target = os.path.join('/home', CI_USER)
    if not os.path.isdir(target):
        os.mkdir(target)
    cmd = ['chown', '-R', '%s:%s' % (CI_USER, CI_GROUP), target]
    subprocess.check_call(cmd)
def install(self):
    """Install the charm and prepare the softhsm2 token store.

    Runs the standard charm install, grants the barbican user access to
    the softhsm group, sets up the token store, and reports status.
    """
    super(BarbicanSoftHSMCharm, self).install()
    # barbican-worker needs group membership to read softhsm2.conf.
    ch_core_host.add_user_to_group('barbican', 'softhsm')
    self.setup_token_store()
    hookenv.status_set('waiting',
                       'Charm installed and token store configured')
def lxd_relation_changed():
    """Grant a newly related user LXD access and renotify related units."""
    user = relation_get('user')
    if not user:
        return
    add_user_to_group(user, 'lxd')
    for rid in relation_ids('lxd'):
        relation_set(relation_id=rid, nonce=uuid.uuid4())
    # Re-fire lxd-migration relation to ensure that
    # remotes have been setup for the user
    for rid in relation_ids('lxd-migration'):
        for unit in related_units(rid):
            lxd_migration_relation_changed(rid, unit)
def install(self):
    """Perform the charm install and configure the softhsm2 token store.

    After the base install, the barbican user is added to the softhsm
    group so barbican-worker can read softhsm2.conf; then the token
    store is created and the unit status updated.
    """
    super(BarbicanSoftHSMCharm, self).install()
    # Group membership lets barbican-worker access the softhsm2.conf file.
    ch_core_host.add_user_to_group('barbican', 'softhsm')
    self.setup_token_store()
    hookenv.status_set(
        'waiting',
        'Charm installed and token store configured')
def add_user():
    """Create the DMAPI system group and user.

    Returns True on success, False (after logging) on any failure.
    """
    try:
        add_group(DMAPI_GRP, system_group=True)
        adduser(DMAPI_USR, password=None, shell='/bin/bash',
                system_user=True)
        add_user_to_group(DMAPI_USR, DMAPI_GRP)
    except Exception as exc:
        log("Failed while adding user with msg: {}".format(exc))
        return False
    return True
def git_pre_install():
    """Perform cinder pre-install setup.

    Creates the cinder system user/group, the per-path directory tree
    (with individual owners/modes), and empty log files.
    """
    dirs = [{
        'path': '/etc/tgt',
        'owner': 'cinder',
        'group': 'cinder',
        'perms': 0o750,  # 0o…: Python 3 octal syntax (0750 is a py3 SyntaxError)
    }, {
        'path': '/var/lib/cinder',
        'owner': 'cinder',
        'group': 'cinder',
        'perms': 0o755,
    }, {
        'path': '/var/lib/cinder/volumes',
        'owner': 'cinder',
        'group': 'cinder',
        'perms': 0o750,
    }, {
        'path': '/var/lock/cinder',
        'owner': 'cinder',
        'group': 'root',
        'perms': 0o750,
    }, {
        'path': '/var/log/cinder',
        'owner': 'cinder',
        'group': 'cinder',
        'perms': 0o750,
    }]
    logs = [
        '/var/log/cinder/cinder-api.log',
        '/var/log/cinder/cinder-backup.log',
        '/var/log/cinder/cinder-scheduler.log',
        '/var/log/cinder/cinder-volume.log',
    ]
    adduser('cinder', shell='/bin/bash', system_user=True)
    add_group('cinder', system_group=True)
    add_user_to_group('cinder', 'cinder')
    for d in dirs:
        mkdir(d['path'], owner=d['owner'], group=d['group'],
              perms=d['perms'], force=False)
    for log_file in logs:
        write_file(log_file, '', owner='cinder', group='cinder', perms=0o600)
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Args:
        user (str) -- Username to allow access to hugepages to
        group (str) -- Group name to own hugepages
        nr_hugepages (int) -- Number of pages to reserve
        max_map_count (int) -- Number of Virtual Memory Areas a process can own
        mnt_point (str) -- Directory to mount hugepages on
        pagesize (str) -- Size of hugepages
        mount (bool) -- Whether to Mount hugepages
        set_shmmax (bool) -- Whether to raise kernel.shmmax to at least
            the size of the reserved hugepage pool
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    # Keep max_map_count at least twice nr_hugepages.
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        # Only grow shmmax; never shrink an already-larger setting.
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    sysctl.create(yaml.dump(sysctl_settings),
                  '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    # Replace any existing fstab entry for the mount point before adding ours.
    lfstab = fstab.Fstab()
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize),
                         0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
def git_pre_install():
    """Perform cinder pre-install setup.

    Creates the cinder system user/group, the per-path directory tree
    (with individual owners/modes), and empty log files.
    """
    # 0o…: Python 3 octal syntax (bare 0750 is a SyntaxError on py3).
    dirs = [{'path': '/etc/tgt',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o750, },
            {'path': '/var/lib/cinder',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o755, },
            {'path': '/var/lib/cinder/volumes',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o750, },
            {'path': '/var/lock/cinder',
             'owner': 'cinder',
             'group': 'root',
             'perms': 0o750, },
            {'path': '/var/log/cinder',
             'owner': 'cinder',
             'group': 'cinder',
             'perms': 0o750, }]
    logs = [
        '/var/log/cinder/cinder-api.log',
        '/var/log/cinder/cinder-backup.log',
        '/var/log/cinder/cinder-scheduler.log',
        '/var/log/cinder/cinder-volume.log',
    ]
    adduser('cinder', shell='/bin/bash', system_user=True)
    add_group('cinder', system_group=True)
    add_user_to_group('cinder', 'cinder')
    for d in dirs:
        mkdir(d['path'], owner=d['owner'], group=d['group'],
              perms=d['perms'], force=False)
    for log_file in logs:
        write_file(log_file, '', owner='cinder', group='cinder', perms=0o600)
def add_users():
    """Add the configured data-mover user to required system groups.

    Also creates the /usr/lib64 -> /usr/lib symlink.  Returns True on
    success, False (after logging) on failure.
    """
    usr = config('tvault-datamover-ext-usr')
    try:
        # Adding nova user to system groups
        for grp in ('kvm', 'disk'):
            add_user_to_group(usr, grp)
        # create symlink /usr/lib64/
        symlink('/usr/lib', '/usr/lib64')
    except Exception as exc:
        log("Failed while adding user with msg: {}".format(exc))
        return False
    return True
def git_pre_install():
    """Perform pre-install setup.

    Creates the neutron system user/group, configuration and state
    directories, and empty per-agent log files.
    """
    dirs = [
        '/etc/neutron',
        '/etc/neutron/rootwrap.d',
        '/etc/neutron/plugins',
        '/etc/nova',
        '/var/lib/neutron',
        '/var/lib/neutron/lock',
        '/var/log/neutron',
    ]
    logs = [
        '/var/log/neutron/bigswitch-agent.log',
        '/var/log/neutron/dhcp-agent.log',
        '/var/log/neutron/l3-agent.log',
        '/var/log/neutron/lbaas-agent.log',
        '/var/log/neutron/ibm-agent.log',
        '/var/log/neutron/linuxbridge-agent.log',
        '/var/log/neutron/metadata-agent.log',
        '/var/log/neutron/metering_agent.log',
        '/var/log/neutron/mlnx-agent.log',
        '/var/log/neutron/nec-agent.log',
        '/var/log/neutron/nvsd-agent.log',
        '/var/log/neutron/openflow-agent.log',
        '/var/log/neutron/openvswitch-agent.log',
        '/var/log/neutron/ovs-cleanup.log',
        '/var/log/neutron/ryu-agent.log',
        '/var/log/neutron/server.log',
        '/var/log/neutron/sriov-agent.log',
        '/var/log/neutron/vpn_agent.log',
    ]
    adduser('neutron', shell='/bin/bash', system_user=True)
    add_group('neutron', system_group=True)
    add_user_to_group('neutron', 'neutron')
    for d in dirs:
        # 0o755: Python 3 octal syntax (bare 0755 is a SyntaxError on py3).
        mkdir(d, owner='neutron', group='neutron', perms=0o755, force=False)
    for log_file in logs:
        write_file(log_file, '', owner='neutron', group='neutron',
                   perms=0o644)
def configure_lxd_source(user='******'):
    '''Add required configuration and files when deploying LXD from source'''
    log('Configuring LXD Source')
    home = pwd.getpwnam(user).pw_dir
    gopath = os.path.join(home, 'go')
    # Install init integration for both upstart and systemd hosts.
    for template, target in (
            ('lxd_upstart', '/etc/init/lxd.conf'),
            ('lxd_service', '/lib/systemd/system/lxd.service')):
        render(template, target, {}, perms=0o644, templates_dir='templates')
    add_group('lxd', system_group=True)
    add_user_to_group(user, 'lxd')
    service_stop('lxd')
    # Copy the freshly built binaries from the Go workspace into PATH.
    for built in glob.glob('%s/bin/*' % gopath):
        check_call(['cp', built, '/usr/bin'])
    service_start('lxd')
def setup_lxd(self):
    """Set up custom LXD executor scripts."""
    # All four executor hooks share the same render parameters.
    for script in ("base", "prepare", "run", "cleanup"):
        templating.render(
            script + ".j2",
            self.executor_dir + "/" + script + ".sh",
            context="",
            owner=self.gitlab_user,
            group=self.gitlab_user,
            perms=0o775,
        )
    add_user_to_group(self.gitlab_user, "lxd")
    subprocess.check_call(["lxd", "init", "--auto"],
                          stderr=subprocess.STDOUT)
def git_pre_install():
    """Perform astara pre-install setup.

    Creates the astara system user/group, its directories, and an empty
    orchestrator log file.
    """
    dirs = [
        '/var/lib/astara',
        '/var/log/astara',
        '/etc/astara',
    ]
    logs = [
        '/var/log/astara/astara-orchestrator.log',
    ]
    adduser('astara', shell='/bin/bash', system_user=True)
    add_group('astara', system_group=True)
    add_user_to_group('astara', 'astara')
    for d in dirs:
        # 0o755: Python 3 octal syntax (bare 0755 is a SyntaxError on py3).
        mkdir(d, owner='astara', group='astara', perms=0o755, force=False)
    for log_file in logs:
        write_file(log_file, '', owner='astara', group='astara', perms=0o600)
def git_pre_install():
    """Perform pre-install setup.

    Creates the neutron system user/group, state/log directories, and an
    empty server log file.
    """
    dirs = [
        '/var/lib/neutron',
        '/var/lib/neutron/lock',
        '/var/log/neutron',
    ]
    logs = [
        '/var/log/neutron/server.log',
    ]
    adduser('neutron', shell='/bin/bash', system_user=True)
    add_group('neutron', system_group=True)
    add_user_to_group('neutron', 'neutron')
    for d in dirs:
        # 0o755: Python 3 octal syntax (bare 0755 is a SyntaxError on py3).
        mkdir(d, owner='neutron', group='neutron', perms=0o755, force=False)
    for log_file in logs:
        write_file(log_file, '', owner='neutron', group='neutron',
                   perms=0o600)
def git_pre_install():
    """Perform horizon pre-install setup.

    Creates the horizon system user/group and the dashboard directory
    tree; only /var/lib/openstack-dashboard is owned by horizon.
    """
    dirs = [
        '/etc/openstack-dashboard',
        '/usr/share/openstack-dashboard',
        '/usr/share/openstack-dashboard/bin/less',
        '/usr/share/openstack-dashboard-ubuntu-theme/static/ubuntu/css',
        '/usr/share/openstack-dashboard-ubuntu-theme/static/ubuntu/img',
        '/usr/share/openstack-dashboard-ubuntu-theme/templates',
        '/var/lib/openstack-dashboard',
    ]
    adduser('horizon', shell='/bin/bash', system_user=True)
    subprocess.check_call(['usermod', '--home',
                           '/usr/share/openstack-dashboard/', 'horizon'])
    add_group('horizon', system_group=True)
    add_user_to_group('horizon', 'horizon')
    for d in dirs:
        # Use '==' (value equality): 'is' on strings relies on interning
        # and is not a reliable comparison.  Also 0o…: py3 octal syntax.
        if d == '/var/lib/openstack-dashboard':
            mkdir(d, owner='horizon', group='horizon', perms=0o700,
                  force=False)
        else:
            mkdir(d, owner='root', group='root', perms=0o755, force=False)
def git_pre_install():
    """Perform glance pre-install setup.

    Creates the glance system user/group, the image/cache directory
    tree, and empty log files with restrictive permissions.
    """
    dirs = [
        "/var/lib/glance",
        "/var/lib/glance/images",
        "/var/lib/glance/image-cache",
        "/var/lib/glance/image-cache/incomplete",
        "/var/lib/glance/image-cache/invalid",
        "/var/lib/glance/image-cache/queue",
        "/var/log/glance",
    ]
    logs = ["/var/log/glance/glance-api.log",
            "/var/log/glance/glance-registry.log"]
    adduser("glance", shell="/bin/bash", system_user=True)
    add_group("glance", system_group=True)
    add_user_to_group("glance", "glance")
    for d in dirs:
        # 0o755: Python 3 octal syntax (bare 0755 is a SyntaxError on py3).
        mkdir(d, owner="glance", group="glance", perms=0o755, force=False)
    for log_file in logs:
        write_file(log_file, "", owner="glance", group="glance", perms=0o600)
def git_pre_install():
    """Perform horizon pre-install setup.

    Creates the horizon system user/group and the dashboard directory
    tree; only /var/lib/openstack-dashboard is owned by horizon.
    """
    dirs = [
        '/etc/openstack-dashboard',
        '/usr/share/openstack-dashboard',
        '/usr/share/openstack-dashboard/bin/less',
        '/usr/share/openstack-dashboard-ubuntu-theme/static/ubuntu/css',
        '/usr/share/openstack-dashboard-ubuntu-theme/static/ubuntu/img',
        '/usr/share/openstack-dashboard-ubuntu-theme/templates',
        '/var/lib/openstack-dashboard',
    ]
    adduser('horizon', shell='/bin/bash', system_user=True)
    subprocess.check_call(
        ['usermod', '--home', '/usr/share/openstack-dashboard/', 'horizon'])
    add_group('horizon', system_group=True)
    add_user_to_group('horizon', 'horizon')
    for d in dirs:
        # '==' replaces 'is': identity comparison of strings depends on
        # interning and is unreliable.  0o…: Python 3 octal syntax.
        if d == '/var/lib/openstack-dashboard':
            mkdir(d, owner='horizon', group='horizon', perms=0o700,
                  force=False)
        else:
            mkdir(d, owner='root', group='root', perms=0o755, force=False)
def add_users():
    """Grant the configured data-mover user passwordless sudo and group access.

    Writes a sudoers drop-in for the user, adds the user to the kvm and
    disk groups, and symlinks /usr/lib64 -> /usr/lib.  Returns True on
    success, False (after logging) on any failure.
    """
    usr = config('tvault-datamover-ext-usr')
    path = '/etc/sudoers.d/tvault-nova'
    source = '/usr/lib'
    destination = '/usr/lib64'
    content = '{} ALL=(ALL) NOPASSWD: ALL'.format(usr)
    try:
        # NOTE(review): perms=501 is a *decimal* literal (mode 0o765) —
        # unusual for a sudoers file; confirm the intended octal mode.
        write_file(path, content, owner='root', group='root', perms=501)
        # Adding nova user to system groups
        add_user_to_group(usr, 'kvm')
        add_user_to_group(usr, 'disk')
        # create symlink /usr/lib64/
        symlink(source, destination)
    except Exception as e:
        log("Failed while adding user with msg: {}".format(e))
        return False
    return True
def git_pre_install():
    """Perform pre-install setup.

    Creates the nova system user/group (also joining the libvirtd
    group), the nova directory tree, and empty log files.
    """
    dirs = [
        '/var/lib/nova',
        '/var/lib/nova/buckets',
        '/var/lib/nova/CA',
        '/var/lib/nova/CA/INTER',
        '/var/lib/nova/CA/newcerts',
        '/var/lib/nova/CA/private',
        '/var/lib/nova/CA/reqs',
        '/var/lib/nova/images',
        '/var/lib/nova/instances',
        '/var/lib/nova/keys',
        '/var/lib/nova/networks',
        '/var/lib/nova/tmp',
        '/var/log/nova',
    ]
    logs = [
        '/var/log/nova/nova-api.log',
        '/var/log/nova/nova-compute.log',
        '/var/log/nova/nova-manage.log',
        '/var/log/nova/nova-network.log',
    ]
    adduser('nova', shell='/bin/bash', system_user=True)
    # Relocate nova's home directory to its state directory.
    check_call(['usermod', '--home', '/var/lib/nova', 'nova'])
    add_group('nova', system_group=True)
    add_user_to_group('nova', 'nova')
    add_user_to_group('nova', 'libvirtd')
    for d in dirs:
        # 0o755: Python 3 octal syntax (bare 0755 is a SyntaxError on py3).
        mkdir(d, owner='nova', group='nova', perms=0o755, force=False)
    for log_file in logs:
        write_file(log_file, '', owner='nova', group='nova', perms=0o644)
def ensure_user(user, group=None):
    """Create ``user`` and, when a (truthy) ``group`` is given, join it."""
    adduser(user)
    if group:
        add_user_to_group(user, group)
def install_docker(self):
    """Install Docker which is required for running jobs."""
    apt_install("docker.io")
    add_user_to_group(self.gitlab_user, "docker")
    # Enable at boot, then start immediately.
    for action in ("enable", "start"):
        service(action, "docker")
def install_layer_samba(): sys.path.append(os.path.realpath('..')) # Do your setup here. # # If your charm has other dependencies before it can install, # add those as @when() clauses above., or as additional @when() # decorated handlers below # # See the following for information about reactive charms: # # * https://jujucharms.com/docs/devel/developer-getting-started # * https://github.com/juju-solutions/layer-basic#overview # config = hookenv.config() password = config['password'] server_name = config['server_name'] online = config['online'] hookenv.status_set('maintenance', 'Updating apt') apt.update() hookenv.status_set('maintenance', 'Installing packages') apt.queue_install(['samba']) apt.install_queued() #os.system('git clone https://github.com/bdrung/ionit.git') #os.system('python3 ionit/setup.py install') hookenv.status_set('maintenance', 'Configuring') host.add_group('juju-samba-ubuntu') host.adduser('ubuntu', password) host.add_user_to_group('ubuntu', 'juju-samba-ubuntu') cmd = ("sudo echo -e \"" + password + "\n" + password + "\" | smbpasswd -s -a ubuntu") os.system(cmd) if not os.path.exists('/opt/samba/share'): os.makedirs('/opt/samba/share') host.chownr('/opt/samba/share', 'ubuntu', 'juju-samba-ubuntu', True, True) if not os.path.exists('/etc/samba/smb.conf'): os.makedirs('/etc/samba') shutil.copy('opt/smb.conf', '/etc/samba/smb.conf') render(source='smb', target='/etc/samba/smb.conf', context={ "cfg": config, }, owner='root', perms=0o740) restartSamba() set_flag('layer-samba.installed') if (not online): stopSamba() hookenv.status_set('active', 'Stopped') hookenv.status_set('active', 'Started')
def add_user_group():
    """Create the reddit user, join it to the reddit group, and own its home."""
    host.adduser(REDDIT_USER)
    host.add_user_to_group(REDDIT_USER, REDDIT_GROUP)
    # Recursively hand the home directory to the service account.
    host.chownr(REDDIT_HOME, REDDIT_USER, REDDIT_GROUP)
def ensure_user(user, group=None):
    """Create ``user`` with a generated password; optionally join ``group``.

    Password expiry is cleared afterwards (Bug #1686085).
    """
    adduser(user, pwgen())
    if group:
        add_user_to_group(user, group)
    # Remove password expiry (Bug #1686085)
    remove_password_expiry(user)
def group_openvim_user():
    """Add the openvim user to the libvirtd and sudo groups."""
    status_set("maintenance", "Adding OpenVIM user to groups")
    for grp in ('libvirtd', 'sudo'):
        add_user_to_group('openvim', grp)
def setup_lxd():
    """Put the ubuntu user in the lxd group and configure LXD for jujushell."""
    hookenv.status_set('maintenance', 'configuring lxd')
    host.add_user_to_group('ubuntu', 'lxd')
    jujushell.setup_lxd()
def install_etcd():
    ''' Attempt resource get on the "etcd" and "etcdctl" resources. If
    no resources are provided attempt to install from the archive only on
    the 16.04 (xenial) series. '''
    status_set('maintenance', 'Installing etcd.')
    codename = host.lsb_release()['DISTRIB_CODENAME']
    try:
        etcd_path = resource_get('etcd')
        etcdctl_path = resource_get('etcdctl')
    # Not obvious but this blocks juju 1.25 clients
    except NotImplementedError:
        status_set(
            'blocked',
            'This charm requires the resource feature available in juju 2+'
        )  # noqa
        return
    if not etcd_path or not etcdctl_path:
        if codename == 'xenial':
            # edge case where archive allows us a nice fallback on xenial
            status_set('maintenance', 'Attempting install of etcd from apt')
            pkg_list = ['etcd']
            apt_update()
            apt_install(pkg_list, fatal=True)
            # Stop the service and remove the defaults
            # I hate that I have to do this. Sorry short-lived local data
            # #RIP
            # State control is to prevent upgrade-charm from nuking cluster
            # data.
            if not is_state('etcd.package.adjusted'):
                host.service('stop', 'etcd')
                if os.path.exists('/var/lib/etcd/default'):
                    shutil.rmtree('/var/lib/etcd/default')
                set_state('etcd.package.adjusted')
            set_state('etcd.installed')
            return
        else:
            # edge case
            status_set('blocked', 'Missing Resource: see README')
    else:
        # Resources supplied: install the binaries and set up the etcd
        # user/group and data directory.
        install(etcd_path, '/usr/bin/etcd')
        install(etcdctl_path, '/usr/bin/etcdctl')
        host.add_group('etcd')
        if not host.user_exists('etcd'):
            host.adduser('etcd')
            host.add_user_to_group('etcd', 'etcd')
        os.makedirs('/var/lib/etcd/', exist_ok=True)
        etcd_uid = getpwnam('etcd').pw_uid
        os.chmod('/var/lib/etcd/', 0o775)
        os.chown('/var/lib/etcd/', etcd_uid, -1)
        # Trusty was the EOL for upstart, render its template if required
        if codename == 'trusty':
            render('upstart', '/etc/init/etcd.conf', {}, owner='root',
                   group='root')
            set_state('etcd.installed')
            return
        if not os.path.exists('/etc/systemd/system/etcd.service'):
            render('systemd', '/etc/systemd/system/etcd.service', {},
                   owner='root', group='root')
        # This will cause some grief if its been run before
        # so allow it to be chatty and fail if we ever re-render
        # and attempt re-enablement.
        try:
            check_call(split('systemctl enable etcd'))
        except CalledProcessError:
            pass
        set_state('etcd.installed')
def group_openvim_user():
    """Put the openvim user into the groups it requires."""
    status_set("maintenance", "adding openvim user to groups")
    for required_group in ('libvirtd', 'sudo'):
        add_user_to_group('openvim', required_group)
def add_user_to_libvirt_group():
    """Add the ubuntu user to the libvirtd group."""
    status_set("maintenance", "adding user to libvirtd group")
    add_user_to_group("ubuntu", "libvirtd")