def backup_delete(name):
    """
    Delete a backup archive and its metadata file.

    Keyword arguments:
        name -- Name of the local backup archive

    Raises:
        MoulinetteError -- if either file is missing or cannot be removed
    """
    hook_callback('pre_backup_delete', args=[name])

    archive_file = '%s/%s.tar.gz' % (archives_path, name)
    info_file = "%s/%s.info.json" % (archives_path, name)
    for backup_file in [archive_file, info_file]:
        if not os.path.isfile(backup_file):
            raise MoulinetteError(errno.EIO,
                                  m18n.n('backup_archive_name_unknown',
                                         name=backup_file))
        try:
            os.remove(backup_file)
        except OSError:
            # Narrowed from a bare `except:` — only filesystem errors are
            # expected from os.remove; anything else should propagate as-is.
            logger.debug("unable to delete '%s'", backup_file, exc_info=1)
            raise MoulinetteError(errno.EIO,
                                  m18n.n('backup_delete_error',
                                         path=backup_file))

    hook_callback('post_backup_delete', args=[name])

    logger.success(m18n.n('backup_deleted'))
def user_delete(auth, username, purge=False):
    """
    Delete user

    Keyword argument:
        username -- Username to delete
        purge -- Also remove the user's home directory

    Raises:
        MoulinetteError -- if the LDAP entry could not be removed
    """
    from yunohost.app import app_ssowatconf
    from yunohost.hook import hook_callback

    if auth.remove('uid=%s,ou=users' % username):
        # Invalidate passwd to take user deletion into account
        subprocess.call(['nscd', '-i', 'passwd'])

        # Update SFTP user group
        memberlist = auth.search(filter='cn=sftpusers',
                                 attrs=['memberUid'])[0]['memberUid']
        # Explicit membership test instead of a bare try/except around
        # list.remove() — same behavior, no silent exception swallowing.
        if username in memberlist:
            memberlist.remove(username)
        if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}):
            if purge:
                subprocess.call(['rm', '-rf', '/home/{0}'.format(username)])
    else:
        raise MoulinetteError(169, m18n.n('user_deletion_failed'))

    app_ssowatconf(auth)

    hook_callback('post_user_delete', args=[username, purge])

    logger.success(m18n.n('user_deleted'))
def app_clearaccess(auth, apps):
    """
    Reset access rights for the app(s)

    Keyword argument:
        apps -- App name or list of app names to reset

    Raises:
        MoulinetteError -- if one of the apps is not installed
    """
    from yunohost.hook import hook_callback

    if not isinstance(apps, list):
        apps = [apps]

    for app in apps:
        if not _is_installed(app):
            raise MoulinetteError(errno.EINVAL,
                                  m18n.n('app_not_installed', app))

        with open(apps_setting_path + app + '/settings.yml') as f:
            # safe_load: settings files only contain plain YAML, and
            # yaml.load without an explicit Loader can construct arbitrary
            # Python objects from a crafted file.
            app_settings = yaml.safe_load(f)

        if 'mode' in app_settings:
            app_setting(app, 'mode', delete=True)

        if 'allowed_users' in app_settings:
            app_setting(app, 'allowed_users', delete=True)

        hook_callback('post_app_clearaccess', args=[app])

    app_ssowatconf(auth)
def firewall_reload():
    """
    Reload all firewall rules

    """
    from yunohost.hook import hook_callback

    firewall = firewall_list(raw=True)
    upnp = firewall['uPnP']['enabled']

    # IPv4 -- probe iptables availability by setting the default policy
    if os.system("iptables -P INPUT ACCEPT") != 0:
        raise MoulinetteError(errno.ESRCH, m18n.n('iptables_unavailable'))
    if upnp:
        firewall_upnp(action=['reload'])

    for command in ("iptables -F",
                    "iptables -X",
                    "iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"):
        os.system(command)

    # Make sure SSH stays reachable
    if ssh_port not in firewall['ipv4']['TCP']:
        firewall_allow(ssh_port)

    # Open every configured IPv4 port
    for proto in ['TCP', 'UDP']:
        for port in firewall['ipv4'][proto]:
            os.system("iptables -A INPUT -p %s --dport %d -j ACCEPT"
                      % (proto, port))

    hook_callback('post_iptable_rules',
                  [upnp, os.path.exists("/proc/net/if_inet6")])

    for command in ("iptables -A INPUT -i lo -j ACCEPT",
                    "iptables -A INPUT -p icmp -j ACCEPT",
                    "iptables -P INPUT DROP"):
        os.system(command)

    # IPv6 -- only when the kernel has IPv6 support
    if os.path.exists("/proc/net/if_inet6"):
        for command in ("ip6tables -P INPUT ACCEPT",
                        "ip6tables -F",
                        "ip6tables -X",
                        "ip6tables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"):
            os.system(command)

        if ssh_port not in firewall['ipv6']['TCP']:
            firewall_allow(ssh_port, ipv6=True)

        # Open every configured IPv6 port
        for proto in ['TCP', 'UDP']:
            for port in firewall['ipv6'][proto]:
                os.system("ip6tables -A INPUT -p %s --dport %d -j ACCEPT"
                          % (proto, port))

        for command in ("ip6tables -A INPUT -i lo -j ACCEPT",
                        "ip6tables -A INPUT -p icmpv6 -j ACCEPT",
                        "ip6tables -P INPUT DROP"):
            os.system(command)

    os.system("service fail2ban restart")

    msignals.display(m18n.n('firewall_reloaded'), 'success')
    return firewall_list()
def app_addaccess(auth, apps, users):
    """
    Grant access right to users (everyone by default)

    Keyword argument:
        users -- Username(s) to grant access to
        apps -- App(s) to grant access on

    Raises:
        MoulinetteError -- if one of the apps is not installed
    """
    from yunohost.user import user_list, user_info
    from yunohost.hook import hook_callback

    if not users:
        # Default to every known user
        users = [user['username'] for user in user_list(auth)['users']]
    if not isinstance(users, list):
        users = [users]
    if not isinstance(apps, list):
        apps = [apps]

    for app in apps:
        if not _is_installed(app):
            raise MoulinetteError(errno.EINVAL,
                                  m18n.n('app_not_installed', app))

        with open(apps_setting_path + app + '/settings.yml') as f:
            # safe_load: avoid arbitrary object construction from a
            # crafted settings file (yaml.load without Loader is unsafe).
            app_settings = yaml.safe_load(f)

        if 'mode' not in app_settings:
            app_setting(app, 'mode', 'private')
            app_settings['mode'] = 'private'

        if app_settings['mode'] == 'private':
            # allowed_users is stored as a comma-separated string
            if 'allowed_users' in app_settings:
                new_users = app_settings['allowed_users']
            else:
                new_users = ''

            for allowed_user in users:
                if allowed_user not in new_users.split(','):
                    try:
                        user_info(auth, allowed_user)
                    except MoulinetteError:
                        # Silently skip unknown users
                        continue
                    if new_users == '':
                        new_users = allowed_user
                    else:
                        new_users = new_users + ',' + allowed_user

            app_setting(app, 'allowed_users', new_users.strip())

        hook_callback('post_app_addaccess', args=[app, new_users])

    app_ssowatconf(auth)

    return {'allowed_users': new_users.split(',')}
def app_removeaccess(auth, apps, users):
    """
    Revoke access right to users (everyone by default)

    Keyword argument:
        users -- Username(s) to revoke access from
        apps -- App(s) to revoke access on

    Raises:
        MoulinetteError -- if one of the apps is not installed
    """
    from yunohost.user import user_list
    from yunohost.hook import hook_callback

    remove_all = False
    if not users:
        remove_all = True
    if not isinstance(users, list):
        users = [users]
    if not isinstance(apps, list):
        apps = [apps]

    for app in apps:
        new_users = ''

        if not _is_installed(app):
            raise MoulinetteError(errno.EINVAL,
                                  m18n.n('app_not_installed', app))

        with open(apps_setting_path + app + '/settings.yml') as f:
            # safe_load: settings files contain plain YAML only
            app_settings = yaml.safe_load(f)

        if 'skipped_uris' not in app_settings or app_settings['skipped_uris'] != '/':
            if remove_all:
                new_users = ''
            elif 'allowed_users' in app_settings:
                # Keep every currently-allowed user not listed for removal
                for allowed_user in app_settings['allowed_users'].split(','):
                    if allowed_user not in users:
                        if new_users == '':
                            new_users = allowed_user
                        else:
                            new_users = new_users + ',' + allowed_user
            else:
                # No explicit allow-list: rebuild it from all known users
                # minus the ones being revoked.
                for user in user_list(auth)['users']:
                    if user['username'] not in users:
                        if new_users == '':
                            new_users = user['username']
                        else:
                            # BUGFIX: the original appended unconditionally
                            # after seeding, duplicating the first username
                            # (e.g. "alice,alice,bob").
                            new_users = new_users + ',' + user['username']

            app_setting(app, 'allowed_users', new_users.strip())

        hook_callback('post_app_removeaccess', args=[app, new_users])

    app_ssowatconf(auth)

    return {'allowed_users': new_users.split(',')}
def subscription_create(username, firstname, lastname, mail, password):
    """
    Create subscription

    Keyword argument:
        firstname
        lastname
        username -- Must be unique
        mail -- Main mail address must be unique
        password

    Raises:
        MoulinetteError -- if the username already exists as a system user
                           or if the database insert fails
    """
    import pwd
    from yunohost.domain import domain_list
    from yunohost.hook import hook_callback
    from yunohost.app import app_ssowatconf

    # Validate uniqueness of username in system users
    try:
        pwd.getpwnam(username)
    except KeyError:
        pass
    else:
        raise MoulinetteError(errno.EEXIST, m18n.n('system_username_exists'))

    # Check that the mail domain exists
    #~ if mail[mail.find('@')+1:] not in domain_list(auth)['domains']:
    #~     raise MoulinetteError(errno.EINVAL,
    #~                           m18n.n('mail_domain_unknown',
    #~                                  mail[mail.find('@')+1:]))

    # Hash the password with a random MD5-crypt salt.
    # NOTE(review): `random` is not a CSPRNG; `secrets`/`os.urandom` would be
    # preferable for salt generation — confirm before changing policy.
    char_set = string.ascii_uppercase + string.digits
    salt = ''.join(random.sample(char_set, 8))
    salt = '$1$' + salt + '$'
    # Renamed from `pwd`: the original rebinding shadowed the `pwd` module
    # imported at the top of this function.
    pwd_hash = '{CRYPT}' + crypt.crypt(str(password), salt)

    cur = _get_db()
    try:
        # Parameterized query: values are never interpolated into the SQL
        cur.execute("INSERT INTO prefix_subscriptions VALUES (%s,%s,%s,%s,%s)",
                    [username, firstname, lastname, mail, pwd_hash])
    except Exception:
        # Narrowed from a bare `except:`; still best-effort, reported upstream
        raise MoulinetteError(169, m18n.n('subscription_creation_failed'))
    else:
        msignals.display(m18n.n('subscription_created'), 'success')

    hook_callback('post_subscription_create',
                  [username, mail, password, firstname, lastname])

    return {
        'firstname': firstname,
        'lastname': lastname,
        'username': username,
        'mail': mail
    }
def domain_remove(auth, domain, force=False):
    """
    Delete domains

    Keyword argument:
        domain -- Domain to delete
        force -- Force the domain removal

    Raises:
        MoulinetteError -- if the domain is unknown, still hosts an app,
                           or the LDAP removal fails
    """
    from yunohost.hook import hook_callback

    if not force and domain not in domain_list(auth)['domains']:
        raise MoulinetteError(errno.EINVAL, m18n.n('domain_unknown'))

    # Check if apps are installed on the domain
    for app in os.listdir('/etc/yunohost/apps/'):
        with open('/etc/yunohost/apps/' + app + '/settings.yml') as f:
            try:
                # Best-effort read: apps with unreadable/partial settings are
                # simply skipped. safe_load + narrowed except replace the
                # previous unsafe yaml.load and bare `except:`.
                app_domain = yaml.safe_load(f)['domain']
            except Exception:
                continue
            else:
                if app_domain == domain:
                    raise MoulinetteError(errno.EPERM,
                                          m18n.n('domain_uninstall_app_first'))

    if auth.remove('virtualdomain=' + domain + ',ou=domains') or force:
        os.system('rm -rf /etc/yunohost/certs/%s' % domain)
    else:
        raise MoulinetteError(errno.EIO, m18n.n('domain_deletion_failed'))

    service_regen_conf(names=['nginx', 'metronome', 'dnsmasq'])
    os.system('yunohost app ssowatconf > /dev/null 2>&1')

    hook_callback('post_domain_remove', args=[domain])

    logger.success(m18n.n('domain_deleted'))
def user_create(auth, username, firstname, lastname, mail, password,
                mailbox_quota=0):
    """
    Create user

    Keyword argument:
        firstname
        lastname
        username -- Must be unique
        mail -- Main mail address must be unique
        password
        mailbox_quota -- Mailbox size quota

    """
    import pwd
    from yunohost.domain import domain_list
    from yunohost.hook import hook_callback
    from yunohost.app import app_ssowatconf

    # Validate uniqueness of username and mail in LDAP
    auth.validate_uniqueness({
        'uid': username,
        'mail': mail
    })

    # Validate uniqueness of username in system users
    try:
        pwd.getpwnam(username)
    except KeyError:
        pass
    else:
        raise MoulinetteError(errno.EEXIST, m18n.n('system_username_exists'))

    # Check that the mail domain exists
    if mail[mail.find('@')+1:] not in domain_list(auth)['domains']:
        raise MoulinetteError(errno.EINVAL,
                              m18n.n('mail_domain_unknown',
                                     domain=mail[mail.find('@')+1:]))

    # Get random UID/GID
    # os.system returns 0 when `getent` finds an entry, so the loop keeps
    # drawing random ids until a free one is found.
    # NOTE(review): the `and` condition exits as soon as EITHER check is
    # non-zero, so an id taken in only one of passwd/group can slip through;
    # `or` looks intended — confirm before changing.
    uid_check = gid_check = 0
    while uid_check == 0 and gid_check == 0:
        uid = str(random.randint(200, 99999))
        uid_check = os.system("getent passwd %s" % uid)
        gid_check = os.system("getent group %s" % uid)

    # Adapt values for LDAP
    fullname = '%s %s' % (firstname, lastname)
    rdn = 'uid=%s,ou=users' % username
    # MD5-crypt hash with a random 8-char salt ('{CRYPT}' marks the scheme)
    char_set = string.ascii_uppercase + string.digits
    salt = ''.join(random.sample(char_set, 8))
    salt = '$1$' + salt + '$'
    user_pwd = '{CRYPT}' + crypt.crypt(str(password), salt)
    attr_dict = {
        'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount'],
        'givenName': firstname,
        'sn': lastname,
        'displayName': fullname,
        'cn': fullname,
        'uid': username,
        'mail': mail,
        'maildrop': username,
        'mailuserquota': mailbox_quota,
        'userPassword': user_pwd,
        'gidNumber': uid,
        'uidNumber': uid,
        'homeDirectory': '/home/' + username,
        'loginShell': '/bin/false'
    }

    # If it is the first user, add some aliases
    if not auth.search(base='ou=users,dc=yunohost,dc=org', filter='uid=*'):
        with open('/etc/yunohost/current_host') as f:
            main_domain = f.readline().rstrip()
        aliases = [
            'root@' + main_domain,
            'admin@' + main_domain,
            'webmaster@' + main_domain,
            'postmaster@' + main_domain,
        ]
        attr_dict['mail'] = [attr_dict['mail']] + aliases

        # If exists, remove the redirection from the SSO
        try:
            with open('/etc/ssowat/conf.json.persistent') as json_conf:
                ssowat_conf = json.loads(str(json_conf.read()))

            if 'redirected_urls' in ssowat_conf and '/' in ssowat_conf['redirected_urls']:
                del ssowat_conf['redirected_urls']['/']

                with open('/etc/ssowat/conf.json.persistent', 'w+') as f:
                    json.dump(ssowat_conf, f, sort_keys=True, indent=4)
        except IOError:
            # No persistent SSO conf yet — nothing to clean up
            pass

    if auth.add(rdn, attr_dict):
        # Invalidate passwd to take user creation into account
        subprocess.call(['nscd', '-i', 'passwd'])

        # Update SFTP user group
        memberlist = auth.search(filter='cn=sftpusers',
                                 attrs=['memberUid'])[0]['memberUid']
        memberlist.append(username)
        if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}):
            try:
                # Attempt to create user home folder (login triggers
                # pam_mkhomedir; the command itself is a no-op)
                subprocess.check_call(
                    ['su', '-', username, '-c', "''"])
            except subprocess.CalledProcessError:
                if not os.path.isdir('/home/{0}'.format(username)):
                    logger.warning(m18n.n('user_home_creation_failed'),
                                   exc_info=1)
            app_ssowatconf(auth)
            # TODO: Send a welcome mail to user
            logger.success(m18n.n('user_created'))
            hook_callback('post_user_create',
                          args=[username, mail, password, firstname,
                                lastname])

            return {
                'fullname': fullname,
                'username': username,
                'mail': mail
            }

    # Any failure along the LDAP add/update path ends up here
    raise MoulinetteError(169, m18n.n('user_creation_failed'))
def firewall_reload(skip_upnp=False):
    """
    Reload all firewall rules

    Keyword arguments:
        skip_upnp -- Do not refresh port forwarding using UPnP

    """
    from yunohost.hook import hook_callback
    from yunohost.service import _run_service_command

    reloaded = False  # at least one of iptables/ip6tables was applied
    errors = False    # some individual rule command failed

    # Check if SSH port is allowed
    ssh_port = _get_ssh_port()
    if ssh_port not in firewall_list()["opened_ports"]:
        firewall_allow("TCP", ssh_port, no_reload=True)

    # Retrieve firewall rules and UPnP status
    firewall = firewall_list(raw=True)
    upnp = firewall_upnp()["enabled"] if not skip_upnp else False

    # IPv4 — probe availability first, then build the full rule list and
    # apply it in one pass
    try:
        process.check_output("iptables -w -L")
    except process.CalledProcessError as e:
        logger.debug(
            "iptables seems to be not available, it outputs:\n%s",
            prependlines(e.output.rstrip(), "> "),
        )
        logger.warning(m18n.n("iptables_unavailable"))
    else:
        rules = [
            "iptables -w -F",
            "iptables -w -X",
            "iptables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ["TCP", "UDP"]:
            for port in firewall["ipv4"][protocol]:
                # ports are quoted defensively before shell interpolation
                rules.append(
                    "iptables -w -A INPUT -p %s --dport %s -j ACCEPT"
                    % (protocol, process.quote(str(port))))
        rules += [
            "iptables -w -A INPUT -i lo -j ACCEPT",
            "iptables -w -A INPUT -p icmp -j ACCEPT",
            "iptables -w -P INPUT DROP",
        ]

        # Execute each rule
        if process.run_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # IPv6 — same scheme as IPv4
    # NOTE(review): the probe lacks the `-w` (wait-for-lock) flag used
    # everywhere else — confirm whether that is intentional.
    try:
        process.check_output("ip6tables -L")
    except process.CalledProcessError as e:
        logger.debug(
            "ip6tables seems to be not available, it outputs:\n%s",
            prependlines(e.output.rstrip(), "> "),
        )
        logger.warning(m18n.n("ip6tables_unavailable"))
    else:
        rules = [
            "ip6tables -w -F",
            "ip6tables -w -X",
            "ip6tables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ["TCP", "UDP"]:
            for port in firewall["ipv6"][protocol]:
                rules.append(
                    "ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT"
                    % (protocol, process.quote(str(port))))
        rules += [
            "ip6tables -w -A INPUT -i lo -j ACCEPT",
            "ip6tables -w -A INPUT -p icmpv6 -j ACCEPT",
            "ip6tables -w -P INPUT DROP",
        ]

        # Execute each rule
        if process.run_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    if not reloaded:
        # Neither stack could be configured
        raise YunohostError("firewall_reload_failed")

    hook_callback("post_iptable_rules",
                  args=[upnp, os.path.exists("/proc/net/if_inet6")])

    if upnp:
        # Refresh port forwarding with UPnP
        firewall_upnp(no_refresh=False)

    _run_service_command("reload", "fail2ban")

    if errors:
        logger.warning(m18n.n("firewall_rules_cmd_failed"))
    else:
        logger.success(m18n.n("firewall_reloaded"))

    return firewall_list()
def user_update(operation_logger, username, firstname=None, lastname=None,
                mail=None, change_password=None, add_mailforward=None,
                remove_mailforward=None, add_mailalias=None,
                remove_mailalias=None, mailbox_quota=None):
    """
    Update user informations

    Keyword argument:
        lastname
        mail
        firstname
        add_mailalias -- Mail aliases to add
        remove_mailforward -- Mailforward addresses to remove
        username -- Username of user to update
        add_mailforward -- Mailforward addresses to add
        change_password -- New password to set
        remove_mailalias -- Mail aliases to remove
        mailbox_quota -- New mailbox size quota

    """
    from yunohost.domain import domain_list, _get_maindomain
    from yunohost.app import app_ssowatconf
    from yunohost.utils.password import assert_password_is_strong_enough
    from yunohost.utils.ldap import _get_ldap_interface
    from yunohost.hook import hook_callback

    domains = domain_list()['domains']

    # Populate user informations
    ldap = _get_ldap_interface()
    attrs_to_fetch = ['givenName', 'sn', 'mail', 'maildrop']
    result = ldap.search(base='ou=users,dc=yunohost,dc=org',
                         filter='uid=' + username, attrs=attrs_to_fetch)
    if not result:
        raise YunohostError('user_unknown', user=username)
    user = result[0]
    # env vars forwarded to the post_user_update hooks
    env_dict = {"YNH_USER_USERNAME": username}

    # Get modifications from arguments
    new_attr_dict = {}
    if firstname:
        new_attr_dict['givenName'] = [firstname]  # TODO: Validate
        # displayName/cn are derived from first + last name
        new_attr_dict['cn'] = new_attr_dict['displayName'] = [
            firstname + ' ' + user['sn'][0]
        ]
        env_dict["YNH_USER_FIRSTNAME"] = firstname

    if lastname:
        new_attr_dict['sn'] = [lastname]  # TODO: Validate
        new_attr_dict['cn'] = new_attr_dict['displayName'] = [
            user['givenName'][0] + ' ' + lastname
        ]
        env_dict["YNH_USER_LASTNAME"] = lastname

    if lastname and firstname:
        # Both changed: rebuild the display name from the new values only
        new_attr_dict['cn'] = new_attr_dict['displayName'] = [
            firstname + ' ' + lastname
        ]

    # change_password is None if user_update is not called to change the password
    if change_password is not None:
        # when in the cli interface if the option to change the password is called
        # without a specified value, change_password will be set to the const 0.
        # In this case we prompt for the new password.
        if msettings.get('interface') == 'cli' and not change_password:
            change_password = msignals.prompt(m18n.n("ask_password"),
                                              True, True)
        # Ensure sufficiently complex password
        assert_password_is_strong_enough("user", change_password)

        new_attr_dict['userPassword'] = [_hash_user_password(change_password)]
        env_dict["YNH_USER_PASSWORD"] = change_password

    if mail:
        main_domain = _get_maindomain()
        # Reserved addresses that cannot be claimed as a personal mail
        aliases = [
            'root@' + main_domain,
            'admin@' + main_domain,
            'webmaster@' + main_domain,
            'postmaster@' + main_domain,
        ]
        try:
            ldap.validate_uniqueness({'mail': mail})
        except Exception as e:
            raise YunohostError('user_update_failed', user=username, error=e)
        if mail[mail.find('@') + 1:] not in domains:
            raise YunohostError('mail_domain_unknown',
                                domain=mail[mail.find('@') + 1:])
        if mail in aliases:
            raise YunohostError('mail_unavailable')

        # First entry of user['mail'] is the main address; replace it and
        # keep existing aliases behind it
        del user['mail'][0]
        new_attr_dict['mail'] = [mail] + user['mail']

    if add_mailalias:
        if not isinstance(add_mailalias, list):
            add_mailalias = [add_mailalias]
        for mail in add_mailalias:
            try:
                ldap.validate_uniqueness({'mail': mail})
            except Exception as e:
                raise YunohostError('user_update_failed', user=username,
                                    error=e)
            if mail[mail.find('@') + 1:] not in domains:
                raise YunohostError('mail_domain_unknown',
                                    domain=mail[mail.find('@') + 1:])
            user['mail'].append(mail)
        new_attr_dict['mail'] = user['mail']

    if remove_mailalias:
        if not isinstance(remove_mailalias, list):
            remove_mailalias = [remove_mailalias]
        for mail in remove_mailalias:
            # index 0 is the main address and can never be removed here
            if len(user['mail']) > 1 and mail in user['mail'][1:]:
                user['mail'].remove(mail)
            else:
                raise YunohostError('mail_alias_remove_failed', mail=mail)
        new_attr_dict['mail'] = user['mail']

    if 'mail' in new_attr_dict:
        env_dict["YNH_USER_MAILS"] = ','.join(new_attr_dict['mail'])

    if add_mailforward:
        if not isinstance(add_mailforward, list):
            add_mailforward = [add_mailforward]
        for mail in add_mailforward:
            # skip forwards already present (index 0 is the maildrop itself)
            if mail in user['maildrop'][1:]:
                continue
            user['maildrop'].append(mail)
        new_attr_dict['maildrop'] = user['maildrop']

    if remove_mailforward:
        if not isinstance(remove_mailforward, list):
            remove_mailforward = [remove_mailforward]
        for mail in remove_mailforward:
            if len(user['maildrop']) > 1 and mail in user['maildrop'][1:]:
                user['maildrop'].remove(mail)
            else:
                raise YunohostError('mail_forward_remove_failed', mail=mail)
        new_attr_dict['maildrop'] = user['maildrop']

    if 'maildrop' in new_attr_dict:
        env_dict["YNH_USER_MAILFORWARDS"] = ','.join(new_attr_dict['maildrop'])

    if mailbox_quota is not None:
        new_attr_dict['mailuserquota'] = [mailbox_quota]
        env_dict["YNH_USER_MAILQUOTA"] = mailbox_quota

    operation_logger.start()

    try:
        ldap.update('uid=%s,ou=users' % username, new_attr_dict)
    except Exception as e:
        raise YunohostError('user_update_failed', user=username, error=e)

    # Trigger post_user_update hooks
    hook_callback('post_user_update', env=env_dict)

    logger.success(m18n.n('user_updated'))
    app_ssowatconf()
    return user_info(username)
def backup_create(name=None, description=None, output_directory=None,
                  no_compress=False, ignore_hooks=False, hooks=[],
                  ignore_apps=False, apps=[]):
    """
    Create a backup local archive

    Keyword arguments:
        name -- Name of the backup archive
        description -- Short description of the backup
        output_directory -- Output directory for the backup
        no_compress -- Do not create an archive file
        hooks -- List of backup hooks names to execute
        ignore_hooks -- Do not execute backup hooks
        apps -- List of application names to backup
        ignore_apps -- Do not backup apps

    """
    # TODO: Add a 'clean' argument to clean output directory
    # NOTE(review): `hooks` and `apps` are mutable default arguments; they are
    # only read in this function so no state leaks, but `None` defaults with
    # normalization would be the safer idiom.
    tmp_dir = None

    # Validate what to backup
    if ignore_hooks and ignore_apps:
        raise MoulinetteError(errno.EINVAL,
                              m18n.n('backup_action_required'))

    # Validate and define backup name
    timestamp = int(time.time())
    if not name:
        name = time.strftime('%Y%m%d-%H%M%S')
    if name in backup_list()['archives']:
        raise MoulinetteError(errno.EINVAL,
                              m18n.n('backup_archive_name_exists'))

    # Validate additional arguments
    if no_compress and not output_directory:
        raise MoulinetteError(errno.EINVAL,
                              m18n.n('backup_output_directory_required'))
    if output_directory:
        output_directory = os.path.abspath(output_directory)

        # Check for forbidden folders (the archives dir itself and core
        # system directories)
        if output_directory.startswith(archives_path) or \
           re.match(r'^/(|(bin|boot|dev|etc|lib|root|run|sbin|sys|usr|var)(|/.*))$',
                    output_directory):
            raise MoulinetteError(errno.EINVAL,
                                  m18n.n('backup_output_directory_forbidden'))

        # Create the output directory
        if not os.path.isdir(output_directory):
            logger.debug("creating output directory '%s'", output_directory)
            os.makedirs(output_directory, 0750)
        # Check that output directory is empty
        elif no_compress and os.listdir(output_directory):
            raise MoulinetteError(errno.EIO,
                                  m18n.n('backup_output_directory_not_empty'))

        # Define temporary directory
        if no_compress:
            tmp_dir = output_directory
    else:
        output_directory = archives_path

    # Create temporary directory
    if not tmp_dir:
        tmp_dir = "%s/tmp/%s" % (backup_path, name)
        if os.path.isdir(tmp_dir):
            logger.debug("temporary directory for backup '%s' already exists",
                         tmp_dir)
            filesystem.rm(tmp_dir, recursive=True)
        filesystem.mkdir(tmp_dir, 0750, parents=True, uid='admin')

    def _clean_tmp_dir(retcode=0):
        # Run post-backup hooks first; only remove tmp_dir if they all succeed
        ret = hook_callback('post_backup_create', args=[tmp_dir, retcode])
        if not ret['failed']:
            filesystem.rm(tmp_dir, True, True)
        else:
            logger.warning(m18n.n('backup_cleaning_failed'))

    # Initialize backup info
    info = {
        'description': description or '',
        'created_at': timestamp,
        'apps': {},
        'hooks': {},
    }

    # Run system hooks
    if not ignore_hooks:
        # Check hooks availibility
        hooks_filtered = set()
        if hooks:
            for hook in hooks:
                try:
                    hook_info('backup', hook)
                # NOTE(review): bare except — unknown-hook detection would be
                # safer with a specific exception type; confirm what
                # hook_info raises.
                except:
                    logger.error(m18n.n('backup_hook_unknown', hook=hook))
                else:
                    hooks_filtered.add(hook)

        if not hooks or hooks_filtered:
            logger.info(m18n.n('backup_running_hooks'))
            ret = hook_callback('backup', hooks_filtered, args=[tmp_dir])
            if ret['succeed']:
                info['hooks'] = ret['succeed']

                # Save relevant restoration hooks
                tmp_hooks_dir = tmp_dir + '/hooks/restore'
                filesystem.mkdir(tmp_hooks_dir, 0750, True, uid='admin')
                for h in ret['succeed'].keys():
                    try:
                        i = hook_info('restore', h)
                    except:
                        logger.warning(m18n.n('restore_hook_unavailable',
                                              hook=h), exc_info=1)
                    else:
                        for f in i['hooks']:
                            shutil.copy(f['path'], tmp_hooks_dir)

    # Backup apps
    if not ignore_apps:
        # Filter applications to backup
        apps_list = set(os.listdir('/etc/yunohost/apps'))
        apps_filtered = set()
        if apps:
            for a in apps:
                if a not in apps_list:
                    logger.warning(m18n.n('unbackup_app', app=a))
                else:
                    apps_filtered.add(a)
        else:
            apps_filtered = apps_list

        # Run apps backup scripts
        tmp_script = '/tmp/backup_' + str(timestamp)
        for app_instance_name in apps_filtered:
            app_setting_path = '/etc/yunohost/apps/' + app_instance_name

            # Check if the app has a backup and restore script
            app_script = app_setting_path + '/scripts/backup'
            app_restore_script = app_setting_path + '/scripts/restore'
            if not os.path.isfile(app_script):
                logger.warning(m18n.n('unbackup_app', app=app_instance_name))
                continue
            elif not os.path.isfile(app_restore_script):
                # Backed up anyway, but warn that it won't be restorable
                logger.warning(m18n.n('unrestore_app', app=app_instance_name))

            tmp_app_dir = '{:s}/apps/{:s}'.format(tmp_dir, app_instance_name)
            tmp_app_bkp_dir = tmp_app_dir + '/backup'
            logger.info(m18n.n('backup_running_app_script',
                               app=app_instance_name))
            try:
                # Prepare backup directory for the app
                filesystem.mkdir(tmp_app_bkp_dir, 0750, True, uid='admin')
                shutil.copytree(app_setting_path, tmp_app_dir + '/settings')

                # Copy app backup script in a temporary folder and execute it
                subprocess.call(['install', '-Dm555', app_script, tmp_script])

                # Prepare env. var. to pass to script
                env_dict = {}
                app_id, app_instance_nb = _parse_app_instance_name(
                    app_instance_name)
                env_dict["YNH_APP_ID"] = app_id
                env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name
                env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
                env_dict["YNH_APP_BACKUP_DIR"] = tmp_app_bkp_dir

                hook_exec(tmp_script, args=[tmp_app_bkp_dir,
                                            app_instance_name],
                          raise_on_error=True, chdir=tmp_app_bkp_dir,
                          env=env_dict)
            except:
                # A failed app backup is dropped but does not abort the
                # whole backup
                logger.exception(m18n.n('backup_app_failed',
                                        app=app_instance_name))
                # Cleaning app backup directory
                shutil.rmtree(tmp_app_dir, ignore_errors=True)
            else:
                # Add app info
                i = app_info(app_instance_name)
                info['apps'][app_instance_name] = {
                    'version': i['version'],
                    'name': i['name'],
                    'description': i['description'],
                }
            finally:
                filesystem.rm(tmp_script, force=True)

    # Check if something has been saved
    if not info['hooks'] and not info['apps']:
        _clean_tmp_dir(1)
        raise MoulinetteError(errno.EINVAL, m18n.n('backup_nothings_done'))

    # Calculate total size (du -sb prints "<bytes>\t<path>")
    size = subprocess.check_output(
        ['du', '-sb', tmp_dir]).split()[0].decode('utf-8')
    info['size'] = int(size)

    # Create backup info file
    with open("%s/info.json" % tmp_dir, 'w') as f:
        f.write(json.dumps(info))

    # Create the archive
    if not no_compress:
        logger.info(m18n.n('backup_creating_archive'))
        archive_file = "%s/%s.tar.gz" % (output_directory, name)
        try:
            tar = tarfile.open(archive_file, "w:gz")
        except:
            tar = None

            # Create the archives directory and retry
            if not os.path.isdir(archives_path):
                os.mkdir(archives_path, 0750)
                try:
                    tar = tarfile.open(archive_file, "w:gz")
                except:
                    logger.debug("unable to open '%s' for writing",
                                 archive_file, exc_info=1)
                    tar = None
            else:
                logger.debug("unable to open '%s' for writing",
                             archive_file, exc_info=1)
        if tar is None:
            _clean_tmp_dir(2)
            raise MoulinetteError(errno.EIO,
                                  m18n.n('backup_archive_open_failed'))
        tar.add(tmp_dir, arcname='')
        tar.close()

        # Move info file next to the archive for fast listing
        os.rename(tmp_dir + '/info.json',
                  '{:s}/{:s}.info.json'.format(archives_path, name))

    # Clean temporary directory (unless it IS the output directory)
    if tmp_dir != output_directory:
        _clean_tmp_dir()

    logger.success(m18n.n('backup_complete'))

    # Return backup info
    info['name'] = name
    return {'archive': info}
def _clean_tmp_dir(retcode=0):
    # Let post-restore hooks run first; keep the temporary directory
    # around for inspection if any of them failed.
    outcome = hook_callback('post_backup_restore', args=[tmp_dir, retcode])
    if outcome['failed']:
        logger.warning(m18n.n('restore_cleaning_failed'))
    else:
        filesystem.rm(tmp_dir, True, True)
def backup_restore(auth, name, hooks=[], ignore_hooks=False,
                   apps=[], ignore_apps=False, force=False):
    """
    Restore from a local backup archive

    Keyword argument:
        name -- Name of the local backup archive
        hooks -- List of restoration hooks names to execute
        ignore_hooks -- Do not execute backup hooks
        apps -- List of application names to restore
        ignore_apps -- Do not restore apps
        force -- Force restauration on an already installed system

    """
    # Validate what to restore
    if ignore_hooks and ignore_apps:
        raise MoulinetteError(errno.EINVAL,
                              m18n.n('restore_action_required'))

    # Retrieve and open the archive
    info = backup_info(name)
    archive_file = info['path']
    try:
        tar = tarfile.open(archive_file, "r:gz")
    except:
        logger.debug("cannot open backup archive '%s'",
                     archive_file, exc_info=1)
        raise MoulinetteError(errno.EIO, m18n.n('backup_archive_open_failed'))

    # Check temporary directory
    tmp_dir = "%s/tmp/%s" % (backup_path, name)
    if os.path.isdir(tmp_dir):
        logger.debug("temporary directory for restoration '%s' already exists",
                     tmp_dir)
        os.system('rm -rf %s' % tmp_dir)

    # Check available disk space
    statvfs = os.statvfs(backup_path)
    free_space = statvfs.f_frsize * statvfs.f_bavail
    if free_space < info['size']:
        logger.debug("%dB left but %dB is needed", free_space, info['size'])
        raise MoulinetteError(
            errno.EIO, m18n.n('not_enough_disk_space', path=backup_path))

    def _clean_tmp_dir(retcode=0):
        # Run post-restore hooks; remove tmp_dir only when none failed
        ret = hook_callback('post_backup_restore', args=[tmp_dir, retcode])
        if not ret['failed']:
            filesystem.rm(tmp_dir, True, True)
        else:
            logger.warning(m18n.n('restore_cleaning_failed'))

    # Extract the tarball
    logger.info(m18n.n('backup_extracting_archive'))
    tar.extractall(tmp_dir)
    tar.close()

    # Retrieve backup info
    info_file = "%s/info.json" % tmp_dir
    try:
        with open(info_file, 'r') as f:
            info = json.load(f)
    except IOError:
        logger.debug("unable to load '%s'", info_file, exc_info=1)
        raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive'))
    else:
        logger.debug("restoring from backup '%s' created on %s",
                     name, time.ctime(info['created_at']))

    # Initialize restauration summary result
    result = {
        'apps': [],
        'hooks': {},
    }

    # Check if YunoHost is installed
    if os.path.isfile('/etc/yunohost/installed'):
        logger.warning(m18n.n('yunohost_already_installed'))
        if not force:
            try:
                # Ask confirmation for restoring
                i = msignals.prompt(m18n.n('restore_confirm_yunohost_installed',
                                           answers='y/N'))
            # NOTE(review): `NotImplemented` is a constant, not an exception
            # class — `NotImplementedError` looks intended here (raised by
            # non-interactive interfaces); confirm before changing.
            except NotImplemented:
                pass
            else:
                if i == 'y' or i == 'Y':
                    force = True
            if not force:
                _clean_tmp_dir()
                raise MoulinetteError(errno.EEXIST, m18n.n('restore_failed'))
    else:
        # Retrieve the domain from the backup
        try:
            with open("%s/conf/ynh/current_host" % tmp_dir, 'r') as f:
                domain = f.readline().rstrip()
        except IOError:
            logger.debug("unable to retrieve current_host from the backup",
                         exc_info=1)
            raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive'))

        logger.debug("executing the post-install...")
        tools_postinstall(domain, 'yunohost', True)

    # Run system hooks
    if not ignore_hooks:
        # Filter hooks to execute
        hooks_list = set(info['hooks'].keys())
        # Default predicate accepts everything; replaced below when an
        # explicit hook list was requested
        _is_hook_in_backup = lambda h: True
        if hooks:
            def _is_hook_in_backup(h):
                if h in hooks_list:
                    return True
                logger.error(m18n.n('backup_archive_hook_not_exec', hook=h))
                return False
        else:
            hooks = hooks_list

        # Check hooks availibility
        hooks_filtered = set()
        for h in hooks:
            if not _is_hook_in_backup(h):
                continue
            try:
                hook_info('restore', h)
            except:
                # Hook not on the system: fall back to the copy shipped
                # inside the backup archive, if any
                tmp_hooks = glob('{:s}/hooks/restore/*-{:s}'.format(tmp_dir, h))
                if not tmp_hooks:
                    logger.exception(m18n.n('restore_hook_unavailable',
                                            hook=h))
                    continue
                # Add restoration hook from the backup to the system
                # FIXME: Refactor hook_add and use it instead
                restore_hook_folder = custom_hook_folder + 'restore'
                filesystem.mkdir(restore_hook_folder, 755, True)
                for f in tmp_hooks:
                    logger.debug("adding restoration hook '%s' to the system "
                                 "from the backup archive '%s'", f,
                                 archive_file)
                    shutil.copy(f, restore_hook_folder)
            hooks_filtered.add(h)

        if hooks_filtered:
            logger.info(m18n.n('restore_running_hooks'))
            ret = hook_callback('restore', hooks_filtered, args=[tmp_dir])
            result['hooks'] = ret['succeed']

    # Add apps restore hook
    if not ignore_apps:
        # Filter applications to restore
        apps_list = set(info['apps'].keys())
        apps_filtered = set()
        if apps:
            for a in apps:
                if a not in apps_list:
                    logger.error(m18n.n('backup_archive_app_not_found',
                                        app=a))
                else:
                    apps_filtered.add(a)
        else:
            apps_filtered = apps_list

        for app_instance_name in apps_filtered:
            tmp_app_dir = '{:s}/apps/{:s}'.format(tmp_dir, app_instance_name)
            tmp_app_bkp_dir = tmp_app_dir + '/backup'

            # Check if the app is not already installed
            if _is_installed(app_instance_name):
                logger.error(m18n.n('restore_already_installed_app',
                                    app=app_instance_name))
                continue

            # Check if the app has a restore script
            app_script = tmp_app_dir + '/settings/scripts/restore'
            if not os.path.isfile(app_script):
                logger.warning(m18n.n('unrestore_app', app=app_instance_name))
                continue

            tmp_script = '/tmp/restore_' + app_instance_name
            app_setting_path = '/etc/yunohost/apps/' + app_instance_name
            logger.info(m18n.n('restore_running_app_script',
                               app=app_instance_name))
            try:
                # Copy app settings and set permissions
                shutil.copytree(tmp_app_dir + '/settings', app_setting_path)
                filesystem.chmod(app_setting_path, 0555, 0444, True)
                filesystem.chmod(app_setting_path + '/settings.yml', 0400)

                # Execute app restore script
                subprocess.call(['install', '-Dm555', app_script, tmp_script])

                # Prepare env. var. to pass to script
                env_dict = {}
                app_id, app_instance_nb = _parse_app_instance_name(
                    app_instance_name)
                env_dict["YNH_APP_ID"] = app_id
                env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name
                env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
                env_dict["YNH_APP_BACKUP_DIR"] = tmp_app_bkp_dir

                hook_exec(tmp_script, args=[tmp_app_bkp_dir,
                                            app_instance_name],
                          raise_on_error=True, chdir=tmp_app_bkp_dir,
                          env=env_dict)
            except:
                # A failed app restore is rolled back but does not abort
                # the whole restoration
                logger.exception(m18n.n('restore_app_failed',
                                        app=app_instance_name))
                # Cleaning app directory
                shutil.rmtree(app_setting_path, ignore_errors=True)
            else:
                result['apps'].append(app_instance_name)
            finally:
                filesystem.rm(tmp_script, force=True)

    # Check if something has been restored
    if not result['hooks'] and not result['apps']:
        _clean_tmp_dir(1)
        raise MoulinetteError(errno.EINVAL, m18n.n('restore_nothings_done'))
    if result['apps']:
        app_ssowatconf(auth)

    _clean_tmp_dir()
    logger.success(m18n.n('restore_complete'))

    return result
def domain_remove(operation_logger, domain, force=False):
    """
    Delete a domain from the system: remove its LDAP entry and certificates,
    then regenerate the nginx/xmpp/dns/mail configuration.

    Keyword argument:
        operation_logger -- Unit-operation logger (presumably injected by a
                            unit-operation decorator -- TODO confirm)
        domain -- Domain to delete
        force -- Force the domain removal (skips the "domain is known" check)

    Raises:
        YunohostError -- if the domain is unknown (unless force), is the main
                         domain, still has apps installed on it, or the LDAP
                         removal fails.
    """
    from yunohost.hook import hook_callback
    from yunohost.app import app_ssowatconf, app_info
    from yunohost.utils.ldap import _get_ldap_interface

    if not force and domain not in domain_list()['domains']:
        raise YunohostError('domain_name_unknown', domain=domain)

    # Check domain is not the main domain: either list the other domains that
    # could be promoted to main, or ask the admin to add a new one first.
    if domain == _get_maindomain():
        other_domains = domain_list()["domains"]
        other_domains.remove(domain)

        if other_domains:
            raise YunohostError('domain_cannot_remove_main',
                                domain=domain,
                                other_domains="\n * " + ("\n * ".join(other_domains)))
        else:
            raise YunohostError('domain_cannot_remove_main_add_new_one', domain=domain)

    # Check if apps are installed on the domain; refuse removal while any remain.
    apps_on_that_domain = []

    for app in _installed_apps():
        settings = _get_app_settings(app)
        label = app_info(app)["name"]
        if settings.get("domain") == domain:
            # NOTE(review): the conditional expression covers the *whole*
            # append() argument -- when the app has no "path" setting, only the
            # bare app id is appended instead of the formatted " - app ..." line.
            apps_on_that_domain.append(" - %s \"%s\" on https://%s%s" % (app, label, domain, settings["path"]) if "path" in settings else app)

    if apps_on_that_domain:
        raise YunohostError('domain_uninstall_app_first', apps="\n".join(apps_on_that_domain))

    operation_logger.start()

    ldap = _get_ldap_interface()
    try:
        ldap.remove('virtualdomain=' + domain + ',ou=domains')
    except Exception as e:
        raise YunohostError('domain_deletion_failed', domain=domain, error=e)

    # Drop this domain's certificates from disk.
    os.system('rm -rf /etc/yunohost/certs/%s' % domain)

    # Sometime we have weird issues with the regenconf where some files
    # appears as manually modified even though they weren't touched ...
    # There are a few ideas why this happens (like backup/restore nginx
    # conf ... which we shouldnt do ...). This in turns creates funky
    # situation where the regenconf may refuse to re-create the conf
    # (when re-creating a domain..)
    #
    # So here we force-clear the hash out of the regenconf if it exists.
    # This is a pretty ad hoc solution and only applied to nginx
    # because it's one of the major service, but in the long term we
    # should identify the root of this bug...
    _force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
    # And in addition we even force-delete the file. Otherwise, if the file was
    # manually modified, it may not get removed by the regenconf which leads to
    # catastrophic consequences of nginx breaking because it can't load the
    # cert file which disappeared etc..
    if os.path.exists("/etc/nginx/conf.d/%s.conf" % domain):
        _process_regen_conf("/etc/nginx/conf.d/%s.conf" % domain, new_conf=None, save=True)

    regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix'])
    app_ssowatconf()

    hook_callback('post_domain_remove', args=[domain])

    logger.success(m18n.n('domain_deleted'))
def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run=False,
               list_pending=False):
    """
    Regenerate the configuration file(s)

    Keyword argument:
        operation_logger -- Unit-operation logger (presumably injected by a
                            unit-operation decorator -- TODO confirm)
        names -- Categories to regenerate configuration of
        with_diff -- Show differences in case of configuration changes
        force -- Override all manual modifications in configuration files
        dry_run -- Show what would have been regenerated
        list_pending -- List pending configuration files and exit

    Returns:
        dict mapping category -> {'applied': {...}, 'pending': {...}} with one
        entry per processed conf file, or the pending-conf listing when
        list_pending is set.

    Raises:
        YunohostError -- when every pre-regen hook failed ('regenconf_failed').

    NOTE(review): `names=[]` is a mutable default, and `names.remove("glances")`
    mutates a caller-supplied list in place -- callers passing their own list
    should not rely on it being left untouched. Left as-is to preserve the
    public signature.
    """
    result = {}

    # Return the list of pending conf
    if list_pending:
        pending_conf = _get_pending_conf(names)

        if not with_diff:
            return pending_conf

        for category, conf_files in pending_conf.items():
            for system_path, pending_path in conf_files.items():
                pending_conf[category][system_path] = {
                    'pending_conf': pending_path,
                    'diff': _get_files_diff(system_path, pending_path, True),
                }

        return pending_conf

    if not dry_run:
        operation_logger.related_to = [('configuration', x) for x in names]
        if not names:
            operation_logger.name_parameter_override = 'all'
        elif len(names) != 1:
            operation_logger.name_parameter_override = str(
                len(operation_logger.related_to)) + '_categories'
        operation_logger.start()

    # Clean pending conf directory
    if os.path.isdir(PENDING_CONF_DIR):
        if not names:
            shutil.rmtree(PENDING_CONF_DIR, ignore_errors=True)
        else:
            for name in names:
                shutil.rmtree(os.path.join(PENDING_CONF_DIR, name),
                              ignore_errors=True)
    else:
        filesystem.mkdir(PENDING_CONF_DIR, 0o755, True)

    # Format common hooks arguments
    common_args = [1 if force else 0, 1 if dry_run else 0]

    # Execute hooks for pre-regen
    pre_args = ['pre', ] + common_args

    def _pre_call(name, priority, path, args):
        # create the pending conf directory for the category
        category_pending_path = os.path.join(PENDING_CONF_DIR, name)
        filesystem.mkdir(category_pending_path, 0o755, True, uid='root')

        # return the arguments to pass to the script
        return pre_args + [category_pending_path, ]

    # Remember whether "ssh" was asked for explicitly *before* names gets
    # expanded to "everything" -- used below to refuse touching a manually
    # modified sshd_config unless the admin explicitly targeted ssh.
    ssh_explicitly_specified = isinstance(names, list) and "ssh" in names

    # By default, we regen everything
    if not names:
        names = hook_list('conf_regen', list_by='name',
                          show_info=False)['hooks']

    # Dirty hack for legacy code : avoid attempting to regen the conf for
    # glances because it got removed ...  This is only needed *once*
    # during the upgrade from 3.7 to 3.8 because Yunohost will attempt to
    # regen glance's conf *before* it gets automatically removed from
    # services.yml (which will happens only during the regen-conf of
    # 'yunohost', so at the very end of the regen-conf cycle) Anyway,
    # this can be safely removed once we're in >= 4.0
    if "glances" in names:
        names.remove("glances")

    # [Optimization] We compute and feed the domain list to the conf regen
    # hooks to avoid having to call "yunohost domain list" so many times which
    # ends up in wasted time (about 3~5 seconds per call on a RPi2)
    from yunohost.domain import domain_list
    env = {}
    # Well we can only do domain_list() if postinstall is done ...
    # ... but hooks that effectively need the domain list are only
    # called only after the 'installed' flag is set so that's all good,
    # though kinda tight-coupled to the postinstall logic :s
    if os.path.exists("/etc/yunohost/installed"):
        env["YNH_DOMAINS"] = " ".join(domain_list()["domains"])

    pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call,
                               env=env)

    # Keep only the hook names with at least one success
    names = [hook for hook, infos in pre_result.items()
             if any(result["state"] == "succeed" for result in infos.values())]

    # FIXME : what do in case of partial success/failure ...
    if not names:
        ret_failed = [hook for hook, infos in pre_result.items()
                      if any(result["state"] == "failed" for result in infos.values())]
        raise YunohostError('regenconf_failed',
                            categories=', '.join(ret_failed))

    # Set the processing method: in dry-run mode nothing is written, we just
    # pretend every regen succeeded.
    _regen = _process_regen_conf if not dry_run else lambda *a, **k: True

    operation_logger.related_to = []

    # Iterate over categories and process pending conf
    for category, conf_files in _get_pending_conf(names).items():
        if not dry_run:
            operation_logger.related_to.append(('configuration', category))

        # BUGFIX: the two messages were swapped -- the 'dry' key is the
        # dry-run one, consistent with the regenconf_updated /
        # regenconf_would_be_updated pair used further below.
        if dry_run:
            logger.debug(
                m18n.n('regenconf_dry_pending_applying', category=category))
        else:
            logger.debug(
                m18n.n('regenconf_pending_applying', category=category))

        conf_hashes = _get_conf_hashes(category)
        succeed_regen = {}
        failed_regen = {}

        # Here we are doing some weird legacy shit
        # The thing is, on some very old or specific setup, the sshd_config file
        # was absolutely not managed by the regenconf ...
        # But we now want to make sure that this file is managed.
        # However, we don't want to overwrite a specific custom sshd_config
        # which may make the admin unhappy ...
        # So : if the hash for this file does not exists, we set the hash as the
        # hash of the pending configuration ...
        # That way, the file will later appear as manually modified.
        sshd_config = "/etc/ssh/sshd_config"
        if category == "ssh" and sshd_config not in conf_hashes and sshd_config in conf_files:
            conf_hashes[sshd_config] = _calculate_hash(conf_files[sshd_config])
            _update_conf_hashes(category, conf_hashes)

        # Consider the following scenario:
        # - you add a domain foo.bar
        # - the regen-conf creates file /etc/dnsmasq.d/foo.bar
        # - the admin manually *deletes* /etc/dnsmasq.d/foo.bar
        # - the file is now understood as manually deleted because there's the
        #   old file hash in regenconf.yml
        #
        # ... so far so good, that's the expected behavior.
        #
        # But then:
        # - the admin remove domain foo.bar entirely
        # - but now the hash for /etc/dnsmasq.d/foo.bar is *still* in
        #   regenconf.yml and the file is still flagged as manually
        #   modified/deleted... And the user cannot even do anything about it
        #   except removing the hash in regenconf.yml...
        #
        # Expected behavior: it should forget about that hash because dnsmasq's
        # regen-conf doesn't say anything about what's the state of that file so
        # it should assume that it should be deleted.
        #
        # - then the admin tries to *re-add* foo.bar !
        # - ... but because the file is still flagged as manually modified
        #   the regen-conf refuses to re-create the file.
        #
        # Excepted behavior : the regen-conf should have forgot about the hash
        # from earlier and this wouldnt happen.
        # ------
        # conf_files contain files explicitly set by the current regen conf run
        # conf_hashes contain all files known from the past runs
        # we compare these to get the list of stale hashes and flag the file as
        # "should be removed"
        stale_files = set(conf_hashes.keys()) - set(conf_files.keys())
        stale_files_with_non_empty_hash = [f for f in stale_files if conf_hashes.get(f)]
        for f in stale_files_with_non_empty_hash:
            conf_files[f] = None
        # </> End discussion about stale file hashes

        force_update_hashes_for_this_category = False

        for system_path, pending_path in conf_files.items():
            logger.debug("processing pending conf '%s' to system conf '%s'",
                         pending_path, system_path)
            conf_status = None
            regenerated = False

            # Get the diff between files
            conf_diff = _get_files_diff(
                system_path, pending_path, True) if with_diff else None

            # Check if the conf must be removed: an *empty* pending file means
            # "this conf should not exist on the system".
            to_remove = True if pending_path and os.path.getsize(
                pending_path) == 0 else False

            # Retrieve and calculate hashes
            system_hash = _calculate_hash(system_path)
            saved_hash = conf_hashes.get(system_path, None)
            new_hash = None if to_remove else _calculate_hash(pending_path)

            # -> configuration was previously managed by yunohost but should now
            # be removed / unmanaged
            if system_path in stale_files_with_non_empty_hash:
                # File is already deleted, so let's just silently forget about
                # this hash entirely
                if not system_hash:
                    logger.debug("> forgetting about stale file/hash")
                    conf_hashes[system_path] = None
                    conf_status = 'forget-about-it'
                    regenerated = True
                # Otherwise there's still a file on the system but it's not
                # managed by Yunohost anymore... But if user requested --force
                # we shall force-erase it
                elif force:
                    logger.debug("> force-remove stale file")
                    regenerated = _regen(system_path)
                    conf_status = 'force-removed'
                # Otherwise, flag the file as manually modified
                else:
                    logger.warning(m18n.n('regenconf_file_manually_modified',
                                          conf=system_path))
                    conf_status = 'modified'

            # -> system conf does not exists
            elif not system_hash:
                if to_remove:
                    logger.debug("> system conf is already removed")
                    os.remove(pending_path)
                    conf_hashes[system_path] = None
                    conf_status = 'forget-about-it'
                    force_update_hashes_for_this_category = True
                    continue
                elif not saved_hash or force:
                    if force:
                        logger.debug("> system conf has been manually removed")
                        conf_status = 'force-created'
                    else:
                        logger.debug("> system conf does not exist yet")
                        conf_status = 'created'
                    regenerated = _regen(system_path, pending_path, save=False)
                else:
                    logger.info(m18n.n('regenconf_file_manually_removed',
                                       conf=system_path))
                    conf_status = 'removed'

            # -> system conf is not managed yet
            elif not saved_hash:
                logger.debug("> system conf is not managed yet")
                if system_hash == new_hash:
                    logger.debug("> no changes to system conf has been made")
                    conf_status = 'managed'
                    regenerated = True
                elif not to_remove:
                    # If the conf exist but is not managed yet, and is not to be
                    # removed, we assume that it is safe to regen it, since the
                    # file is backuped anyway (by default in _regen), as long as
                    # we warn the user appropriately.
                    logger.info(m18n.n('regenconf_now_managed_by_yunohost',
                                       conf=system_path, category=category))
                    regenerated = _regen(system_path, pending_path)
                    conf_status = 'new'
                elif force:
                    regenerated = _regen(system_path)
                    conf_status = 'force-removed'
                else:
                    logger.info(m18n.n('regenconf_file_kept_back',
                                       conf=system_path, category=category))
                    conf_status = 'unmanaged'

            # -> system conf has not been manually modified
            elif system_hash == saved_hash:
                if to_remove:
                    regenerated = _regen(system_path)
                    conf_status = 'removed'
                elif system_hash != new_hash:
                    regenerated = _regen(system_path, pending_path)
                    conf_status = 'updated'
                else:
                    logger.debug("> system conf is already up-to-date")
                    os.remove(pending_path)
                    continue

            else:
                logger.debug("> system conf has been manually modified")
                if system_hash == new_hash:
                    logger.debug("> new conf is as current system conf")
                    conf_status = 'managed'
                    regenerated = True
                elif force and system_path == sshd_config and not ssh_explicitly_specified:
                    logger.warning(m18n.n('regenconf_need_to_explicitly_specify_ssh'))
                    conf_status = 'modified'
                elif force:
                    regenerated = _regen(system_path, pending_path)
                    conf_status = 'force-updated'
                else:
                    logger.warning(m18n.n('regenconf_file_manually_modified',
                                          conf=system_path))
                    conf_status = 'modified'

            # Store the result
            conf_result = {'status': conf_status}
            if conf_diff is not None:
                conf_result['diff'] = conf_diff
            if regenerated:
                succeed_regen[system_path] = conf_result
                conf_hashes[system_path] = new_hash
                if pending_path and os.path.isfile(pending_path):
                    os.remove(pending_path)
            else:
                failed_regen[system_path] = conf_result

        # Check for category conf changes
        if not succeed_regen and not failed_regen:
            logger.debug(m18n.n('regenconf_up_to_date', category=category))
            continue
        elif not failed_regen:
            if not dry_run:
                logger.success(m18n.n('regenconf_updated', category=category))
            else:
                logger.success(
                    m18n.n('regenconf_would_be_updated', category=category))

        if (succeed_regen or force_update_hashes_for_this_category) and not dry_run:
            _update_conf_hashes(category, conf_hashes)

        # Append the category results
        result[category] = {'applied': succeed_regen, 'pending': failed_regen}

    # Return in case of dry run
    if dry_run:
        return result

    # Execute hooks for post-regen
    post_args = ['post', ] + common_args

    # NOTE: intentionally re-uses the name _pre_call -- from here on the
    # pre-callback passed to hook_callback builds the *post*-regen arguments.
    def _pre_call(name, priority, path, args):
        # append coma-separated applied changes for the category
        if name in result and result[name]['applied']:
            regen_conf_files = ','.join(result[name]['applied'].keys())
        else:
            regen_conf_files = ''
        return post_args + [regen_conf_files, ]

    hook_callback('conf_regen', names, pre_callback=_pre_call, env=env)

    operation_logger.success()

    return result
def user_create(operation_logger, username, firstname, lastname, domain, password,
                mailbox_quota="0", mail=None):
    """
    Create a new user: LDAP entry, home folder, per-user group, and
    membership in 'all_users'; then trigger the post_user_create hooks.

    Keyword argument:
        operation_logger -- Unit-operation logger (presumably injected by a
                            unit-operation decorator -- TODO confirm)
        username -- Must be unique among LDAP users and system users
        firstname
        lastname
        domain -- Domain of the email address / xmpp account; prompted
                  interactively when omitted on the CLI
        password -- Checked for strength before anything else
        mailbox_quota -- Mail quota passed to LDAP (default "0")
        mail -- DEPRECATED: full mail address; only its domain part is used

    Returns:
        dict with keys 'fullname', 'username', 'mail'

    Raises:
        YunohostError -- weak password, unknown domain, duplicate user/mail,
                         reserved alias address, or LDAP failure.
    """

    from yunohost.domain import domain_list, _get_maindomain
    from yunohost.hook import hook_callback
    from yunohost.utils.password import assert_password_is_strong_enough
    from yunohost.utils.ldap import _get_ldap_interface

    # Ensure sufficiently complex password
    assert_password_is_strong_enough("user", password)

    if mail is not None:
        logger.warning(
            "Packagers ! Using --mail in 'yunohost user create' is deprecated ... please use --domain instead."
        )
        # Keep only the domain part of the deprecated --mail argument.
        domain = mail.split("@")[-1]

    # Validate domain used for email address/xmpp account
    if domain is None:
        if msettings.get('interface') == 'api':
            raise YunohostError('Invalide usage, specify domain argument')
        else:
            # Interactive CLI: display the available domains and prompt,
            # defaulting to the main domain.
            msignals.display(m18n.n('domains_available'))
            for domain in domain_list()['domains']:
                msignals.display("- {}".format(domain))

            maindomain = _get_maindomain()
            domain = msignals.prompt(
                m18n.n('ask_user_domain') + ' (default: %s)' % maindomain)
            if not domain:
                domain = maindomain

    # Check that the domain exists
    if domain not in domain_list()['domains']:
        raise YunohostError('domain_name_unknown', domain=domain)

    mail = username + '@' + domain
    ldap = _get_ldap_interface()

    if username in user_list()["users"]:
        raise YunohostError("user_already_exists", user=username)

    # Validate uniqueness of username and mail in LDAP
    try:
        ldap.validate_uniqueness({
            'uid': username,
            'mail': mail,
            'cn': username
        })
    except Exception as e:
        raise YunohostError('user_creation_failed', user=username, error=e)

    # Validate uniqueness of username in system users
    all_existing_usernames = {x.pw_name for x in pwd.getpwall()}
    if username in all_existing_usernames:
        raise YunohostError('system_username_exists')

    main_domain = _get_maindomain()
    # Reserved addresses on the main domain that a user may not claim.
    aliases = [
        'root@' + main_domain,
        'admin@' + main_domain,
        'webmaster@' + main_domain,
        'postmaster@' + main_domain,
        'abuse@' + main_domain,
    ]

    if mail in aliases:
        raise YunohostError('mail_unavailable')

    operation_logger.start()

    # Get random UID/GID, retrying until one is free in both passwd and group.
    all_uid = {str(x.pw_uid) for x in pwd.getpwall()}
    all_gid = {str(x.gr_gid) for x in grp.getgrall()}

    uid_guid_found = False
    while not uid_guid_found:
        # LXC uid number is limited to 65536 by default
        uid = str(random.randint(1001, 65000))
        uid_guid_found = uid not in all_uid and uid not in all_gid

    # Adapt values for LDAP
    fullname = '%s %s' % (firstname, lastname)

    attr_dict = {
        'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh'],
        'givenName': [firstname],
        'sn': [lastname],
        'displayName': [fullname],
        'cn': [fullname],
        'uid': [username],
        'mail': mail,  # NOTE: this one seems to be already a list
        'maildrop': [username],
        'mailuserquota': [mailbox_quota],
        'userPassword': [_hash_user_password(password)],
        'gidNumber': [uid],
        'uidNumber': [uid],
        'homeDirectory': ['/home/' + username],
        'loginShell': ['/bin/false']
    }

    # If it is the first user, add some aliases
    if not ldap.search(base='ou=users,dc=yunohost,dc=org', filter='uid=*'):
        attr_dict['mail'] = [attr_dict['mail']] + aliases

    try:
        ldap.add('uid=%s,ou=users' % username, attr_dict)
    except Exception as e:
        raise YunohostError('user_creation_failed', user=username, error=e)

    # Invalidate passwd and group to take user and group creation into account
    subprocess.call(['nscd', '-i', 'passwd'])
    subprocess.call(['nscd', '-i', 'group'])

    try:
        # Attempt to create user home folder
        subprocess.check_call(["mkhomedir_helper", username])
    except subprocess.CalledProcessError:
        # Only warn if the folder really does not exist afterwards.
        if not os.path.isdir('/home/{0}'.format(username)):
            logger.warning(m18n.n('user_home_creation_failed'), exc_info=1)

    # Create group for user and add to group 'all_users'
    user_group_create(groupname=username, gid=uid, primary_group=True,
                      sync_perm=False)
    user_group_update(groupname='all_users', add=username, force=True,
                      sync_perm=True)

    # Trigger post_user_create hooks
    env_dict = {
        "YNH_USER_USERNAME": username,
        "YNH_USER_MAIL": mail,
        "YNH_USER_PASSWORD": password,
        "YNH_USER_FIRSTNAME": firstname,
        "YNH_USER_LASTNAME": lastname
    }

    hook_callback('post_user_create', args=[username, mail], env=env_dict)

    # TODO: Send a welcome mail to user
    logger.success(m18n.n('user_created'))

    return {'fullname': fullname, 'username': username, 'mail': mail}
def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
                       list_pending=False):
    """
    Regenerate the configuration file(s) for a service

    Legacy (Python-2 era) predecessor of regen_conf: runs the 'conf_regen'
    pre-hooks, compares system/pending/saved hashes per conf file, applies or
    skips each one, then runs the post-hooks.

    Keyword argument:
        names -- Services name to regenerate configuration of
        with_diff -- Show differences in case of configuration changes
        force -- Override all manual modifications in configuration files
        dry_run -- Show what would have been regenerated
        list_pending -- List pending configuration files and exit

    Returns:
        dict mapping service -> {'applied': {...}, 'pending': {...}},
        or the pending-conf listing when list_pending is set.

    Raises:
        MoulinetteError -- when every pre-regen hook failed.
    """
    result = {}

    # Return the list of pending conf
    if list_pending:
        pending_conf = _get_pending_conf(names)
        if with_diff:
            for service, conf_files in pending_conf.items():
                for system_path, pending_path in conf_files.items():
                    pending_conf[service][system_path] = {
                        'pending_conf': pending_path,
                        'diff': _get_files_diff(
                            system_path, pending_path, True),
                    }
        return pending_conf

    # Clean pending conf directory
    shutil.rmtree(pending_conf_dir, ignore_errors=True)
    filesystem.mkdir(pending_conf_dir, 0755, True)

    # Format common hooks arguments
    common_args = [1 if force else 0, 1 if dry_run else 0]

    # Execute hooks for pre-regen
    pre_args = ['pre', ] + common_args

    def _pre_call(name, priority, path, args):
        # create the pending conf directory for the service
        service_pending_path = os.path.join(pending_conf_dir, name)
        filesystem.mkdir(service_pending_path, 0755, True, uid='admin')
        # return the arguments to pass to the script
        return pre_args + [service_pending_path, ]
    pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call)

    # Update the services name: keep only those whose pre-hook succeeded.
    names = pre_result['succeed'].keys()
    if not names:
        raise MoulinetteError(errno.EIO,
                              m18n.n('service_regenconf_failed',
                                     services=', '.join(pre_result['failed'])))

    # Set the processing method: in dry-run mode nothing is written, we just
    # pretend every regen succeeded.
    _regen = _process_regen_conf if not dry_run else lambda *a, **k: True

    # Iterate over services and process pending conf
    for service, conf_files in _get_pending_conf(names).items():
        logger.info(m18n.n(
            'service_regenconf_pending_applying' if not dry_run else
            'service_regenconf_dry_pending_applying',
            service=service))

        conf_hashes = _get_conf_hashes(service)
        succeed_regen = {}
        failed_regen = {}

        for system_path, pending_path in conf_files.items():
            logger.debug("processing pending conf '%s' to system conf '%s'",
                         pending_path, system_path)
            conf_status = None
            regenerated = False

            # Get the diff between files
            conf_diff = _get_files_diff(
                system_path, pending_path, True) if with_diff else None

            # Check if the conf must be removed: an empty pending file means
            # "this conf should not exist on the system".
            to_remove = True if os.path.getsize(pending_path) == 0 else False

            # Retrieve and calculate hashes
            current_hash = conf_hashes.get(system_path, None)
            system_hash = _calculate_hash(system_path)
            new_hash = None if to_remove else _calculate_hash(pending_path)

            # -> system conf does not exists
            if not system_hash:
                if to_remove:
                    logger.debug("> system conf is already removed")
                    os.remove(pending_path)
                    continue
                if not current_hash or force:
                    if force:
                        logger.debug("> system conf has been manually removed")
                        conf_status = 'force-created'
                    else:
                        logger.debug("> system conf does not exist yet")
                        conf_status = 'created'
                    regenerated = _regen(
                        system_path, pending_path, save=False)
                else:
                    logger.warning(m18n.n(
                        'service_conf_file_manually_removed',
                        conf=system_path))
                    conf_status = 'removed'

            # -> system conf is not managed yet
            elif not current_hash:
                logger.debug("> system conf is not managed yet")
                if system_hash == new_hash:
                    logger.debug("> no changes to system conf has been made")
                    conf_status = 'managed'
                    regenerated = True
                elif force and to_remove:
                    regenerated = _regen(system_path)
                    conf_status = 'force-removed'
                elif force:
                    regenerated = _regen(system_path, pending_path)
                    conf_status = 'force-updated'
                else:
                    logger.warning(m18n.n('service_conf_file_not_managed',
                                          conf=system_path))
                    conf_status = 'unmanaged'

            # -> system conf has not been manually modified
            elif system_hash == current_hash:
                if to_remove:
                    regenerated = _regen(system_path)
                    conf_status = 'removed'
                elif system_hash != new_hash:
                    regenerated = _regen(system_path, pending_path)
                    conf_status = 'updated'
                else:
                    logger.debug("> system conf is already up-to-date")
                    os.remove(pending_path)
                    continue

            else:
                logger.debug("> system conf has been manually modified")
                if force:
                    regenerated = _regen(system_path, pending_path)
                    conf_status = 'force-updated'
                else:
                    logger.warning(m18n.n(
                        'service_conf_file_manually_modified',
                        conf=system_path))
                    conf_status = 'modified'

            # Store the result
            conf_result = {'status': conf_status}
            if conf_diff is not None:
                conf_result['diff'] = conf_diff
            if regenerated:
                succeed_regen[system_path] = conf_result
                conf_hashes[system_path] = new_hash
                if os.path.isfile(pending_path):
                    os.remove(pending_path)
            else:
                failed_regen[system_path] = conf_result

        # Check for service conf changes
        if not succeed_regen and not failed_regen:
            logger.info(m18n.n('service_conf_up_to_date', service=service))
            continue
        elif not failed_regen:
            logger.success(m18n.n(
                'service_conf_updated' if not dry_run else
                'service_conf_would_be_updated',
                service=service))
        if succeed_regen and not dry_run:
            _update_conf_hashes(service, conf_hashes)

        # Append the service results
        result[service] = {
            'applied': succeed_regen,
            'pending': failed_regen
        }

    # Return in case of dry run
    if dry_run:
        return result

    # Execute hooks for post-regen
    post_args = ['post', ] + common_args

    # NOTE: intentionally re-uses the name _pre_call -- from here on the
    # pre-callback passed to hook_callback builds the *post*-regen arguments.
    def _pre_call(name, priority, path, args):
        # append coma-separated applied changes for the service
        if name in result and result[name]['applied']:
            regen_conf_files = ','.join(result[name]['applied'].keys())
        else:
            regen_conf_files = ''
        return post_args + [regen_conf_files, ]
    hook_callback('conf_regen', names, pre_callback=_pre_call)

    return result
def user_create(auth, username, firstname, lastname, mail, password):
    """
    Create user (legacy auth-based variant): add the LDAP entry, handle
    first-user aliases and SSO redirection cleanup, update the sftpusers
    group, and trigger the post_user_create hooks.

    Keyword argument:
        auth -- LDAP authentication/connection object
        firstname
        lastname
        username -- Must be unique
        mail -- Main mail address must be unique
        password -- Minimum 4 characters; stored as a {CRYPT} MD5-crypt hash

    Returns:
        dict with keys 'fullname', 'username', 'mail'

    Raises:
        MoulinetteError -- short password, unknown mail domain, or failed
                           LDAP add/update (code 169).
    """
    from yunohost.domain import domain_list
    from yunohost.hook import hook_callback

    # Validate password length
    if len(password) < 4:
        raise MoulinetteError(errno.EINVAL, m18n.n('password_too_short'))

    auth.validate_uniqueness({
        'uid': username,
        'mail': mail
    })

    # The mail's domain part must be one of the domains managed here.
    if mail[mail.find('@') + 1:] not in domain_list(auth)['domains']:
        raise MoulinetteError(errno.EINVAL,
                              m18n.n('mail_domain_unknown')
                              % mail[mail.find('@') + 1:])

    # Get random UID/GID: os.system returns 0 when getent *finds* the id, so
    # keep drawing until both lookups fail (id is free).
    uid_check = gid_check = 0
    while uid_check == 0 and gid_check == 0:
        uid = str(random.randint(200, 99999))
        uid_check = os.system("getent passwd %s" % uid)
        gid_check = os.system("getent group %s" % uid)

    # Adapt values for LDAP
    fullname = '%s %s' % (firstname, lastname)
    rdn = 'uid=%s,ou=users' % username
    char_set = string.ascii_uppercase + string.digits
    salt = ''.join(random.sample(char_set, 8))
    salt = '$1$' + salt + '$'
    # NOTE(review): local name 'pwd' shadows the stdlib 'pwd' module used
    # elsewhere in this file.
    pwd = '{CRYPT}' + crypt.crypt(str(password), salt)
    attr_dict = {
        'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount'],
        'givenName': firstname,
        'sn': lastname,
        'displayName': fullname,
        'cn': fullname,
        'uid': username,
        'mail': mail,
        'maildrop': username,
        'userPassword': pwd,
        'gidNumber': uid,
        'uidNumber': uid,
        'homeDirectory': '/home/' + username,
        'loginShell': '/bin/false'
    }

    # If it is the first user, add some aliases
    if not auth.search(base='ou=users,dc=yunohost,dc=org', filter='uid=*'):
        with open('/etc/yunohost/current_host') as f:
            main_domain = f.readline().rstrip()
        aliases = [
            'root@' + main_domain,
            'admin@' + main_domain,
            'webmaster@' + main_domain,
            'postmaster@' + main_domain,
        ]
        attr_dict['mail'] = [attr_dict['mail']] + aliases

        # If exists, remove the redirection from the SSO
        try:
            with open('/etc/ssowat/conf.json.persistent') as json_conf:
                ssowat_conf = json.loads(str(json_conf.read()))

            if 'redirected_urls' in ssowat_conf and '/' in ssowat_conf['redirected_urls']:
                del ssowat_conf['redirected_urls']['/']
                with open('/etc/ssowat/conf.json.persistent', 'w+') as f:
                    json.dump(ssowat_conf, f, sort_keys=True, indent=4)
        except IOError:
            # Best effort: missing/unreadable persistent conf is fine here.
            pass

    if auth.add(rdn, attr_dict):
        # Update SFTP user group
        memberlist = auth.search(filter='cn=sftpusers',
                                 attrs=['memberUid'])[0]['memberUid']
        memberlist.append(username)
        if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}):
            # Force home directory creation via a no-op login shell.
            os.system("su - %s -c ''" % username)
            os.system('yunohost app ssowatconf > /dev/null 2>&1')
            # TODO: Send a welcome mail to user
            msignals.display(m18n.n('user_created'), 'success')
            hook_callback('post_user_create',
                          [username, mail, password, firstname, lastname])

            return {'fullname': fullname, 'username': username, 'mail': mail}

    raise MoulinetteError(169, m18n.n('user_creation_failed'))
def domain_add(operation_logger, domain, dyndns=False):
    """
    Create a custom domain: optional DynDNS subscription, self-signed
    certificate, LDAP entry, then conf regeneration; rolls the domain back
    on failure.

    Keyword argument:
        operation_logger -- Unit-operation logger (presumably injected by a
                            unit-operation decorator -- TODO confirm)
        domain -- Domain name to add
        dyndns -- Subscribe to DynDNS

    Raises:
        YunohostError -- reserved xmpp-upload. prefix, already-existing
                         domain, DynDNS restrictions, or LDAP failure.
    """
    from yunohost.hook import hook_callback
    from yunohost.app import app_ssowatconf
    from yunohost.utils.ldap import _get_ldap_interface

    # "xmpp-upload." subdomains are reserved for the xmpp upload service.
    if domain.startswith("xmpp-upload."):
        raise YunohostError("domain_cannot_add_xmpp_upload")

    ldap = _get_ldap_interface()

    try:
        ldap.validate_uniqueness({'virtualdomain': domain})
    except MoulinetteError:
        raise YunohostError('domain_exists')

    operation_logger.start()

    # Lower domain to avoid some edge cases issues
    # See: https://forum.yunohost.org/t/invalid-domain-causes-diagnosis-web-to-fail-fr-on-demand/11765
    domain = domain.lower()

    # DynDNS domain
    if dyndns:

        # Do not allow to subscribe to multiple dyndns domains...
        if os.path.exists('/etc/cron.d/yunohost-dyndns'):
            raise YunohostError('domain_dyndns_already_subscribed')

        from yunohost.dyndns import dyndns_subscribe, _dyndns_provides

        # Check that this domain can effectively be provided by
        # dyndns.yunohost.org. (i.e. is it a nohost.me / noho.st)
        if not _dyndns_provides("dyndns.yunohost.org", domain):
            raise YunohostError('domain_dyndns_root_unknown')

        # Actually subscribe
        dyndns_subscribe(domain=domain)

    try:
        import yunohost.certificate
        yunohost.certificate._certificate_install_selfsigned([domain], False)

        attr_dict = {
            'objectClass': ['mailDomain', 'top'],
            'virtualdomain': domain,
        }

        try:
            ldap.add('virtualdomain=%s,ou=domains' % domain, attr_dict)
        except Exception as e:
            raise YunohostError('domain_creation_failed', domain=domain, error=e)

        # Don't regen these conf if we're still in postinstall
        if os.path.exists('/etc/yunohost/installed'):
            # Sometime we have weird issues with the regenconf where some files
            # appears as manually modified even though they weren't touched ...
            # There are a few ideas why this happens (like backup/restore nginx
            # conf ... which we shouldnt do ...). This in turns creates funky
            # situation where the regenconf may refuse to re-create the conf
            # (when re-creating a domain..)
            # So here we force-clear the hash out of the regenconf if it exists.
            # This is a pretty ad hoc solution and only applied to nginx
            # because it's one of the major service, but in the long term we
            # should identify the root of this bug...
            _force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
            regen_conf(
                names=['nginx', 'metronome', 'dnsmasq', 'postfix', 'rspamd'])
            app_ssowatconf()

    except Exception:
        # Force domain removal silently
        try:
            # NOTE(review): passes True positionally as domain_remove's second
            # argument -- presumably 'force' once the unit-operation decorator
            # injects its logger; confirm against the decorator's behavior.
            domain_remove(domain, True)
        except Exception:
            pass
        raise

    hook_callback('post_domain_add', args=[domain])

    logger.success(m18n.n('domain_created'))
def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False):
    """
    Internal function that returns a data structure describing the DNS
    records needed for this domain, grouped by section.

    The result maps a section name to a list of record dicts:

        {
            "basic": [  # A / AAAA on "@" (depending on available IPs)
                {"type": "A", "name": "@", "value": "123.123.123.123", "ttl": 3600},
                {"type": "AAAA", "name": "@", "value": "valid-ipv6", "ttl": 3600},
            ],
            "xmpp": [  # SRV records + CNAME aliases for the xmpp services
                {"type": "SRV", "name": "_xmpp-client._tcp", "value": "0 5 5222 domain.tld.", "ttl": 3600},
                {"type": "SRV", "name": "_xmpp-server._tcp", "value": "0 5 5269 domain.tld.", "ttl": 3600},
                {"type": "CNAME", "name": "muc", "value": "@", "ttl": 3600},
                {"type": "CNAME", "name": "pubsub", "value": "@", "ttl": 3600},
                {"type": "CNAME", "name": "vjud", "value": "@", "ttl": 3600},
                {"type": "CNAME", "name": "xmpp-upload", "value": "@", "ttl": 3600},
            ],
            "mail": [  # MX, SPF, and (when available) DKIM/DMARC TXT records
                {"type": "MX", "name": "@", "value": "10 domain.tld.", "ttl": 3600},
                {"type": "TXT", "name": "@", "value": "\"v=spf1 a mx -all\"", "ttl": 3600},
                {"type": "TXT", "name": "mail._domainkey", "value": "\"v=DKIM1; k=rsa; p=...\"", "ttl": 3600},
                {"type": "TXT", "name": "_dmarc", "value": "\"v=DMARC1; p=none\"", "ttl": 3600},
            ],
            "extra": [  # wildcard A/AAAA + CAA
                {"type": "A", "name": "*", "value": "123.123.123.123", "ttl": 3600},
                {"type": "CAA", "name": "@", "value": "128 issue \"letsencrypt.org\"", "ttl": 3600},
            ],
            "example_of_a_custom_rule": [  # sections added by app hooks
                {"type": "SRV", "name": "_matrix", "value": "domain.tld.", "ttl": 3600},
            ],
        }
    """

    ipv4 = get_public_ip()
    ipv6 = get_public_ip(6)

    def _as_dicts(rows):
        # Convert [name, ttl, type, value] rows into the public dict form.
        return [{"name": name, "ttl": ttl_, "type": type_, "value": value}
                for name, ttl_, type_, value in rows]

    # --- Basic ipv4/ipv6 records ---
    basic = []
    if ipv4:
        basic.append(["@", ttl, "A", ipv4])
    if ipv6:
        basic.append(["@", ttl, "AAAA", ipv6])
    elif include_empty_AAAA_if_no_ipv6:
        # Explicit empty AAAA so a stale record can be flagged/removed.
        basic.append(["@", ttl, "AAAA", None])

    # --- Email ---
    mail = [
        ["@", ttl, "MX", "10 %s." % domain],
        ["@", ttl, "TXT", '"v=spf1 a mx -all"'],
    ]
    # DKIM/DMARC record (only when a DKIM key exists for this domain)
    dkim_host, dkim_publickey = _get_DKIM(domain)
    if dkim_host:
        mail.append([dkim_host, ttl, "TXT", dkim_publickey])
        mail.append(["_dmarc", ttl, "TXT", '"v=DMARC1; p=none"'])

    # --- XMPP ---
    xmpp = [
        ["_xmpp-client._tcp", ttl, "SRV", "0 5 5222 %s." % domain],
        ["_xmpp-server._tcp", ttl, "SRV", "0 5 5269 %s." % domain],
    ]
    xmpp += [[alias, ttl, "CNAME", "@"]
             for alias in ("muc", "pubsub", "vjud", "xmpp-upload")]

    # --- Extra ---
    extra = []
    if ipv4:
        extra.append(["*", ttl, "A", ipv4])
    if ipv6:
        extra.append(["*", ttl, "AAAA", ipv6])
    elif include_empty_AAAA_if_no_ipv6:
        extra.append(["*", ttl, "AAAA", None])
    extra.append(["@", ttl, "CAA", '128 issue "letsencrypt.org"'])

    # --- Standard records ---
    records = {
        "basic": _as_dicts(basic),
        "xmpp": _as_dicts(xmpp),
        "mail": _as_dicts(mail),
        "extra": _as_dicts(extra),
    }

    # --- Custom records, defined by custom hooks shipped in apps ---
    hook_results = hook_callback('custom_dns_rules', args=[domain])
    for hook_name, results in hook_results.items():
        # There can be multiple results per hook name (one per hook script),
        # each carrying its record list in 'stdreturn'.
        custom_records = [v['stdreturn'] for v in results.values()
                          if v and v['stdreturn']]

        records[hook_name] = []
        for record_list in custom_records:
            # Only accept a *list* of dicts each having the four required keys.
            well_formed = (
                isinstance(record_list, list)
                and all(isinstance(record, dict) for record in record_list)
                and all(key in record
                        for record in record_list
                        for key in ("name", "ttl", "type", "value"))
            )
            if not well_formed:
                # Display an error, mainly for app packagers trying to implement a hook
                logger.warning(
                    "Ignored custom record from hook '%s' because the data is not a *list* of dict with keys name, ttl, type and value. Raw data : %s"
                    % (hook_name, record_list))
                continue

            records[hook_name].extend(record_list)

    return records
def _update_ldap_group_permission(permission, allowed, label=None, show_tile=None, protected=None, sync_perm=True):
    """
    Internal function that will rewrite user permission

    permission      -- Name of the permission (e.g. mail or nextcloud or wordpress.editors)
    allowed         -- (optional) A list of group/user to allow for the permission
    label           -- (optional) Define a name for the permission. This label will be shown on the SSO and in the admin
    show_tile       -- (optional) Define if a tile will be shown in the SSO
    protected       -- (optional) Define if the permission can be added/removed to the visitor group
    sync_perm       -- (optional) Trigger a permission sync to users after the LDAP update

    Returns the refreshed permission info dict (as returned by user_permission_info).

    Assumptions made, that should be checked before calling this function:
    - the permission does currently exists ...
    - the 'allowed' list argument is *different* from the current permission state
      ... otherwise ldap will miserably fail in such case...
    - the 'allowed' list contains *existing* groups.
    """

    from yunohost.hook import hook_callback
    from yunohost.utils.ldap import _get_ldap_interface

    ldap = _get_ldap_interface()

    # Snapshot the permission state *before* the update so we can diff
    # users/groups afterwards and fire the add/removeaccess hooks.
    existing_permission = user_permission_info(permission)

    update = {}

    if allowed is not None:
        # Accept a single group/user name as a convenience for a one-element list
        allowed = [allowed] if not isinstance(allowed, list) else allowed
        # Guarantee uniqueness of values in allowed, which would otherwise make ldap.update angry.
        allowed = set(allowed)
        # LDAP stores allowed groups as full DNs under the groupPermission attribute
        update["groupPermission"] = [
            "cn=" + g + ",ou=groups,dc=yunohost,dc=org" for g in allowed
        ]

    if label is not None:
        update["label"] = [str(label)]

    if protected is not None:
        # LDAP booleans are the strings "TRUE" / "FALSE"
        update["isProtected"] = [str(protected).upper()]

    if show_tile is not None:
        if show_tile is True:
            # A tile only makes sense for a plain URL: refuse to enable it when
            # no URL is defined or when the URL is a regex ("re:" prefix).
            if not existing_permission["url"]:
                logger.warning(
                    m18n.n(
                        "show_tile_cant_be_enabled_for_url_not_defined",
                        permission=permission,
                    ))
                show_tile = False
            elif existing_permission["url"].startswith("re:"):
                logger.warning(
                    m18n.n("show_tile_cant_be_enabled_for_regex",
                           permission=permission))
                show_tile = False
        update["showTile"] = [str(show_tile).upper()]

    try:
        ldap.update("cn=%s,ou=permission" % permission, update)
    except Exception as e:
        raise YunohostError("permission_update_failed",
                            permission=permission,
                            error=e)

    # Trigger permission sync if asked
    if sync_perm:
        permission_sync_to_user()

    # Re-read the permission to compute what effectively changed
    new_permission = user_permission_info(permission)

    # Trigger app callbacks
    # Permission names look like "<app>.<sub_permission>" (e.g. "wordpress.editors")
    app = permission.split(".")[0]
    sub_permission = permission.split(".")[1]

    old_corresponding_users = set(existing_permission["corresponding_users"])
    new_corresponding_users = set(new_permission["corresponding_users"])

    old_allowed_users = set(existing_permission["allowed"])
    new_allowed_users = set(new_permission["allowed"])

    effectively_added_users = new_corresponding_users - old_corresponding_users
    effectively_removed_users = old_corresponding_users - new_corresponding_users

    # Groups that changed, excluding entries already counted as individual users
    effectively_added_group = (new_allowed_users - old_allowed_users -
                               effectively_added_users)
    effectively_removed_group = (old_allowed_users - new_allowed_users -
                                 effectively_removed_users)

    if effectively_added_users or effectively_added_group:
        hook_callback(
            "post_app_addaccess",
            args=[
                app,
                ",".join(effectively_added_users),
                sub_permission,
                ",".join(effectively_added_group),
            ],
        )
    if effectively_removed_users or effectively_removed_group:
        hook_callback(
            "post_app_removeaccess",
            args=[
                app,
                ",".join(effectively_removed_users),
                sub_permission,
                ",".join(effectively_removed_group),
            ],
        )

    return new_permission
def domain_add(auth, domain, dyndns=False):
    """
    Create a custom domain

    Keyword argument:
        domain -- Domain name to add
        dyndns -- Subscribe to DynDNS

    Side effects: adds an LDAP virtualdomain entry, generates a self-signed
    certificate under /etc/yunohost/certs/<domain>, regenerates service
    configurations and triggers the 'post_domain_add' hook. On any failure
    after the DynDNS step, the domain is silently removed to roll back.
    """
    from yunohost.hook import hook_callback

    attr_dict = { 'objectClass' : ['mailDomain', 'top'] }
    # NOTE: the previous version computed an unused 'timestamp' local from
    # datetime.now(); it was dead code and has been removed.

    if domain in domain_list(auth)['domains']:
        raise MoulinetteError(errno.EEXIST, m18n.n('domain_exists'))

    # DynDNS domain
    if dyndns:
        # A DynDNS name must be a subdomain of a provided root (x.y.z minimum)
        if len(domain.split('.')) < 3:
            raise MoulinetteError(errno.EINVAL, m18n.n('domain_dyndns_invalid'))
        from yunohost.dyndns import dyndns_subscribe

        try:
            r = requests.get('https://dyndns.yunohost.org/domains')
        except requests.ConnectionError:
            # Best-effort: if the DynDNS service is unreachable, skip subscription
            pass
        else:
            dyndomains = json.loads(r.text)
            dyndomain = '.'.join(domain.split('.')[1:])
            if dyndomain in dyndomains:
                # Only one DynDNS subscription is allowed per server
                if os.path.exists('/etc/cron.d/yunohost-dyndns'):
                    raise MoulinetteError(errno.EPERM,
                                          m18n.n('domain_dyndns_already_subscribed'))
                dyndns_subscribe(domain=domain)
            else:
                raise MoulinetteError(errno.EINVAL,
                                      m18n.n('domain_dyndns_root_unknown'))

    try:
        # Generate a self-signed certificate for the domain via the local CA.
        # NOTE(review): these shell commands interpolate 'domain' directly;
        # callers are expected to pass a validated domain name — worth
        # confirming upstream validation before trusting this with raw input.
        ssl_dir = '/usr/share/yunohost/yunohost-config/ssl/yunoCA'
        ssl_domain_path = '/etc/yunohost/certs/%s' % domain
        with open('%s/serial' % ssl_dir, 'r') as f:
            serial = f.readline().rstrip()
        try:
            os.listdir(ssl_domain_path)
        except OSError:
            os.makedirs(ssl_domain_path)

        command_list = [
            'cp %s/openssl.cnf %s' % (ssl_dir, ssl_domain_path),
            'sed -i "s/yunohost.org/%s/g" %s/openssl.cnf' % (domain, ssl_domain_path),
            'openssl req -new -config %s/openssl.cnf -days 3650 -out %s/certs/yunohost_csr.pem -keyout %s/certs/yunohost_key.pem -nodes -batch' % (ssl_domain_path, ssl_dir, ssl_dir),
            'openssl ca -config %s/openssl.cnf -days 3650 -in %s/certs/yunohost_csr.pem -out %s/certs/yunohost_crt.pem -batch' % (ssl_domain_path, ssl_dir, ssl_dir),
            'ln -s /etc/ssl/certs/ca-yunohost_crt.pem %s/ca.pem' % ssl_domain_path,
            'cp %s/certs/yunohost_key.pem %s/key.pem' % (ssl_dir, ssl_domain_path),
            'cp %s/newcerts/%s.pem %s/crt.pem' % (ssl_dir, serial, ssl_domain_path),
            'chmod 755 %s' % ssl_domain_path,
            'chmod 640 %s/key.pem' % ssl_domain_path,
            'chmod 640 %s/crt.pem' % ssl_domain_path,
            'chmod 600 %s/openssl.cnf' % ssl_domain_path,
            'chown root:metronome %s/key.pem' % ssl_domain_path,
            'chown root:metronome %s/crt.pem' % ssl_domain_path,
            'cat %s/ca.pem >> %s/crt.pem' % (ssl_domain_path, ssl_domain_path)
        ]

        for command in command_list:
            if os.system(command) != 0:
                raise MoulinetteError(errno.EIO,
                                      m18n.n('domain_cert_gen_failed'))

        try:
            auth.validate_uniqueness({ 'virtualdomain': domain })
        except MoulinetteError:
            raise MoulinetteError(errno.EEXIST, m18n.n('domain_exists'))

        attr_dict['virtualdomain'] = domain

        if not auth.add('virtualdomain=%s,ou=domains' % domain, attr_dict):
            raise MoulinetteError(errno.EIO, m18n.n('domain_creation_failed'))

        # Only regenerate configs on an installed system: opening the marker
        # file raises IOError (silently ignored) during post-install.
        try:
            with open('/etc/yunohost/installed', 'r') as f:
                service_regen_conf(names=[
                    'nginx', 'metronome', 'dnsmasq', 'rmilter'])
                os.system('yunohost app ssowatconf > /dev/null 2>&1')
        except IOError:
            pass
    except:
        # Force domain removal silently, then re-raise the original error
        try:
            domain_remove(auth, domain, True)
        except:
            pass
        raise

    hook_callback('post_domain_add', args=[domain])

    logger.success(m18n.n('domain_created'))
def backup_restore(name, ignore_apps=False, force=False):
    """
    Restore from a local backup archive

    Keyword argument:
        name -- Name of the local backup archive
        ignore_apps -- Do not restore apps
        force -- Force restauration on an already installed system

    Raises MoulinetteError (EIO) when the archive cannot be opened or is
    invalid, and (EEXIST) when restoring over an installed system without
    confirmation.
    """
    from yunohost.hook import hook_add
    from yunohost.hook import hook_callback

    # Retrieve and open the archive
    archive_file = backup_info(name)['path']
    try:
        tar = tarfile.open(archive_file, "r:gz")
    except:
        logger.exception("unable to open the archive '%s' for reading",
                         archive_file)
        raise MoulinetteError(errno.EIO, m18n.n('backup_archive_open_failed'))

    # Check temporary directory
    tmp_dir = "%s/tmp/%s" % (backup_path, name)
    if os.path.isdir(tmp_dir):
        logger.warning("temporary directory for restoration '%s' already exists",
                       tmp_dir)
        os.system('rm -rf %s' % tmp_dir)

    # Extract the tarball
    msignals.display(m18n.n('backup_extracting_archive'))
    tar.extractall(tmp_dir)
    tar.close()

    # Retrieve backup info
    try:
        with open("%s/info.json" % tmp_dir, 'r') as f:
            info = json.load(f)
    except IOError:
        logger.error("unable to retrieve backup info from '%s/info.json'",
                     tmp_dir)
        raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive'))
    else:
        logger.info("restoring from backup '%s' created on %s", name,
                    time.ctime(info['created_at']))

    # Retrieve domain from the backup
    try:
        with open("%s/yunohost/current_host" % tmp_dir, 'r') as f:
            domain = f.readline().rstrip()
    except IOError:
        logger.error("unable to retrieve domain from '%s/yunohost/current_host'",
                     tmp_dir)
        raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive'))

    # Check if YunoHost is installed
    if os.path.isfile('/etc/yunohost/installed'):
        msignals.display(m18n.n('yunohost_already_installed'), 'warning')
        if not force:
            try:
                # Ask confirmation for restoring
                i = msignals.prompt(m18n.n('restore_confirm_yunohost_installed',
                                           answers='y/N'))
            # FIX: was "except NotImplemented:" — NotImplemented is the
            # binary-op singleton, not an exception class, so the clause
            # could never catch the NotImplementedError raised by prompt()
            # on non-interactive interfaces.
            except NotImplementedError:
                pass
            else:
                if i == 'y' or i == 'Y':
                    force = True
            if not force:
                raise MoulinetteError(errno.EEXIST, m18n.n('restore_failed'))
    else:
        # YunoHost is not installed yet: run the post-install with the
        # domain found in the backup before restoring anything
        from yunohost.tools import tools_postinstall
        logger.info("executing the post-install...")
        tools_postinstall(domain, 'yunohost', True)

    # Add apps restore hook
    if not ignore_apps:
        for app_id in info['apps'].keys():
            hook = "/etc/yunohost/apps/%s/scripts/restore" % app_id
            if os.path.isfile(hook):
                hook_add(app_id, hook)
                logger.info("app '%s' will be restored", app_id)
            else:
                msignals.display(m18n.n('unrestore_app', app_id), 'warning')

    # Run hooks
    msignals.display(m18n.n('restore_running_hooks'))
    hook_callback('restore', [tmp_dir])

    # Remove temporary directory
    os.system('rm -rf %s' % tmp_dir)

    msignals.display(m18n.n('restore_complete'), 'success')
def domain_remove(operation_logger, domain, remove_apps=False, force=False):
    """
    Delete domains

    Keyword argument:
        domain -- Domain to delete
        remove_apps -- Remove applications installed on the domain
        force -- Force the domain removal and don't ask confirmation to
                 remove apps if remove_apps is specified

    Raises YunohostValidationError for user-input problems (unknown domain,
    main domain, apps still installed) and YunohostError when the LDAP
    deletion itself fails.
    """
    from yunohost.hook import hook_callback
    from yunohost.app import app_ssowatconf, app_info, app_remove
    from yunohost.utils.ldap import _get_ldap_interface

    # the 'force' here is related to the exception happening in domain_add ...
    # we don't want to check the domain exists because the ldap add may have
    # failed
    if not force and domain not in domain_list()['domains']:
        raise YunohostValidationError('domain_name_unknown', domain=domain)

    # Check domain is not the main domain
    if domain == _get_maindomain():
        other_domains = domain_list()["domains"]
        other_domains.remove(domain)

        if other_domains:
            raise YunohostValidationError(
                "domain_cannot_remove_main",
                domain=domain,
                other_domains="\n * " + ("\n * ".join(other_domains)),
            )
        else:
            raise YunohostValidationError(
                "domain_cannot_remove_main_add_new_one", domain=domain)

    # Check if apps are installed on the domain
    apps_on_that_domain = []

    for app in _installed_apps():
        settings = _get_app_settings(app)
        label = app_info(app)["name"]
        if settings.get("domain") == domain:
            # Build a human-readable " - app "label" on https://..." line when
            # the app has a path, otherwise just keep the app id
            apps_on_that_domain.append(
                (app,
                 " - %s \"%s\" on https://%s%s" %
                 (app, label, domain, settings["path"])
                 if "path" in settings else app))

    if apps_on_that_domain:
        if remove_apps:
            # In CLI, confirm interactively before nuking the apps
            if msettings.get('interface') == "cli" and not force:
                answer = msignals.prompt(m18n.n(
                    'domain_remove_confirm_apps_removal',
                    apps="\n".join([x[1] for x in apps_on_that_domain]),
                    answers='y/N'),
                                         color="yellow")
                if answer.upper() != "Y":
                    raise YunohostError("aborting")

            for app, _ in apps_on_that_domain:
                app_remove(app)
        else:
            raise YunohostValidationError(
                'domain_uninstall_app_first',
                apps="\n".join([x[1] for x in apps_on_that_domain]))

    operation_logger.start()

    ldap = _get_ldap_interface()
    try:
        ldap.remove("virtualdomain=" + domain + ",ou=domains")
    except Exception as e:
        raise YunohostError("domain_deletion_failed", domain=domain, error=e)

    # Remove the domain's certificates
    os.system("rm -rf /etc/yunohost/certs/%s" % domain)

    # Delete dyndns keys for this domain (if any)
    os.system('rm -rf /etc/yunohost/dyndns/K%s.+*' % domain)

    # Sometime we have weird issues with the regenconf where some files
    # appears as manually modified even though they weren't touched ...
    # There are a few ideas why this happens (like backup/restore nginx
    # conf ... which we shouldnt do ...). This in turns creates funky
    # situation where the regenconf may refuse to re-create the conf
    # (when re-creating a domain..)
    #
    # So here we force-clear the has out of the regenconf if it exists.
    # This is a pretty ad hoc solution and only applied to nginx
    # because it's one of the major service, but in the long term we
    # should identify the root of this bug...
    _force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
    # And in addition we even force-delete the file Otherwise, if the file was
    # manually modified, it may not get removed by the regenconf which leads to
    # catastrophic consequences of nginx breaking because it can't load the
    # cert file which disappeared etc..
    if os.path.exists("/etc/nginx/conf.d/%s.conf" % domain):
        _process_regen_conf("/etc/nginx/conf.d/%s.conf" % domain,
                            new_conf=None,
                            save=True)

    regen_conf(names=["nginx", "metronome", "dnsmasq", "postfix"])
    app_ssowatconf()

    hook_callback("post_domain_remove", args=[domain])

    logger.success(m18n.n("domain_deleted"))
def firewall_reload():
    """
    Reload all firewall rules

    Flushes and rebuilds the iptables/ip6tables INPUT chains from the saved
    firewall rules, re-opens the SSH port if needed, then triggers the
    'post_iptable_rules' hook and refreshes UPnP forwarding if enabled.
    Raises MoulinetteError (ESRCH) when neither IPv4 nor IPv6 rules could
    be reloaded. Returns the current firewall list.
    """
    from yunohost.hook import hook_callback

    reloaded = False
    errors = False

    # Check if SSH port is allowed
    ssh_port = _get_ssh_port()
    if ssh_port not in firewall_list()['opened_ports']:
        firewall_allow(ssh_port, no_reload=True)

    # Retrieve firewall rules and UPnP status
    firewall = firewall_list(raw=True)
    upnp = firewall_upnp()['enabled']

    # IPv4
    try:
        # Probe iptables availability before attempting to rebuild the chain
        process.check_output("iptables -L")
    except process.CalledProcessError as e:
        logger.info('iptables seems to be not available, it outputs:\n%s',
                    prependlines(e.output.rstrip(), '> '))
        msignals.display(m18n.n('iptables_unavailable'), 'info')
    else:
        # Flush everything, then accept established connections first
        rules = [
            "iptables -F",
            "iptables -X",
            "iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ['TCP', 'UDP']:
            for port in firewall['ipv4'][protocol]:
                rules.append("iptables -A INPUT -p %s --dport %s -j ACCEPT" \
                             % (protocol, process.quote(str(port))))
        # Loopback and ICMP stay open; default policy becomes DROP last
        rules += [
            "iptables -A INPUT -i lo -j ACCEPT",
            "iptables -A INPUT -p icmp -j ACCEPT",
            "iptables -P INPUT DROP",
        ]

        # Execute each rule
        if process.check_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # IPv6 (same sequence with ip6tables / icmpv6)
    try:
        process.check_output("ip6tables -L")
    except process.CalledProcessError as e:
        logger.info('ip6tables seems to be not available, it outputs:\n%s',
                    prependlines(e.output.rstrip(), '> '))
        msignals.display(m18n.n('ip6tables_unavailable'), 'info')
    else:
        rules = [
            "ip6tables -F",
            "ip6tables -X",
            "ip6tables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ['TCP', 'UDP']:
            for port in firewall['ipv6'][protocol]:
                rules.append("ip6tables -A INPUT -p %s --dport %s -j ACCEPT" \
                             % (protocol, process.quote(str(port))))
        rules += [
            "ip6tables -A INPUT -i lo -j ACCEPT",
            "ip6tables -A INPUT -p icmpv6 -j ACCEPT",
            "ip6tables -P INPUT DROP",
        ]

        # Execute each rule
        if process.check_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    if not reloaded:
        raise MoulinetteError(errno.ESRCH, m18n.n('firewall_reload_failed'))

    hook_callback('post_iptable_rules',
                  args=[upnp, os.path.exists("/proc/net/if_inet6")])

    if upnp:
        # Refresh port forwarding with UPnP
        firewall_upnp(no_refresh=False)

    # TODO: Use service_restart
    os.system("service fail2ban restart")

    if errors:
        msignals.display(m18n.n('firewall_rules_cmd_failed'), 'warning')
    else:
        msignals.display(m18n.n('firewall_reloaded'), 'success')

    return firewall_list()
def backup_create(name=None, description=None, output_directory=None, no_compress=False, ignore_apps=False): """ Create a backup local archive Keyword arguments: name -- Name of the backup archive description -- Short description of the backup output_directory -- Output directory for the backup no_compress -- Do not create an archive file ignore_apps -- Do not backup apps """ # TODO: Add a 'clean' argument to clean output directory from yunohost.hook import hook_add from yunohost.hook import hook_callback tmp_dir = None # Validate and define backup name timestamp = int(time.time()) if not name: name = str(timestamp) if name in backup_list()['archives']: raise MoulinetteError(errno.EINVAL, m18n.n('backup_archive_name_exists')) # Validate additional arguments if no_compress and not output_directory: raise MoulinetteError(errno.EINVAL, m18n.n('backup_output_directory_required')) if output_directory: output_directory = os.path.abspath(output_directory) # Check for forbidden folders if output_directory.startswith(archives_path) or \ re.match(r'^/(|(bin|boot|dev|etc|lib|root|run|sbin|sys|usr|var)(|/.*))$', output_directory): logger.error("forbidden output directory '%'", output_directory) raise MoulinetteError(errno.EINVAL, m18n.n('backup_output_directory_forbidden')) # Create the output directory if not os.path.isdir(output_directory): logger.info("creating output directory '%s'", output_directory) os.makedirs(output_directory, 0750) # Check that output directory is empty elif no_compress and os.listdir(output_directory): logger.error("not empty output directory '%'", output_directory) raise MoulinetteError(errno.EIO, m18n.n('backup_output_directory_not_empty')) # Define temporary directory if no_compress: tmp_dir = output_directory else: output_directory = archives_path # Create temporary directory if not tmp_dir: tmp_dir = "%s/tmp/%s" % (backup_path, name) if os.path.isdir(tmp_dir): logger.warning("temporary directory for backup '%s' already exists", tmp_dir) 
os.system('rm -rf %s' % tmp_dir) try: os.mkdir(tmp_dir, 0750) except OSError: # Create temporary directory recursively os.makedirs(tmp_dir, 0750) os.system('chown -hR admin: %s' % backup_path) else: os.system('chown -hR admin: %s' % tmp_dir) # Initialize backup info info = { 'description': description or '', 'created_at': timestamp, 'apps': {}, } # Add apps backup hook if not ignore_apps: from yunohost.app import app_info try: for app_id in os.listdir('/etc/yunohost/apps'): hook = '/etc/yunohost/apps/%s/scripts/backup' % app_id if os.path.isfile(hook): hook_add(app_id, hook) # Add app info i = app_info(app_id) info['apps'][app_id] = { 'version': i['version'], } else: logger.warning("unable to find app's backup hook '%s'", hook) msignals.display(m18n.n('unbackup_app', app_id), 'warning') except IOError as e: logger.info("unable to add apps backup hook: %s", str(e)) # Run hooks msignals.display(m18n.n('backup_running_hooks')) hook_callback('backup', [tmp_dir]) # Create backup info file with open("%s/info.json" % tmp_dir, 'w') as f: f.write(json.dumps(info)) # Create the archive if not no_compress: msignals.display(m18n.n('backup_creating_archive')) archive_file = "%s/%s.tar.gz" % (output_directory, name) try: tar = tarfile.open(archive_file, "w:gz") except: tar = None # Create the archives directory and retry if not os.path.isdir(archives_path): os.mkdir(archives_path, 0750) try: tar = tarfile.open(archive_file, "w:gz") except: logger.exception("unable to open the archive '%s' for writing " "after creating directory '%s'", archive_file, archives_path) tar = None else: logger.exception("unable to open the archive '%s' for writing", archive_file) if tar is None: raise MoulinetteError(errno.EIO, m18n.n('backup_archive_open_failed')) tar.add(tmp_dir, arcname='') tar.close() # Copy info file os.system('mv %s/info.json %s/%s.info.json' % (tmp_dir, archives_path, name)) # Clean temporary directory if tmp_dir != output_directory: os.system('rm -rf %s' % tmp_dir) 
msignals.display(m18n.n('backup_complete'), 'success')
def firewall_reload():
    """
    Reload all firewall rules

    Rebuilds iptables (and ip6tables when IPv6 is available) INPUT chains
    from the saved firewall rules, refreshing UPnP port mappings first when
    enabled. Raises MoulinetteError (ESRCH) if iptables is unavailable.
    Returns the current firewall list.
    """
    from yunohost.hook import hook_callback

    firewall = firewall_list(raw=True)
    upnp = firewall['uPnP']['enabled']

    # IPv4
    # Temporarily open the INPUT policy; also serves as an availability probe
    if os.system("iptables -P INPUT ACCEPT") != 0:
        raise MoulinetteError(errno.ESRCH, m18n.n('iptables_unavailable'))
    if upnp:
        try:
            upnpc = miniupnpc.UPnP()
            upnpc.discoverdelay = 3000
            if upnpc.discover() == 1:
                upnpc.selectigd()
                for protocol in ['TCP', 'UDP']:
                    for port in firewall['uPnP'][protocol]:
                        # Re-create each mapping from scratch; deletion is
                        # best-effort (mapping may not exist yet)
                        if upnpc.getspecificportmapping(port, protocol):
                            try:
                                upnpc.deleteportmapping(port, protocol)
                            except:
                                pass
                        upnpc.addportmapping(port, protocol, upnpc.lanaddr,
                                             port, 'yunohost firewall : port %d' % port, '')
            else:
                raise MoulinetteError(errno.ENXIO, m18n.n('upnp_dev_not_found'))
        except:
            # UPnP failures never block the firewall reload itself
            msignals.display(m18n.n('upnp_port_open_failed'), 'warning')

    # Flush chains and accept established connections first
    os.system("iptables -F")
    os.system("iptables -X")
    os.system("iptables -A INPUT -m state --state ESTABLISHED -j ACCEPT")

    # Make sure SSH (22) stays reachable
    if 22 not in firewall['ipv4']['TCP']:
        firewall_allow(22)

    # Loop
    for protocol in ['TCP', 'UDP']:
        for port in firewall['ipv4'][protocol]:
            os.system("iptables -A INPUT -p %s --dport %d -j ACCEPT" % (protocol, port))

    hook_callback('post_iptable_rules',
                  [upnp, os.path.exists("/proc/net/if_inet6")])

    os.system("iptables -A INPUT -i lo -j ACCEPT")
    os.system("iptables -A INPUT -p icmp -j ACCEPT")
    # Default policy DROP is set last so the ACCEPT rules are already in place
    os.system("iptables -P INPUT DROP")

    # IPv6 (only when the kernel exposes IPv6 support)
    if os.path.exists("/proc/net/if_inet6"):
        os.system("ip6tables -P INPUT ACCEPT")
        os.system("ip6tables -F")
        os.system("ip6tables -X")
        os.system("ip6tables -A INPUT -m state --state ESTABLISHED -j ACCEPT")

        if 22 not in firewall['ipv6']['TCP']:
            firewall_allow(22, ipv6=True)

        # Loop v6
        for protocol in ['TCP', 'UDP']:
            for port in firewall['ipv6'][protocol]:
                os.system("ip6tables -A INPUT -p %s --dport %d -j ACCEPT" % (protocol, port))

        os.system("ip6tables -A INPUT -i lo -j ACCEPT")
        os.system("ip6tables -A INPUT -p icmpv6 -j ACCEPT")
        os.system("ip6tables -P INPUT DROP")

    os.system("service fail2ban restart")

    msignals.display(m18n.n('firewall_reloaded'), 'success')

    return firewall_list()
def firewall_reload(skip_upnp=False):
    """
    Reload all firewall rules

    Keyword arguments:
        skip_upnp -- Do not refresh port forwarding using UPnP

    Flushes and rebuilds the iptables/ip6tables INPUT chains from the saved
    firewall rules, re-opens the SSH port if needed, triggers the
    'post_iptable_rules' hook and refreshes UPnP forwarding unless skipped.
    Raises MoulinetteError (ESRCH) when neither IPv4 nor IPv6 rules could
    be reloaded. Returns the current firewall list.
    """
    from yunohost.hook import hook_callback

    reloaded = False
    errors = False

    # Check if SSH port is allowed
    ssh_port = _get_ssh_port()
    if ssh_port not in firewall_list()['opened_ports']:
        firewall_allow('TCP', ssh_port, no_reload=True)

    # Retrieve firewall rules and UPnP status
    firewall = firewall_list(raw=True)
    upnp = firewall_upnp()['enabled'] if not skip_upnp else False

    # IPv4
    try:
        process.check_output("iptables -w -L")
    except process.CalledProcessError as e:
        logger.debug('iptables seems to be not available, it outputs:\n%s',
                     prependlines(e.output.rstrip(), '> '))
        logger.warning(m18n.n('iptables_unavailable'))
    else:
        rules = [
            "iptables -w -F",
            "iptables -w -X",
            "iptables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ['TCP', 'UDP']:
            for port in firewall['ipv4'][protocol]:
                rules.append("iptables -w -A INPUT -p %s --dport %s -j ACCEPT"
                             % (protocol, process.quote(str(port))))
        rules += [
            "iptables -w -A INPUT -i lo -j ACCEPT",
            "iptables -w -A INPUT -p icmp -j ACCEPT",
            "iptables -w -P INPUT DROP",
        ]

        # Execute each rule
        if process.check_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # IPv6
    try:
        # FIX: the availability probe was missing '-w' (wait for the xtables
        # lock), unlike the IPv4 probe and every other ip6tables call in this
        # function; a transiently held lock would wrongly skip all IPv6 rules.
        process.check_output("ip6tables -w -L")
    except process.CalledProcessError as e:
        logger.debug('ip6tables seems to be not available, it outputs:\n%s',
                     prependlines(e.output.rstrip(), '> '))
        logger.warning(m18n.n('ip6tables_unavailable'))
    else:
        rules = [
            "ip6tables -w -F",
            "ip6tables -w -X",
            "ip6tables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ['TCP', 'UDP']:
            for port in firewall['ipv6'][protocol]:
                rules.append("ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT"
                             % (protocol, process.quote(str(port))))
        rules += [
            "ip6tables -w -A INPUT -i lo -j ACCEPT",
            "ip6tables -w -A INPUT -p icmpv6 -j ACCEPT",
            "ip6tables -w -P INPUT DROP",
        ]

        # Execute each rule
        if process.check_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    if not reloaded:
        raise MoulinetteError(errno.ESRCH, m18n.n('firewall_reload_failed'))

    hook_callback('post_iptable_rules',
                  args=[upnp, os.path.exists("/proc/net/if_inet6")])

    if upnp:
        # Refresh port forwarding with UPnP
        firewall_upnp(no_refresh=False)

    # TODO: Use service_restart
    os.system("service fail2ban restart")

    if errors:
        logger.warning(m18n.n('firewall_rules_cmd_failed'))
    else:
        logger.success(m18n.n('firewall_reloaded'))

    return firewall_list()