def _give_lock(action, service, p):
    """Forward the moulinette lock to the PID systemctl spawned for `service`.

    Polls systemctl for the child PID while the subprocess `p` is still
    running, then appends that PID to the moulinette lock file so the
    spawned process is allowed to run yunohost commands.
    """
    # Depending of the action, systemctl calls the PID differently :/
    pid_property = "MainPID" if action in ("start", "restart") else "ControlPID"

    pid_query = "systemctl show %s -p %s" % (service, pid_property)

    child_pid = 0
    # As long as we did not found the PID and that the command is still running
    while child_pid == 0 and p.poll() is None:
        # Call systemctl to get the PID
        # Output of the command is e.g. ControlPID=1234
        child_pid = int(check_output(pid_query).split("=")[1])
        time.sleep(1)

    # If we found a PID
    if child_pid != 0:
        # Append the PID to the lock file
        logger.debug(
            "Giving a lock to PID %s for service %s !" % (str(child_pid), service)
        )
        append_to_file(MOULINETTE_LOCK, "\n%s" % str(child_pid))

    return child_pid
def _list_upgradable_apt_packages():
    """Yield one dict per upgradable apt package.

    Each yielded dict has keys: name, new_version, current_version.
    """
    # List upgradable packages
    # LC_ALL=C is here to make sure the results are in english
    raw = check_output("LC_ALL=C apt list --upgradable")

    # Dirty parsing of the output
    for entry in (l.strip() for l in raw.split("\n") if l.strip()):
        # Remove stupid warning and verbose messages >.>
        if "apt does not have a stable CLI interface" in entry or "Listing..." in entry:
            continue

        # entry should look like :
        # yunohost/stable 3.5.0.2+201903211853 all [upgradable from: 3.4.2.4+201903080053]
        fields = entry.split()
        if len(fields) != 6:
            logger.warning("Failed to parse this line : %s" % " ".join(fields))
            continue

        yield {
            "name": fields[0].split("/")[0],
            "new_version": fields[1],
            "current_version": fields[5].strip("]"),
        }
def get_domain_expiration(self, domain):
    """
    Return the expiration datetime of a domain or None
    """
    out = check_output("whois -H %s || echo failed" % (domain)).split("\n")

    def is_meaningful(line):
        # A meaningful whois line looks like "Some Key: value" and is not
        # one of the usual boilerplate/notice lines
        if not re.search(r"^[a-zA-Z0-9 ]{4,25}:", line, re.IGNORECASE):
            return False
        if re.match(r">>> Last update of whois", line, re.IGNORECASE):
            return False
        if re.match(r"^NOTICE:", line, re.IGNORECASE):
            return False
        if re.match(r"^%%", line, re.IGNORECASE):
            return False
        if re.match(r'"https?:"', line, re.IGNORECASE):
            return False
        return True

    # Reduce output to determine if whois answer is equivalent to NOT FOUND
    filtered_out = [line for line in out if is_meaningful(line)]

    # If there is less than 7 lines, it's NOT FOUND response
    if len(filtered_out) <= 6:
        return "not_found"

    # Scan for an expiration date in either ISO (2024-01-31)
    # or day-month abbreviation (31-Jan-2024) format
    for line in out:
        match = re.search(r"Expir.+(\d{4}-\d{2}-\d{2})", line, re.IGNORECASE)
        if match is not None:
            return datetime.strptime(match.group(1), "%Y-%m-%d")

        match = re.search(r"Expir.+(\d{2}-\w{3}-\d{4})", line, re.IGNORECASE)
        if match is not None:
            return datetime.strptime(match.group(1), "%d-%b-%Y")

    return "expiration_not_found"
def check_queue(self):
    """
    Check mail queue is not filled with hundreds of email pending
    """
    command = 'postqueue -p | grep -v "Mail queue is empty" | grep -c "^[A-Z0-9]" || true'
    try:
        pending_emails = int(check_output(command))
    except (ValueError, CalledProcessError) as e:
        # postqueue failed or returned garbage : report the queue as unavailable
        yield dict(
            meta={"test": "mail_queue"},
            data={"error": str(e)},
            status="ERROR",
            summary="diagnosis_mail_queue_unavailable",
            details="diagnosis_mail_queue_unavailable_details",
        )
    else:
        # More than 100 pending mails is considered suspiciously large
        too_big = pending_emails > 100
        yield dict(
            meta={"test": "mail_queue"},
            data={'nb_pending': pending_emails},
            status="WARNING" if too_big else "SUCCESS",
            summary="diagnosis_mail_queue_too_big" if too_big else "diagnosis_mail_queue_ok",
        )
def get_public_ip_from_remote_server(protocol=4):
    """Retrieve the public IP address from ip.yunohost.org"""

    # We can know that ipv6 is not available directly if this file does not exists
    if protocol == 6 and not os.path.exists("/proc/net/if_inet6"):
        logger.debug("IPv6 appears not at all available on the system, so assuming there's no IP address for that version")
        return None

    # If we are indeed connected in ipv4 or ipv6, we should find a default route
    routes = check_output("ip -%s route show table all" % protocol).split("\n")

    def looks_like_default_route(route):
        # Typically the default route starts with "default"
        # But of course IPv6 is more complex ... e.g. on internet cube there's
        # no default route but a /3 which acts as a default-like route...
        # e.g. 2000:/3 dev tun0 ...
        if route.startswith("default"):
            return True
        return ":" in route and re.match(r".*/[0-3]$", route.split()[0])

    if not any(looks_like_default_route(route) for route in routes):
        logger.debug("No default route for IPv%s, so assuming there's no IP address for that version" % protocol)
        return None

    url = 'https://ip%s.yunohost.org' % (protocol if protocol != 4 else '')
    logger.debug("Fetching IP from %s " % url)

    try:
        return download_text(url, timeout=30).strip()
    except Exception as e:
        logger.debug("Could not get public IPv%s : %s" % (str(protocol), str(e)))
        return None
def _get_journalctl_logs(service, number="all"):
    """Return the journalctl output for `service` as a string.

    The service name is resolved through the services registry because a
    yunohost service may be an alias for a differently-named systemd unit
    ("actual_systemd_service").

    On any failure the traceback is returned as text instead of raising,
    so callers can always display *something*.
    """
    services = _get_services()
    systemd_service = services.get(service, {}).get("actual_systemd_service", service)
    try:
        return check_output(
            "journalctl --no-hostname --no-pager -u {0} -n{1}".format(
                systemd_service, number
            )
        )
    # Narrowed from a bare `except:` which would also swallow
    # SystemExit / KeyboardInterrupt
    except Exception:
        import traceback

        return "error while get services logs from journalctl:\n%s" % traceback.format_exc()
def dpkg_is_broken():
    """Return True if dpkg appears to be in a broken/interrupted state."""
    # Any output from 'dpkg --audit' means something is wrong
    if check_output("dpkg --audit") != "":
        return True

    # If dpkg is broken, /var/lib/dpkg/updates
    # will contains files like 0001, 0002, ...
    # ref: https://sources.debian.org/src/apt/1.4.9/apt-pkg/deb/debsystem.cc/#L141-L174
    updates_dir = "/var/lib/dpkg/updates/"
    if not os.path.isdir(updates_dir):
        return False

    return any(re.match("^[0-9]+$", entry) for entry in os.listdir(updates_dir))
def get_gateway():
    """Return the default gateway address parsed from 'ip route show',
    or None if no (unambiguous) default route is found."""
    output = check_output('ip route show')
    match = re.search(r'default via (.*) dev ([a-z]+[0-9]?)', output)
    if not match:
        return None

    addr = _extract_inet(match.group(1), True)
    # Only answer when exactly one address was extracted
    if len(addr) != 1:
        return None
    return addr.popitem()[1]
def get_apps_equivs_packages(self):
    """Return the list of installed "-ynh-deps" equivs packages (one per app)."""
    # dpkg selections, minus removed packages, keeping only names that
    # end with 'ynh-deps' (the || true avoids a non-zero grep exit on no match)
    out = check_output(
        "dpkg --get-selections"
        " | grep -v deinstall"
        " | awk '{print $1}'"
        " | { grep 'ynh-deps$' || true; }"
    )
    return out.split('\n') if out else []
def debian_major_version(self):
    """Return the Debian major version (e.g. 9, 10) as an int."""
    # The python module "platform" and lsb_release are not reliable because
    # on some setup, they may still return Release=9 even after upgrading to
    # buster ... (Apparently this is related to OVH overriding some stuff
    # with /etc/lsb-release for instance -_-)
    # Instead, we rely on /etc/os-release which should be the raw info from
    # the distribution...
    cmd = "grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"
    return int(check_output(cmd))
def validate_and_upgrade_cert_if_necessary(self):
    """Make sure active nginx certs don't use weak signature hashes (md5/sha1).

    Newer nginx refuses such certs, which would prevent it from restarting
    after the upgrade. The default self-signed yunohost.org cert is simply
    regenerated if weak; any other weak cert triggers a YunohostError.
    """

    # Cert paths referenced by the nginx configuration.
    # Filter out empty entries: when grep matches nothing, split("\n")
    # yields [''] and the previous code then ran openssl on an empty
    # path, which crashed with a CalledProcessError.
    active_certs = set(
        cert
        for cert in check_output("grep -roh '/.*crt.pem' /etc/nginx/").split("\n")
        if cert.strip()
    )

    cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq"

    default_crt = "/etc/yunohost/certs/yunohost.org/crt.pem"
    default_key = "/etc/yunohost/certs/yunohost.org/key.pem"
    default_signature = (
        check_output(cmd % default_crt) if default_crt in active_certs else None
    )
    # The default cert can be regenerated on the spot if it's weak
    if default_signature is not None and (
        default_signature.startswith("md5") or default_signature.startswith("sha1")
    ):
        logger.warning(
            "%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one"
            % default_crt
        )

        # Keep a backup so we can revert if regeneration fails
        os.system("mv %s %s.old" % (default_crt, default_crt))
        os.system("mv %s %s.old" % (default_key, default_key))
        ret = os.system("/usr/share/yunohost/hooks/conf_regen/02-ssl init")

        if ret != 0 or not os.path.exists(default_crt):
            logger.error("Upgrading the certificate failed ... reverting")
            os.system("mv %s.old %s" % (default_crt, default_crt))
            os.system("mv %s.old %s" % (default_key, default_key))

    # Check the signature algorithm of every remaining active cert
    signatures = {cert: check_output(cmd % cert) for cert in active_certs}

    def cert_is_weak(cert):
        sig = signatures[cert]
        return sig.startswith("md5") or sig.startswith("sha1")

    weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)]
    if weak_certs:
        raise YunohostError(
            "migration_0015_weak_certs", certs=", ".join(weak_certs)
        )
def bad_sury_packages(self):
    """Yield (package, version) pairs for ssl-related packages that were
    installed from the sury repo and should be downgraded to the debian
    version."""
    for package in ("openssl", "libssl1.1", "libssl-dev"):
        # If version currently installed is not from sury, nothing to report
        check_cmd = "dpkg --list | grep '^ii' | grep gbp | grep -q -w %s" % package
        if os.system(check_cmd) != 0:
            continue

        # Ask apt which non-sury ('+deb', non-gbp) version is available
        version_cmd = "LC_ALL=C apt policy %s 2>&1 | grep http -B1 | tr -d '*' | grep '+deb' | grep -v 'gbp' | head -n 1 | awk '{print $1}'" % package
        yield (package, check_output(version_cmd))
def can_ping_outside(self, protocol=4):
    """Return True if at least one well-known DNS resolver answers a ping
    over the given IP protocol version (4 or 6), False otherwise."""

    assert protocol in [
        4, 6
    ], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(
        protocol)

    # We can know that ipv6 is not available directly if this file does not exists
    if protocol == 6 and not os.path.exists("/proc/net/if_inet6"):
        return False

    # If we are indeed connected in ipv4 or ipv6, we should find a default route
    routes = check_output("ip -%s route show table all" % protocol).split("\n")

    def is_default_route(r):
        # Typically the default route starts with "default"
        # But of course IPv6 is more complex ... e.g. on internet cube there's
        # no default route but a /3 which acts as a default-like route...
        # e.g. 2000:/3 dev tun0 ...
        return r.startswith("default") or (":" in r and re.match(
            r".*/[0-3]$", r.split()[0]))

    if not any(is_default_route(r) for r in routes):
        self.logger_debug(
            "No default route for IPv%s, so assuming there's no IP address for that version"
            % protocol)
        # Return False (was None) so this predicate consistently returns a bool
        return False

    # We use the resolver file as a list of well-known, trustable (ie not google ;)) IPs that we can ping
    resolver_file = "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf"
    resolvers = [
        r.split(" ")[1] for r in read_file(resolver_file).split("\n")
        if r.startswith("nameserver")
    ]

    # Keep only the resolvers matching the requested protocol version
    if protocol == 4:
        resolvers = [r for r in resolvers if ":" not in r]
    if protocol == 6:
        resolvers = [r for r in resolvers if ":" in r]

    assert resolvers != [], "Uhoh, need at least one IPv%s DNS resolver in %s ..." % (
        protocol, resolver_file)

    # So let's try to ping the first 4~5 resolvers (shuffled)
    # If we succesfully ping any of them, we conclude that we are indeed connected
    def ping(protocol, target):
        return os.system("ping%s -c1 -W 3 %s >/dev/null 2>/dev/null" %
                         ("" if protocol == 4 else "6", target)) == 0

    random.shuffle(resolvers)
    return any(ping(protocol, resolver) for resolver in resolvers[:5])
def get_network_interfaces():
    """Return {interface_name: parsed addresses} from 'ip addr show',
    excluding the loopback interface."""

    # Get network devices and their addresses (raw infos from 'ip addr')
    output = check_output('ip addr show')
    devices_raw = {}
    for chunk in re.split(r'^(?:[0-9]+: )', output, flags=re.MULTILINE):
        # Extract device name (1) and its addresses (2)
        m = re.match(r'([^\s@]+)(?:@[\S]+)?: (.*)', chunk, flags=re.DOTALL)
        if not m:
            continue
        devices_raw[m.group(1)] = m.group(2)

    # Parse relevant informations for each of them
    return {
        name: _extract_inet(addrs)
        for name, addrs in devices_raw.items()
        if name != "lo"
    }
def get_ynh_package_version(package):
    """Return {'version': ..., 'repo': ...} for an installed yunohost package.

    The repo is 'stable', 'testing' or 'unstable'.
    NB: this is designed for yunohost packages only !
    Not tested for any arbitrary packages that
    may handle changelog differently !
    """
    changelog = "/usr/share/doc/%s/changelog.gz" % package
    if not os.path.exists(changelog):
        return {"version": "?", "repo": "?"}

    # First changelog line looks like : "yunohost (1.2.3) testing; urgency=medium"
    cmd = "gzip -cd %s 2>/dev/null | head -n1" % changelog
    out = check_output(cmd).split()
    return {"version": out[1].strip("()"), "repo": out[2].strip(";")}
def analyzed_kern_log():
    """Yield the names of processes reaped by the OOM killer within the
    last 24 hours, most recent first, parsed from /var/log/kern.log."""

    cmd = 'tail -n 10000 /var/log/kern.log | grep "oom_reaper: reaped process" || true'
    out = check_output(cmd)
    lines = out.split("\n") if out else []

    now = datetime.datetime.now()

    for line in reversed(lines):
        # Lines look like :
        # Aug 25 18:48:21 yolo kernel: [ 9623.613667] oom_reaper: reaped process 11509 (uwsgi), now anon-rss:0kB, file-rss:0kB, shmem-rss:328kB
        # NOTE: kern.log timestamps carry no year, so we assume the current
        # one (lines from late December analyzed in January may be skipped)
        date_str = str(now.year) + " " + " ".join(line.split()[:3])
        date = datetime.datetime.strptime(date_str, "%Y %b %d %H:%M:%S")
        diff = now - date
        if diff.days >= 1:
            break

        match = re.search(r"\(.*\)", line)
        # Guard against lines without a "(procname)" group — previously a
        # non-matching line crashed with AttributeError on .group()
        if match is None:
            continue
        yield match.group().strip("()")
def manually_modified_files_compared_to_debian_default(
        ignore_handled_by_regenconf=False):
    """List dpkg conffiles whose md5sum no longer matches the packaged one.

    ignore_handled_by_regenconf -- also exclude files managed by regen-conf.
    """

    # from https://serverfault.com/a/90401
    files = check_output(
        "dpkg-query -W -f='${Conffiles}\n' '*' "
        "| awk 'OFS=\" \"{print $2,$1}' "
        "| md5sum -c 2>/dev/null "
        "| awk -F': ' '$2 !~ /OK/{print $1}'"
    )
    # Filter empty entries : when no file was modified at all, the previous
    # code returned [''] (a list with one empty string) instead of []
    files = [f for f in files.strip().split("\n") if f]

    if ignore_handled_by_regenconf:
        regenconf_categories = _get_regenconf_infos()
        regenconf_files = []
        for infos in regenconf_categories.values():
            regenconf_files.extend(infos["conffiles"].keys())

        files = [f for f in files if f not in regenconf_files]

    return files
def user_info(username):
    """
    Get user informations

    Keyword argument:
        username -- Username or mail to get informations

    """
    from yunohost.utils.ldap import _get_ldap_interface
    ldap = _get_ldap_interface()

    user_attrs = [
        'cn', 'mail', 'uid', 'maildrop', 'givenName', 'sn', 'mailuserquota'
    ]

    # The argument may be either a uid or a mail address : pick the
    # LDAP filter accordingly
    if len(username.split('@')) == 2:
        filter = 'mail=' + username
    else:
        filter = 'uid=' + username

    result = ldap.search('ou=users,dc=yunohost,dc=org', filter, user_attrs)

    if result:
        user = result[0]
    else:
        raise YunohostError('user_unknown', user=username)

    result_dict = {
        'username': user['uid'][0],
        'fullname': user['cn'][0],
        'firstname': user['givenName'][0],
        'lastname': user['sn'][0],
        'mail': user['mail'][0]
    }

    # Any additional mail / maildrop entries are aliases / forwards
    if len(user['mail']) > 1:
        result_dict['mail-aliases'] = user['mail'][1:]

    if len(user['maildrop']) > 1:
        result_dict['mail-forward'] = user['maildrop'][1:]

    if 'mailuserquota' in user:
        userquota = user['mailuserquota'][0]

        if isinstance(userquota, int):
            userquota = str(userquota)

        # Test if userquota is '0' or '0M' ( quota pattern is ^(\d+[bkMGT])|0$ )
        is_limited = not re.match('0[bkMGT]?', userquota)
        storage_use = '?'

        # Quota usage can only be fetched if dovecot is up and the user
        # actually has the mail permission
        if service_status("dovecot")["status"] != "running":
            logger.warning(m18n.n('mailbox_used_space_dovecot_down'))
        elif username not in user_permission_info(
                "mail.main")["corresponding_users"]:
            logger.warning(m18n.n('mailbox_disabled', user=username))
        else:
            try:
                cmd = 'doveadm -f flow quota get -u %s' % user['uid'][0]
                cmd_result = check_output(cmd)
            except Exception as e:
                # Best-effort : keep going with an empty result and just warn
                cmd_result = ""
                logger.warning("Failed to fetch quota info ... : %s " % str(e))

            # Exemple of return value for cmd:
            # """Quota name=User quota Type=STORAGE Value=0 Limit=- %=0
            # Quota name=User quota Type=MESSAGE Value=0 Limit=- %=0"""
            has_value = re.search(r'Value=(\d+)', cmd_result)

            if has_value:
                storage_use = int(has_value.group(1))
                storage_use = _convertSize(storage_use)

                # Only show a percentage when a quota limit actually applies
                if is_limited:
                    has_percent = re.search(r'%=(\d+)', cmd_result)

                    if has_percent:
                        percentage = int(has_percent.group(1))
                        storage_use += ' (%s%%)' % percentage

        result_dict['mailbox-quota'] = {
            'limit': userquota if is_limited else m18n.n('unlimit'),
            'use': storage_use
        }

    return result_dict
def run(self):
    """Yield base-system diagnosis reports : hardware/virtualization, kernel,
    Debian release, yunohost packages consistency, and known security issues."""

    # Detect virt technology (if not bare metal) and arch
    # Gotta have this "|| true" because it systemd-detect-virt return 'none'
    # with an error code on bare metal ~.~
    virt = check_output("systemd-detect-virt || true", shell=True)
    if virt.lower() == "none":
        virt = "bare-metal"

    # Detect arch
    arch = check_output("dpkg --print-architecture")
    hardware = dict(
        meta={"test": "hardware"},
        status="INFO",
        data={"virt": virt, "arch": arch},
        summary="diagnosis_basesystem_hardware",
    )

    # Also possibly the board / hardware name
    # (device-tree exists on ARM boards; DMI info on x86 machines)
    if os.path.exists("/proc/device-tree/model"):
        model = read_file("/proc/device-tree/model").strip().replace("\x00", "")
        hardware["data"]["model"] = model
        hardware["details"] = ["diagnosis_basesystem_hardware_model"]
    elif os.path.exists("/sys/devices/virtual/dmi/id/sys_vendor"):
        model = read_file("/sys/devices/virtual/dmi/id/sys_vendor").strip()
        if os.path.exists("/sys/devices/virtual/dmi/id/product_name"):
            model = "%s %s" % (
                model,
                read_file("/sys/devices/virtual/dmi/id/product_name").strip(),
            )
        hardware["data"]["model"] = model
        hardware["details"] = ["diagnosis_basesystem_hardware_model"]

    yield hardware

    # Kernel version
    kernel_version = read_file("/proc/sys/kernel/osrelease").strip()
    yield dict(
        meta={"test": "kernel"},
        data={"kernel_version": kernel_version},
        status="INFO",
        summary="diagnosis_basesystem_kernel",
    )

    # Debian release
    debian_version = read_file("/etc/debian_version").strip()
    yield dict(
        meta={"test": "host"},
        data={"debian_version": debian_version},
        status="INFO",
        summary="diagnosis_basesystem_host",
    )

    # Yunohost packages versions
    # We check if versions are consistent (e.g. all 3.6 and not 3 packages
    # with 3.6 and the other with 3.5)
    # This is a classical issue for upgrades that failed in the middle
    # (or people upgrading half of the package because they did 'apt upgrade' instead of 'dist-upgrade')
    # Here, ynh_core_version is for example "3.5.4.12", so [:3] is "3.5" and we check it's the same for all packages
    ynh_packages = ynh_packages_version()
    ynh_core_version = ynh_packages["yunohost"]["version"]
    consistent_versions = all(infos["version"][:3] == ynh_core_version[:3]
                              for infos in ynh_packages.values())
    ynh_version_details = [(
        "diagnosis_basesystem_ynh_single_version",
        {
            "package": package,
            "version": infos["version"],
            "repo": infos["repo"],
        },
    ) for package, infos in ynh_packages.items()]

    yield dict(
        meta={"test": "ynh_versions"},
        data={
            "main_version": ynh_core_version,
            "repo": ynh_packages["yunohost"]["repo"],
        },
        status="INFO" if consistent_versions else "ERROR",
        summary="diagnosis_basesystem_ynh_main_version"
        if consistent_versions else
        "diagnosis_basesystem_ynh_inconsistent_versions",
        details=ynh_version_details,
    )

    # Meltdown CPU vulnerability
    if self.is_vulnerable_to_meltdown():
        yield dict(
            meta={"test": "meltdown"},
            status="ERROR",
            summary="diagnosis_security_vulnerable_to_meltdown",
            details=["diagnosis_security_vulnerable_to_meltdown_details"],
        )

    # Packages accidentally installed from the sury repo : propose the
    # exact downgrade command to fix it
    bad_sury_packages = list(self.bad_sury_packages())
    if bad_sury_packages:
        cmd_to_fix = "apt install --allow-downgrades " + " ".join([
            "%s=%s" % (package, version)
            for package, version in bad_sury_packages
        ])
        yield dict(
            meta={"test": "packages_from_sury"},
            data={"cmd_to_fix": cmd_to_fix},
            status="WARNING",
            summary="diagnosis_package_installed_from_sury",
            details=["diagnosis_package_installed_from_sury_details"],
        )

    if self.backports_in_sources_list():
        yield dict(
            meta={"test": "backports_in_sources_list"},
            status="WARNING",
            summary="diagnosis_backports_in_sources_list",
        )
def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgrade=True):
    """
    Update apps & package cache, then display changelog

    Keyword arguments:
       apps -- List of apps to upgrade (or [] to update all apps)
       system -- True to upgrade system
    """
    from yunohost.utils import packages

    if packages.dpkg_is_broken():
        raise YunohostValidationError("dpkg_is_broken")

    # Check for obvious conflict with other dpkg/apt commands already running in parallel
    if not packages.dpkg_lock_available():
        raise YunohostValidationError("dpkg_lock_not_available")

    # Upgrading apps and system are mutually exclusive, and at least one
    # of the two must be requested
    if system is not False and apps is not None:
        raise YunohostValidationError("tools_upgrade_cant_both")

    if system is False and apps is None:
        raise YunohostValidationError("tools_upgrade_at_least_one")

    #
    # Apps
    # This is basically just an alias to yunohost app upgrade ...
    #

    if apps is not None:

        # Make sure there's actually something to upgrade

        upgradable_apps = [app["id"] for app in _list_upgradable_apps()]

        if not upgradable_apps or (len(apps) and all(
                app not in upgradable_apps for app in apps)):
            logger.info(m18n.n("apps_already_up_to_date"))
            return

        # Actually start the upgrades

        try:
            app_upgrade(app=apps)
        except Exception as e:
            logger.warning("unable to upgrade apps: %s" % str(e))
            logger.error(m18n.n("app_upgrade_some_app_failed"))

        return

    #
    # System
    #

    if system is True:

        # Check that there's indeed some packages to upgrade
        upgradables = list(_list_upgradable_apt_packages())
        if not upgradables:
            logger.info(m18n.n("already_up_to_date"))

        logger.info(m18n.n("upgrading_packages"))
        operation_logger.start()

        # Critical packages are packages that we can't just upgrade
        # randomly from yunohost itself... upgrading them is likely to
        # kill/restart the yunohost command currently running, so they are
        # handled separately below via a detached self-upgrade script
        critical_packages = [
            "moulinette", "yunohost", "yunohost-admin", "ssowat"
        ]

        critical_packages_upgradable = [
            p["name"] for p in upgradables if p["name"] in critical_packages
        ]
        noncritical_packages_upgradable = [
            p["name"] for p in upgradables
            if p["name"] not in critical_packages
        ]

        # Prepare dist-upgrade command
        dist_upgrade = "DEBIAN_FRONTEND=noninteractive"
        dist_upgrade += " APT_LISTCHANGES_FRONTEND=none"
        dist_upgrade += " apt-get"
        dist_upgrade += (
            " --fix-broken --show-upgraded --assume-yes --quiet -o=Dpkg::Use-Pty=0"
        )
        for conf_flag in ["old", "miss", "def"]:
            dist_upgrade += ' -o Dpkg::Options::="--force-conf{}"'.format(
                conf_flag)
        dist_upgrade += " dist-upgrade"

        #
        # "Regular" packages upgrade
        #
        if noncritical_packages_upgradable:

            logger.info(m18n.n("tools_upgrade_regular_packages"))

            # Mark all critical packages as held so the regular
            # dist-upgrade cannot touch them
            for package in critical_packages:
                check_output("apt-mark hold %s" % package)

            # Doublecheck with apt-mark showhold that packages are indeed held ...
            held_packages = check_output("apt-mark showhold").split("\n")
            if any(p not in held_packages for p in critical_packages):
                logger.warning(
                    m18n.n("tools_upgrade_cant_hold_critical_packages"))
                operation_logger.error(m18n.n("packages_upgrade_failed"))
                raise YunohostError(m18n.n("packages_upgrade_failed"))

            logger.debug("Running apt command :\n{}".format(dist_upgrade))

            def is_relevant(line):
                # Lines to keep out of the user-facing log
                irrelevants = [
                    "service sudo-ldap already provided",
                    "Reading database ...",
                ]
                return all(i not in line.rstrip() for i in irrelevants)

            # (stdout callback, stderr callback) for the async apt run
            callbacks = (
                lambda l: logger.info("+ " + l.rstrip() + "\r")
                if is_relevant(l) else logger.debug(l.rstrip() + "\r"),
                lambda l: logger.warning(l.rstrip())
                if is_relevant(l) else logger.debug(l.rstrip()),
            )
            returncode = call_async_output(dist_upgrade, callbacks, shell=True)
            if returncode != 0:
                # Recompute the upgradable list to report what actually failed
                upgradables = list(_list_upgradable_apt_packages())
                noncritical_packages_upgradable = [
                    p["name"] for p in upgradables
                    if p["name"] not in critical_packages
                ]
                logger.warning(
                    m18n.n(
                        "tools_upgrade_regular_packages_failed",
                        packages_list=", ".join(
                            noncritical_packages_upgradable),
                    ))
                operation_logger.error(m18n.n("packages_upgrade_failed"))
                raise YunohostError(m18n.n("packages_upgrade_failed"))

        #
        # Critical packages upgrade
        #
        if critical_packages_upgradable and allow_yunohost_upgrade:

            logger.info(m18n.n("tools_upgrade_special_packages"))

            # Mark all critical packages as unheld
            for package in critical_packages:
                check_output("apt-mark unhold %s" % package)

            # Doublecheck with apt-mark showhold that packages are indeed unheld ...
            held_packages = check_output("apt-mark showhold").split("\n")
            if any(p in held_packages for p in critical_packages):
                logger.warning(
                    m18n.n("tools_upgrade_cant_unhold_critical_packages"))
                operation_logger.error(m18n.n("packages_upgrade_failed"))
                raise YunohostError(m18n.n("packages_upgrade_failed"))

            #
            # Here we use a dirty hack to run a command after the current
            # "yunohost tools upgrade", because the upgrade of yunohost
            # will also trigger other yunohost commands (e.g. "yunohost tools migrations run")
            # (also the upgrade of the package, if executed from the webadmin, is
            # likely to kill/restart the api which is in turn likely to kill this
            # command before it ends...)
            #
            logfile = operation_logger.log_path
            dist_upgrade = dist_upgrade + " 2>&1 | tee -a {}".format(logfile)

            MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock"
            wait_until_end_of_yunohost_command = (
                "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK))
            mark_success = (
                "(echo 'Done!' | tee -a {} && echo 'success: true' >> {})".
                format(logfile, operation_logger.md_path))
            mark_failure = (
                "(echo 'Failed :(' | tee -a {} && echo 'success: false' >> {})"
                .format(logfile, operation_logger.md_path))
            update_log_metadata = "sed -i \"s/ended_at: .*$/ended_at: $(date -u +'%Y-%m-%d %H:%M:%S.%N')/\" {}"
            update_log_metadata = update_log_metadata.format(
                operation_logger.md_path)

            # Dirty hack such that the operation_logger does not add ended_at
            # and success keys in the log metadata. (c.f. the code of the
            # is_unit_operation + operation_logger.close()) We take care of
            # this ourselves (c.f. the mark_success and updated_log_metadata in
            # the huge command launched by os.system)
            operation_logger.ended_at = "notyet"

            upgrade_completed = "\n" + m18n.n(
                "tools_upgrade_special_packages_completed")
            command = "({wait} && {dist_upgrade}) && {mark_success} || {mark_failure}; {update_metadata}; echo '{done}'".format(
                wait=wait_until_end_of_yunohost_command,
                dist_upgrade=dist_upgrade,
                mark_success=mark_success,
                mark_failure=mark_failure,
                update_metadata=update_log_metadata,
                done=upgrade_completed,
            )

            logger.warning(
                m18n.n("tools_upgrade_special_packages_explanation"))
            logger.debug("Running command :\n{}".format(command))
            open("/tmp/yunohost-selfupgrade",
                 "w").write("rm /tmp/yunohost-selfupgrade; " + command)
            # Using systemd-run --scope is like nohup/disown and &, but more robust somehow
            # (despite using nohup/disown and &, the self-upgrade process was still getting killed...)
            # ref: https://unix.stackexchange.com/questions/420594/why-process-killed-with-nohup
            # (though I still don't understand it 100%...)
            os.system("systemd-run --scope bash /tmp/yunohost-selfupgrade &")
            return

        else:
            logger.success(m18n.n("system_upgraded"))
            operation_logger.success()
def firewall_reload(skip_upnp=False):
    """
    Reload all firewall rules

    Keyword arguments:
        skip_upnp -- Do not refresh port forwarding using UPnP

    """
    from yunohost.hook import hook_callback
    from yunohost.service import _run_service_command

    reloaded = False
    errors = False

    # Check if SSH port is allowed
    ssh_port = _get_ssh_port()
    if ssh_port not in firewall_list()["opened_ports"]:
        firewall_allow("TCP", ssh_port, no_reload=True)

    # Retrieve firewall rules and UPnP status
    firewall = firewall_list(raw=True)
    upnp = firewall_upnp()["enabled"] if not skip_upnp else False

    # IPv4
    # Probe iptables first : a failure means it's unavailable (e.g. in
    # some containers) and the IPv4 rules are skipped entirely
    try:
        process.check_output("iptables -w -L")
    except process.CalledProcessError as e:
        logger.debug(
            "iptables seems to be not available, it outputs:\n%s",
            prependlines(e.output.rstrip(), "> "),
        )
        logger.warning(m18n.n("iptables_unavailable"))
    else:
        # Flush everything, then accept established connections
        rules = [
            "iptables -w -F",
            "iptables -w -X",
            "iptables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ["TCP", "UDP"]:
            for port in firewall["ipv4"][protocol]:
                rules.append(
                    "iptables -w -A INPUT -p %s --dport %s -j ACCEPT"
                    % (protocol, process.quote(str(port))))
        # Accept loopback and ICMP, drop everything else
        rules += [
            "iptables -w -A INPUT -i lo -j ACCEPT",
            "iptables -w -A INPUT -p icmp -j ACCEPT",
            "iptables -w -P INPUT DROP",
        ]

        # Execute each rule
        if process.run_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # IPv6 (same logic as IPv4, with ip6tables)
    try:
        process.check_output("ip6tables -L")
    except process.CalledProcessError as e:
        logger.debug(
            "ip6tables seems to be not available, it outputs:\n%s",
            prependlines(e.output.rstrip(), "> "),
        )
        logger.warning(m18n.n("ip6tables_unavailable"))
    else:
        rules = [
            "ip6tables -w -F",
            "ip6tables -w -X",
            "ip6tables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ["TCP", "UDP"]:
            for port in firewall["ipv6"][protocol]:
                rules.append(
                    "ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT"
                    % (protocol, process.quote(str(port))))
        rules += [
            "ip6tables -w -A INPUT -i lo -j ACCEPT",
            "ip6tables -w -A INPUT -p icmpv6 -j ACCEPT",
            "ip6tables -w -P INPUT DROP",
        ]

        # Execute each rule
        if process.run_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # Neither IPv4 nor IPv6 rules could be applied
    if not reloaded:
        raise YunohostError("firewall_reload_failed")

    hook_callback("post_iptable_rules",
                  args=[upnp, os.path.exists("/proc/net/if_inet6")])

    if upnp:
        # Refresh port forwarding with UPnP
        firewall_upnp(no_refresh=False)

    _run_service_command("reload", "fail2ban")

    if errors:
        logger.warning(m18n.n("firewall_rules_cmd_failed"))
    else:
        logger.success(m18n.n("firewall_reloaded"))

    return firewall_list()
def _tail(n, file_path):
    """Return the last `n` lines of `file_path` as a string.

    The path is shell-quoted with shlex.quote : the previous naive
    single-quote wrapping broke (and was injectable) for paths containing
    a single quote.
    """
    from shlex import quote

    from moulinette.utils.process import check_output

    return check_output(f"tail -n {n} {quote(str(file_path))}")
def firewall_reload(skip_upnp=False):
    """
    Reload all firewall rules

    Keyword arguments:
        skip_upnp -- Do not refresh port forwarding using UPnP

    """
    from yunohost.hook import hook_callback

    reloaded = False
    errors = False

    # Check if SSH port is allowed
    ssh_port = _get_ssh_port()
    if ssh_port not in firewall_list()['opened_ports']:
        firewall_allow('TCP', ssh_port, no_reload=True)

    # Retrieve firewall rules and UPnP status
    firewall = firewall_list(raw=True)
    upnp = firewall_upnp()['enabled'] if not skip_upnp else False

    # IPv4
    # Probe iptables first : a failure means it's unavailable (e.g. in
    # some containers) and the IPv4 rules are skipped entirely
    try:
        process.check_output("iptables -L")
    except process.CalledProcessError as e:
        logger.debug('iptables seems to be not available, it outputs:\n%s',
                     prependlines(e.output.rstrip(), '> '))
        logger.warning(m18n.n('iptables_unavailable'))
    else:
        # Flush everything, then accept established connections
        rules = [
            "iptables -F",
            "iptables -X",
            "iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ['TCP', 'UDP']:
            for port in firewall['ipv4'][protocol]:
                rules.append("iptables -A INPUT -p %s --dport %s -j ACCEPT" \
                    % (protocol, process.quote(str(port))))
        # Accept loopback and ICMP, drop everything else
        rules += [
            "iptables -A INPUT -i lo -j ACCEPT",
            "iptables -A INPUT -p icmp -j ACCEPT",
            "iptables -P INPUT DROP",
        ]

        # Execute each rule
        if process.check_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # IPv6 (same logic as IPv4, with ip6tables)
    try:
        process.check_output("ip6tables -L")
    except process.CalledProcessError as e:
        logger.debug('ip6tables seems to be not available, it outputs:\n%s',
                     prependlines(e.output.rstrip(), '> '))
        logger.warning(m18n.n('ip6tables_unavailable'))
    else:
        rules = [
            "ip6tables -F",
            "ip6tables -X",
            "ip6tables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
        ]
        # Iterate over ports and add rule
        for protocol in ['TCP', 'UDP']:
            for port in firewall['ipv6'][protocol]:
                rules.append("ip6tables -A INPUT -p %s --dport %s -j ACCEPT" \
                    % (protocol, process.quote(str(port))))
        rules += [
            "ip6tables -A INPUT -i lo -j ACCEPT",
            "ip6tables -A INPUT -p icmpv6 -j ACCEPT",
            "ip6tables -P INPUT DROP",
        ]

        # Execute each rule
        if process.check_commands(rules, callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # Neither IPv4 nor IPv6 rules could be applied
    if not reloaded:
        raise MoulinetteError(errno.ESRCH, m18n.n('firewall_reload_failed'))

    hook_callback('post_iptable_rules',
                  args=[upnp, os.path.exists("/proc/net/if_inet6")])

    if upnp:
        # Refresh port forwarding with UPnP
        firewall_upnp(no_refresh=False)

    # TODO: Use service_restart
    os.system("service fail2ban restart")

    if errors:
        logger.warning(m18n.n('firewall_rules_cmd_failed'))
    else:
        logger.success(m18n.n('firewall_reloaded'))

    return firewall_list()
def test_check_output(test_file):
    """check_output returns stripped stdout for both the list-form
    (shell=False) and the string-form (default shell) invocations."""
    path = str(test_file)
    expected = "foo\nbar"
    assert check_output(["cat", path], shell=False) == expected
    assert check_output("cat %s" % path) == expected
    def run(self):
        """Perform the Debian Stretch -> Buster system migration.

        High-level sequence: sanity checks, certificate hardening,
        sources.list patching + apt update, debconf pre-seeding, a few
        package-specific upgrades (unscd, libpam-modules), the main
        dist-upgrade with app packages held, then the YunoHost packages
        themselves.

        Raises:
            YunohostError -- if the system is still on Stretch after the
                             main upgrade
        """

        # Abort early if preconditions (disk space, current state...) fail
        self.check_assertions()

        logger.info(m18n.n("migration_0015_start"))

        #
        # Make sure certificates do not use weak signature hash algorithms (md5, sha1)
        # otherwise nginx will later refuse to start which result in
        # catastrophic situation
        #
        self.validate_and_upgrade_cert_if_necessary()

        #
        # Patch sources.list
        #
        logger.info(m18n.n("migration_0015_patching_sources_list"))
        self.patch_apt_sources_list()
        tools_update(system=True)

        # Tell libc6 it's okay to restart system stuff during the upgrade
        os.system(
            "echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections"
        )

        # Don't send an email to root about the postgresql migration. It should be handled automatically after.
        os.system(
            "echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections"
        )

        #
        # Specific packages upgrades
        #
        logger.info(m18n.n("migration_0015_specific_upgrade"))

        # Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster,
        # which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number)
        unscd_version = check_output(
            'dpkg -s unscd | grep "^Version: " | cut -d " " -f 2')
        if "yunohost" in unscd_version:
            # Pick the candidate version from the repos (skipping the
            # currently-installed '***' line in apt policy output)
            new_version = check_output(
                "LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'"
            ).strip()
            if new_version:
                self.apt_install('unscd=%s --allow-downgrades' % new_version)
            else:
                # Best-effort: keep going with the current unscd rather than abort
                logger.warning(
                    "Could not identify which version of unscd to install")

        # Upgrade libpam-modules independently, small issue related to willing to overwrite a file previously provided by Yunohost
        libpammodules_version = check_output(
            'dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2')
        if not libpammodules_version.startswith("1.3"):
            self.apt_install(
                'libpam-modules -o Dpkg::Options::="--force-overwrite"')

        #
        # Main upgrade
        #
        logger.info(m18n.n("migration_0015_main_upgrade"))

        # Hold app-provided equivs packages so the dist-upgrade does not
        # remove or upgrade them; they are released again further below
        apps_packages = self.get_apps_equivs_packages()
        self.hold(apps_packages)
        tools_upgrade(system=True, allow_yunohost_upgrade=False)

        if self.debian_major_version() == 9:
            raise YunohostError(
                "migration_0015_still_on_stretch_after_main_upgrade")

        # Clean the mess
        logger.info(m18n.n("migration_0015_cleaning_up"))
        os.system("apt autoremove --assume-yes")
        os.system("apt clean --assume-yes")

        #
        # Yunohost upgrade
        #
        logger.info(m18n.n("migration_0015_yunohost_upgrade"))

        self.unhold(apps_packages)
        tools_upgrade(system=True)
def firewall_reload():
    """
    Reload all firewall rules

    """
    from yunohost.hook import hook_callback

    reloaded = False
    errors = False

    # Keep the SSH port open so we never lock ourselves out
    ssh_port = _get_ssh_port()
    if ssh_port not in firewall_list()['opened_ports']:
        firewall_allow(ssh_port, no_reload=True)

    # Retrieve firewall rules and UPnP status
    firewall = firewall_list(raw=True)
    upnp = firewall_upnp()['enabled']

    def _build_rules(binary, family, icmp):
        # Flush everything, keep established connections, open each
        # configured port, then switch the default policy to DROP
        rule_list = [
            "%s -F" % binary,
            "%s -X" % binary,
            "%s -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT" % binary,
        ]
        for proto in ['TCP', 'UDP']:
            rule_list.extend(
                "%s -A INPUT -p %s --dport %s -j ACCEPT"
                % (binary, proto, process.quote(str(port)))
                for port in firewall[family][proto])
        rule_list += [
            "%s -A INPUT -i lo -j ACCEPT" % binary,
            "%s -A INPUT -p %s -j ACCEPT" % (binary, icmp),
            "%s -P INPUT DROP" % binary,
        ]
        return rule_list

    # IPv4
    try:
        process.check_output("iptables -L")
    except process.CalledProcessError as e:
        logger.info('iptables seems to be not available, it outputs:\n%s',
                    prependlines(e.output.rstrip(), '> '))
        msignals.display(m18n.n('iptables_unavailable'), 'info')
    else:
        if process.check_commands(_build_rules("iptables", "ipv4", "icmp"),
                                  callback=_on_rule_command_error):
            errors = True
        reloaded = True

    # IPv6
    try:
        process.check_output("ip6tables -L")
    except process.CalledProcessError as e:
        logger.info('ip6tables seems to be not available, it outputs:\n%s',
                    prependlines(e.output.rstrip(), '> '))
        msignals.display(m18n.n('ip6tables_unavailable'), 'info')
    else:
        if process.check_commands(_build_rules("ip6tables", "ipv6", "icmpv6"),
                                  callback=_on_rule_command_error):
            errors = True
        reloaded = True

    if not reloaded:
        raise MoulinetteError(errno.ESRCH, m18n.n('firewall_reload_failed'))

    hook_callback('post_iptable_rules',
                  args=[upnp, os.path.exists("/proc/net/if_inet6")])

    if upnp:
        # Refresh port forwarding with UPnP
        firewall_upnp(no_refresh=False)

    # TODO: Use service_restart
    os.system("service fail2ban restart")

    if errors:
        msignals.display(m18n.n('firewall_rules_cmd_failed'), 'warning')
    else:
        msignals.display(m18n.n('firewall_reloaded'), 'success')
    return firewall_list()
def user_info(username):
    """
    Get user informations

    Keyword argument:
        username -- Username or mail to get informations

    Returns a dict with username, fullname, firstname, lastname, mail,
    and optionally mail-aliases, mail-forward and mailbox-quota.

    Raises:
        YunohostValidationError -- if no user matches the given name/mail
    """
    from yunohost.utils.ldap import _get_ldap_interface

    ldap = _get_ldap_interface()

    user_attrs = [
        "cn", "mail", "uid", "maildrop", "givenName", "sn", "mailuserquota"
    ]

    # A value containing exactly one '@' is treated as a mail address
    if len(username.split("@")) == 2:
        ldap_filter = "mail=" + username
    else:
        ldap_filter = "uid=" + username

    result = ldap.search("ou=users,dc=yunohost,dc=org", ldap_filter, user_attrs)

    if result:
        user = result[0]
    else:
        raise YunohostValidationError("user_unknown", user=username)

    result_dict = {
        "username": user["uid"][0],
        "fullname": user["cn"][0],
        "firstname": user["givenName"][0],
        "lastname": user["sn"][0],
        "mail": user["mail"][0],
    }

    # First mail entry is the main address, extra ones are aliases
    if len(user["mail"]) > 1:
        result_dict["mail-aliases"] = user["mail"][1:]

    if len(user["maildrop"]) > 1:
        result_dict["mail-forward"] = user["maildrop"][1:]

    if "mailuserquota" in user:
        userquota = user["mailuserquota"][0]

        if isinstance(userquota, int):
            userquota = str(userquota)

        # Test if userquota is '0' or '0M' ( quota pattern is ^(\d+[bkMGT])|0$ )
        # fullmatch so that e.g. '01M' is not misread as unlimited
        is_limited = not re.fullmatch("0[bkMGT]?", userquota)
        storage_use = "?"

        if service_status("dovecot")["status"] != "running":
            logger.warning(m18n.n("mailbox_used_space_dovecot_down"))
        elif username not in user_permission_info(
                "mail.main")["corresponding_users"]:
            logger.warning(m18n.n("mailbox_disabled", user=username))
        else:
            # Ask dovecot for the actual mailbox usage; best-effort, a
            # failure only means the usage stays unknown ('?')
            try:
                cmd = "doveadm -f flow quota get -u %s" % user["uid"][0]
                cmd_result = check_output(cmd)
            except Exception as e:
                cmd_result = ""
                logger.warning("Failed to fetch quota info ... : %s " % str(e))

            # Exemple of return value for cmd:
            # """Quota name=User quota Type=STORAGE Value=0 Limit=- %=0
            # Quota name=User quota Type=MESSAGE Value=0 Limit=- %=0"""
            has_value = re.search(r"Value=(\d+)", cmd_result)

            if has_value:
                storage_use = int(has_value.group(1))
                storage_use = _convertSize(storage_use)

                # Show the used percentage only when a limit exists
                if is_limited:
                    has_percent = re.search(r"%=(\d+)", cmd_result)

                    if has_percent:
                        percentage = int(has_percent.group(1))
                        storage_use += " (%s%%)" % percentage

        result_dict["mailbox-quota"] = {
            "limit": userquota if is_limited else m18n.n("unlimit"),
            "use": storage_use,
        }

    return result_dict