Code Example #1
def test_read_file_missing_file():
    bad_file = "doesnt-exist"

    with pytest.raises(MoulinetteError) as exception:
        read_file(bad_file)

    translation = m18n.g("file_not_exist", path=bad_file)
    expected_msg = translation.format(path=bad_file)
    assert expected_msg in str(exception)
Code Example #2
def test_read_file_cannot_read_exception(test_file, mocker):
    error = "foobar"

    mocker.patch("builtins.open", side_effect=Exception(error))
    with pytest.raises(MoulinetteError) as exception:
        read_file(str(test_file))

    translation = m18n.g("unknown_error_reading_file", file=str(test_file), error=error)
    expected_msg = translation.format(file=str(test_file), error=error)
    assert expected_msg in str(exception)
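Taken together, these two tests pin down the error contract that read_file is expected to honour: a missing path raises MoulinetteError carrying the "file_not_exist" translation, and any lower-level failure while reading is wrapped into "unknown_error_reading_file". For orientation only, here is a minimal sketch with that contract; the helper name and the local exception class are placeholders, not moulinette's actual implementation.

import os


class FileHelperError(Exception):
    """Stand-in for MoulinetteError in this sketch."""


def read_file_sketch(file_path):
    # Hypothetical reimplementation reconstructed only from what the two
    # tests above assert -- not the real moulinette code.
    if not os.path.isfile(file_path):
        # Missing file -> the "file_not_exist" style of error
        raise FileHelperError("file_not_exist: {}".format(file_path))
    try:
        with open(file_path) as f:
            return f.read()
    except Exception as e:
        # Any other failure (the second test mocks open() to raise) is
        # wrapped into a single "unknown_error_reading_file" style of error
        raise FileHelperError(
            "unknown_error_reading_file: {} ({})".format(file_path, e))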
Code Example #3
def user_ssh_remove_key(username, key):
    user = _get_user_for_ssh(username, ["homeDirectory", "uid"])
    if not user:
        raise Exception("User with username '%s' doesn't exists" % username)

    authorized_keys_file = os.path.join(
        user["homeDirectory"][0], ".ssh", "authorized_keys"
    )

    if not os.path.exists(authorized_keys_file):
        raise Exception(
            "this key doesn't exists ({} dosesn't exists)".format(authorized_keys_file)
        )

    authorized_keys_content = read_file(authorized_keys_file)

    if key not in authorized_keys_content:
        raise Exception("Key '{}' is not present in authorized_keys".format(key))

    # don't delete the previous comment because we can't verify if it's legit

    # this regex approach failed for some reason and I don't know why :(
    # authorized_keys_content = re.sub("{} *\n?".format(key),
    #                                  "",
    #                                  authorized_keys_content,
    #                                  flags=re.MULTILINE)

    authorized_keys_content = authorized_keys_content.replace(key, "")

    write_to_file(authorized_keys_file, authorized_keys_content)
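A note on the commented-out re.sub above: one plausible explanation for the failure (an assumption, not verified against the original setup) is that the key is interpolated into the pattern unescaped, and SSH public keys routinely contain regex metacharacters such as "+", so the pattern stops matching the literal key. Below is a hedged sketch of two alternatives, either escaping the key or filtering whole lines; the helper name is illustrative only.

def strip_key_from_authorized_keys(content, key):
    # Option 1 (regex): escape the key so "+" and friends are treated literally:
    #   content = re.sub(re.escape(key) + r" *\n?", "", content)
    # Option 2 (line-based, used below): drop every line containing the key,
    # while leaving any preceding comment line untouched.
    key = key.strip()
    if not key:
        return content
    kept = [line for line in content.split("\n") if key not in line]
    return "\n".join(kept)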
Code Example #4
def test_append_to_new_file(tmp_path):
    new_file = tmp_path / "newfile.txt"

    append_to_file(str(new_file), "yolo\nswag")

    assert os.path.exists(str(new_file))
    assert read_file(str(new_file)) == "yolo\nswag"
Code Example #5
def user_ssh_add_key(username, key, comment):
    user = _get_user_for_ssh(username, ["homeDirectory", "uid"])
    if not user:
        raise Exception("User with username '%s' doesn't exists" % username)

    authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh",
                                        "authorized_keys")

    if not os.path.exists(authorized_keys_file):
        # ensure ".ssh" exists
        mkdir(os.path.join(user["homeDirectory"][0], ".ssh"),
              force=True,
              parents=True,
              uid=user["uid"][0])

        # create empty file to set good permissions
        write_to_file(authorized_keys_file, "")
        chown(authorized_keys_file, uid=user["uid"][0])
        chmod(authorized_keys_file, 0o600)

    authorized_keys_content = read_file(authorized_keys_file)

    authorized_keys_content += "\n"
    authorized_keys_content += "\n"

    if comment and comment.strip():
        if not comment.lstrip().startswith("#"):
            comment = "# " + comment
        authorized_keys_content += comment.replace("\n", " ").strip()
        authorized_keys_content += "\n"

    authorized_keys_content += key.strip()
    authorized_keys_content += "\n"

    write_to_file(authorized_keys_file, authorized_keys_content)
Code Example #6
File: test_filesystem.py Project: zheel/moulinette
def test_write_to_new_file():

    new_file = "%s/barfile" % TMP_TEST_DIR
    assert not os.path.exists(new_file)
    write_to_file(new_file, "yolo\nswag")
    assert os.path.exists(new_file)
    assert read_file(new_file) == "yolo\nswag"
Code Example #7
def user_ssh_list_keys(username):
    user = _get_user_for_ssh(username, ["homeDirectory"])
    if not user:
        raise Exception("User with username '%s' doesn't exists" % username)

    authorized_keys_file = os.path.join(
        user["homeDirectory"][0], ".ssh", "authorized_keys"
    )

    if not os.path.exists(authorized_keys_file):
        return {"keys": []}

    keys = []
    last_comment = ""
    for line in read_file(authorized_keys_file).split("\n"):
        # empty line
        if not line.strip():
            continue

        if line.lstrip().startswith("#"):
            last_comment = line.lstrip().lstrip("#").strip()
            continue

        # assuming one key per non-empty line
        key = line.strip()
        keys.append(
            {
                "key": key,
                "name": last_comment,
            }
        )

        last_comment = ""

    return {"keys": keys}
Code Example #8
File: log.py Project: trogeat/yunohost
    def parent_logger(self):

        # If there are other operation logger instances
        for instance in reversed(self._instances):
            # Is one of these operation loggers started but not yet done?
            if instance.started_at is not None and instance.ended_at is None:
                # We are a child of the first one we found
                return instance.name

        # If no lock exists, we are probably in tests or yunohost is used as a
        # lib ... let's not really care about that case and assume we're the
        # root logger then.
        if not os.path.exists("/var/run/moulinette_yunohost.lock"):
            return None

        locks = read_file("/var/run/moulinette_yunohost.lock").strip().split(
            "\n")
        # If we're the process with the lock, we're the root logger
        if locks == [] or str(os.getpid()) in locks:
            return None

        # If we get here, we are in a yunohost command called by a yunohost
        # (maybe indirectly from an app script for example...)
        #
        # The strategy is :
        # 1. list 20 most recent log files
        # 2. iterate over the PID of parent processes
        # 3. see if parent process has some log file open (being actively
        # written in)
        # 4. if among those files there's an operation log file, we use the id
        # of the most recent file

        recent_operation_logs = sorted(glob.iglob(OPERATIONS_PATH + "*.log"),
                                       key=os.path.getctime,
                                       reverse=True)[:20]

        proc = psutil.Process().parent()
        while proc is not None:
            # We use proc.open_files() to list files opened / actively used by this proc
            # We only keep files matching a recent yunohost operation log
            active_logs = sorted(
                [
                    f.path for f in proc.open_files()
                    if f.path in recent_operation_logs
                ],
                key=os.path.getctime,
                reverse=True,
            )
            if active_logs != []:
                # extract the log id from the full path
                return os.path.basename(active_logs[0])[:-4]
            else:
                proc = proc.parent()
                continue

        # If nothing found, assume we're the root operation logger
        return None
Code Example #9
def _check_acme_challenge_configuration(domain):

    domain_conf = "/etc/nginx/conf.d/%s.conf" % domain
    if "include /etc/nginx/conf.d/acme-challenge.conf.inc" in read_file(domain_conf):
        return True
    else:
        # This is for legacy setups which haven't updated their domain conf to
        # the new conf that include the acme snippet...
        legacy_acme_conf = "/etc/nginx/conf.d/%s.d/000-acmechallenge.conf" % domain
        return os.path.exists(legacy_acme_conf)
Code Example #10
File: test_filesystem.py Project: zheel/moulinette
def test_write_dict_to_json():

    dummy_dict = {"foo": 42, "bar": ["a", "b", "c"]}
    write_to_json(TMP_TEST_FILE, dummy_dict)
    j = read_json(TMP_TEST_FILE)
    assert "foo" in j.keys()
    assert "bar" in j.keys()
    assert j["foo"] == 42
    assert j["bar"] == ["a", "b", "c"]
    # read_file returns the raw JSON text; parse it to compare with the dict
    assert json.loads(read_file(TMP_TEST_FILE)) == dummy_dict
Code Example #11
File: 10-ip.py Project: grenagit/yunohost
    def can_ping_outside(self, protocol=4):

        assert protocol in [
            4, 6
        ], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(
            protocol)

        # We can know that ipv6 is not available directly if this file does not exist
        if protocol == 6 and not os.path.exists("/proc/net/if_inet6"):
            return False

        # If we are indeed connected in ipv4 or ipv6, we should find a default route
        routes = check_output("ip -%s route show table all" %
                              protocol).split("\n")

        def is_default_route(r):
            # Typically the default route starts with "default"
            # But of course IPv6 is more complex ... e.g. on internet cube there's
            # no default route but a /3 which acts as a default-like route...
            # e.g. 2000::/3 dev tun0 ...
            return r.startswith("default") or (":" in r and re.match(
                r".*/[0-3]$",
                r.split()[0]))

        if not any(is_default_route(r) for r in routes):
            self.logger_debug(
                "No default route for IPv%s, so assuming there's no IP address for that version"
                % protocol)
            return None

        # We use the resolver file as a list of well-known, trustable (ie not google ;)) IPs that we can ping
        resolver_file = "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf"
        resolvers = [
            r.split(" ")[1] for r in read_file(resolver_file).split("\n")
            if r.startswith("nameserver")
        ]

        if protocol == 4:
            resolvers = [r for r in resolvers if ":" not in r]
        if protocol == 6:
            resolvers = [r for r in resolvers if ":" in r]

        assert resolvers != [], "Uhoh, need at least one IPv%s DNS resolver in %s ..." % (
            protocol, resolver_file)

        # So let's try to ping the first 4~5 resolvers (shuffled)
        # If we successfully ping any of them, we conclude that we are indeed connected
        def ping(protocol, target):
            return os.system("ping%s -c1 -W 3 %s >/dev/null 2>/dev/null" %
                             ("" if protocol == 4 else "6", target)) == 0

        random.shuffle(resolvers)
        return any(ping(protocol, resolver) for resolver in resolvers[:5])
Code Example #12
File: 10-ip.py Project: grenagit/yunohost
    def good_resolvconf(self):
        content = read_file("/etc/resolv.conf").strip().split("\n")
        # Ignore comments and empty lines
        content = [
            l.strip() for l in content
            if l.strip() and not l.strip().startswith("#")
            and not l.strip().startswith("search")
        ]
        # We should only find a "nameserver 127.0.0.1"
        return len(content) == 1 and content[0].split() == [
            "nameserver", "127.0.0.1"
        ]
Code Example #13
def external_resolvers():

    global external_resolvers_

    if not external_resolvers_:
        resolv_dnsmasq_conf = read_file("/etc/resolv.dnsmasq.conf").split("\n")
        external_resolvers_ = [r.split(" ")[1] for r in resolv_dnsmasq_conf if r.startswith("nameserver")]
        # We keep only ipv4 resolvers, otherwise on IPv4-only instances, IPv6
        # will be tried anyway resulting in super-slow dig requests that'll wait
        # until timeout...
        external_resolvers_ = [r for r in external_resolvers_ if ":" not in r]

    return external_resolvers_
Code Example #14
def get_public_ip(protocol=4):

    assert protocol in [4, 6], "Invalid protocol version for get_public_ip: %s, expected 4 or 6" % protocol

    cache_file = "/var/cache/yunohost/ipv%s" % protocol
    cache_duration = 120  # 2 min
    if os.path.exists(cache_file) and abs(os.path.getctime(cache_file) - time.time()) < cache_duration:
        ip = read_file(cache_file).strip()
        ip = ip if ip else None  # Empty file (empty string) means there's no IP
        logger.debug("Reusing IPv%s from cache: %s" % (protocol, ip))
    else:
        ip = get_public_ip_from_remote_server(protocol)
        logger.debug("IP fetched: %s" % ip)
        write_to_file(cache_file, ip or "")
    return ip
Code Example #15
    def run(self):

        regenconf_modified_files = list(self.manually_modified_files())

        if not regenconf_modified_files:
            yield dict(
                meta={"test": "regenconf"},
                status="SUCCESS",
                summary="diagnosis_regenconf_allgood",
            )
        else:
            for f in regenconf_modified_files:
                yield dict(
                    meta={
                        "test": "regenconf",
                        "category": f["category"],
                        "file": f["path"],
                    },
                    status="WARNING",
                    summary="diagnosis_regenconf_manually_modified",
                    details=["diagnosis_regenconf_manually_modified_details"],
                )

        if (
            any(f["path"] == "/etc/ssh/sshd_config" for f in regenconf_modified_files)
            and os.system(
                "grep -q '^ *AllowGroups\\|^ *AllowUsers' /etc/ssh/sshd_config"
            )
            != 0
        ):
            yield dict(
                meta={"test": "sshd_config_insecure"},
                status="ERROR",
                summary="diagnosis_sshd_config_insecure",
            )

        # Check consistency between actual ssh port in sshd_config vs. setting
        ssh_port_setting = settings_get("security.ssh.port")
        ssh_port_line = re.findall(
            r"\bPort *([0-9]{2,5})\b", read_file("/etc/ssh/sshd_config")
        )
        if len(ssh_port_line) == 1 and int(ssh_port_line[0]) != ssh_port_setting:
            yield dict(
                meta={"test": "sshd_config_port_inconsistency"},
                status="WARNING",
                summary="diagnosis_sshd_config_inconsistent",
                details=["diagnosis_sshd_config_inconsistent_details"],
            )
Code Example #16
    def ssh_root_login_status():
        # XXX temporary placed here for when the ssh_root commands are integrated
        # extracted from https://github.com/YunoHost/yunohost/pull/345
        # XXX should we support all the options?
        # this is the content of "man sshd_config"
        # PermitRootLogin
        #     Specifies whether root can log in using ssh(1).  The argument must be
        #     “yes”, “without-password”, “forced-commands-only”, or “no”.  The
        #     default is “yes”.
        sshd_config_content = read_file(SSHD_CONFIG_PATH)

        if re.search("^ *PermitRootLogin +(no|forced-commands-only) *$",
                     sshd_config_content, re.MULTILINE):
            return {"PermitRootLogin": False}

        return {"PermitRootLogin": True}
Code Example #17
File: service.py Project: lapineige/yunohost
def _get_services():
    """
    Get a dict of managed services with their parameters

    """
    try:
        with open("/etc/yunohost/services.yml", "r") as f:
            services = yaml.load(f) or {}
    except Exception:
        return {}

    # some services are marked as None to remove them from YunoHost
    # filter this
    for key, value in list(services.items()):
        if value is None:
            del services[key]

    # Dirty hack to automatically find custom SSH port ...
    ssh_port_line = re.findall(
        r"\bPort *([0-9]{2,5})\b", read_file("/etc/ssh/sshd_config")
    )
    if len(ssh_port_line) == 1:
        services["ssh"]["needs_exposed_ports"] = [int(ssh_port_line[0])]

    # Dirty hack to check the status of ynh-vpnclient
    if "ynh-vpnclient" in services:
        status_check = "systemctl is-active [email protected]"
        if "test_status" not in services["ynh-vpnclient"]:
            services["ynh-vpnclient"]["test_status"] = status_check
        if "log" not in services["ynh-vpnclient"]:
            services["ynh-vpnclient"]["log"] = ["/var/log/ynh-vpnclient.log"]

    # Stupid hack for postgresql which ain't an official service ... Can't
    # really inject that info otherwise. Real service we want to check for
    # status and log is in fact postgresql@x.y-main.service (x.y being the version)
    if "postgresql" in services:
        if "description" in services["postgresql"]:
            del services["postgresql"]["description"]
        services["postgresql"]["actual_systemd_service"] = "postgresql@11-main"

    return services
Code Example #18
File: log.py Project: trogeat/yunohost
    def __init__(self, operation, related_to=None, **kwargs):
        # TODO add a way to not save password on app installation
        self.operation = operation
        self.related_to = related_to
        self.extra = kwargs
        self.started_at = None
        self.ended_at = None
        self.logger = None
        self._name = None
        self.data_to_redact = []
        self.parent = self.parent_logger()
        self._instances.append(self)

        for filename in ["/etc/yunohost/mysql", "/etc/yunohost/psql"]:
            if os.path.exists(filename):
                self.data_to_redact.append(read_file(filename).strip())

        self.path = OPERATIONS_PATH

        if not os.path.exists(self.path):
            os.makedirs(self.path)
Code Example #19
    def check_assertions(self):

        # Be on stretch (9.x) and yunohost 3.x
        # NB: we do both checks to cover situations where the upgrade crashed
        # in the middle and debian version could be > 9.x but yunohost package
        # would still be in 3.x...
        if not self.debian_major_version() == 9 \
           and not self.yunohost_major_version() == 3:
            raise YunohostError("migration_0015_not_stretch")

        # Have > 1 GB of free space on /var/ ?
        if free_space_in_directory("/var/") / (1024**3) < 1.0:
            raise YunohostError("migration_0015_not_enough_free_space")

        # Check system is up to date
        # (but we don't if 'buster' is already in the sources.list ...
        # which means maybe a previous upgrade crashed and we're re-running it)
        if " buster " not in read_file("/etc/apt/sources.list"):
            tools_update(system=True)
            upgradable_system_packages = list(_list_upgradable_apt_packages())
            if upgradable_system_packages:
                raise YunohostError(
                    "migration_0015_system_not_fully_up_to_date")
Code Example #20
File: test_filesystem.py Project: zheel/moulinette
def test_read_file():

    content = read_file(TMP_TEST_FILE)
    assert content == "foo\nbar\n"
Code Example #21
File: service.py Project: lapineige/yunohost
def _remove_lock(PID_to_remove):
    # FIXME ironically not concurrency safe because it's not atomic...

    PIDs = read_file(MOULINETTE_LOCK).split("\n")
    PIDs_to_keep = [PID for PID in PIDs if int(PID) != PID_to_remove]
    write_to_file(MOULINETTE_LOCK, "\n".join(PIDs_to_keep))
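As the FIXME says, this read/modify/write is neither atomic nor protected against concurrent writers, so two processes releasing the lock at the same time can lose each other's update. One possible fix, sketched here under the assumption that all writers cooperate (this is not yunohost's actual code, and the helper name is illustrative), is to hold an exclusive flock() for the whole operation:

import fcntl


def remove_lock_safely(lock_path, pid_to_remove):
    # Hypothetical, concurrency-safer variant of _remove_lock: the exclusive
    # flock() serializes every read-modify-write on the lock file, provided
    # all writers use the same locking discipline.
    with open(lock_path, "r+") as f:
        fcntl.flock(f, fcntl.LOCK_EX)
        pids = [p for p in f.read().split("\n") if p.strip()]
        pids_to_keep = [p for p in pids if int(p) != pid_to_remove]
        f.seek(0)
        f.truncate()
        f.write("\n".join(pids_to_keep))
        # the lock is released when the file is closed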
Code Example #22
File: 21-web.py Project: grenagit/yunohost
    def run(self):

        all_domains = domain_list()["domains"]
        domains_to_check = []
        for domain in all_domains:

            # If the diagnosis location ain't defined, can't do diagnosis,
            # probably because nginx conf manually modified...
            nginx_conf = "/etc/nginx/conf.d/%s.conf" % domain
            if ".well-known/ynh-diagnosis/" not in read_file(nginx_conf):
                yield dict(
                    meta={"domain": domain},
                    status="WARNING",
                    summary="diagnosis_http_nginx_conf_not_up_to_date",
                    details=[
                        "diagnosis_http_nginx_conf_not_up_to_date_details"
                    ])
            else:
                domains_to_check.append(domain)

        self.nonce = ''.join(
            random.choice("0123456789abcedf") for i in range(16))
        os.system("rm -rf /tmp/.well-known/ynh-diagnosis/")
        os.system("mkdir -p /tmp/.well-known/ynh-diagnosis/")
        os.system("touch /tmp/.well-known/ynh-diagnosis/%s" % self.nonce)

        if not domains_to_check:
            return

        # To perform hairpinning test, we gotta make sure that port forwarding
        # is working and therefore we'll do it only if at least one ipv4 domain
        # works.
        self.do_hairpinning_test = False

        ipversions = []
        ipv4 = Diagnoser.get_cached_report("ip", item={"test": "ipv4"}) or {}
        if ipv4.get("status") == "SUCCESS":
            ipversions.append(4)

        # To be discussed: we could also make this check dependent on the
        # existence of an AAAA record...
        ipv6 = Diagnoser.get_cached_report("ip", item={"test": "ipv6"}) or {}
        if ipv6.get("status") == "SUCCESS":
            ipversions.append(6)

        for item in self.test_http(domains_to_check, ipversions):
            yield item

        # If at least one domain is correctly exposed to the outside,
        # attempt to diagnose hairpinning situations. On network with
        # hairpinning issues, the server may be correctly exposed on the
        # outside, but from the outside, it will be as if the port forwarding
        # was not configured... Hence, calling for example
        # "curl --head the.global.ip" will simply timeout...
        if self.do_hairpinning_test:
            global_ipv4 = ipv4.get("data", {}).get("global", None)
            if global_ipv4:
                try:
                    requests.head("http://" + global_ipv4, timeout=5)
                except requests.exceptions.Timeout:
                    yield dict(
                        meta={"test": "hairpinning"},
                        status="WARNING",
                        summary="diagnosis_http_hairpinning_issue",
                        details=["diagnosis_http_hairpinning_issue_details"])
                except:
                    # Well I dunno what to do if that's another exception
                    # type... That'll most probably *not* be an hairpinning
                    # issue but something else super weird ...
                    pass
Code Example #23
File: 00-basesystem.py Project: trogeat/yunohost
    def run(self):

        # Detect virt technology (if not bare metal) and arch
        # Gotta have this "|| true" because it systemd-detect-virt return 'none'
        # with an error code on bare metal ~.~
        virt = check_output("systemd-detect-virt || true", shell=True)
        if virt.lower() == "none":
            virt = "bare-metal"

        # Detect arch
        arch = check_output("dpkg --print-architecture")
        hardware = dict(
            meta={"test": "hardware"},
            status="INFO",
            data={
                "virt": virt,
                "arch": arch
            },
            summary="diagnosis_basesystem_hardware",
        )

        # Also possibly the board / hardware name
        if os.path.exists("/proc/device-tree/model"):
            model = read_file("/proc/device-tree/model").strip().replace(
                "\x00", "")
            hardware["data"]["model"] = model
            hardware["details"] = ["diagnosis_basesystem_hardware_model"]
        elif os.path.exists("/sys/devices/virtual/dmi/id/sys_vendor"):
            model = read_file("/sys/devices/virtual/dmi/id/sys_vendor").strip()
            if os.path.exists("/sys/devices/virtual/dmi/id/product_name"):
                model = "%s %s" % (
                    model,
                    read_file(
                        "/sys/devices/virtual/dmi/id/product_name").strip(),
                )
            hardware["data"]["model"] = model
            hardware["details"] = ["diagnosis_basesystem_hardware_model"]

        yield hardware

        # Kernel version
        kernel_version = read_file("/proc/sys/kernel/osrelease").strip()
        yield dict(
            meta={"test": "kernel"},
            data={"kernel_version": kernel_version},
            status="INFO",
            summary="diagnosis_basesystem_kernel",
        )

        # Debian release
        debian_version = read_file("/etc/debian_version").strip()
        yield dict(
            meta={"test": "host"},
            data={"debian_version": debian_version},
            status="INFO",
            summary="diagnosis_basesystem_host",
        )

        # Yunohost packages versions
        # We check that versions are consistent (e.g. all at 3.6, not 3 packages at 3.6 and another at 3.5)
        # This is a classical issue for upgrades that failed in the middle
        # (or people upgrading half of the package because they did 'apt upgrade' instead of 'dist-upgrade')
        # Here, ynh_core_version is for example "3.5.4.12", so [:3] is "3.5" and we check it's the same for all packages
        ynh_packages = ynh_packages_version()
        ynh_core_version = ynh_packages["yunohost"]["version"]
        consistent_versions = all(infos["version"][:3] == ynh_core_version[:3]
                                  for infos in ynh_packages.values())
        ynh_version_details = [(
            "diagnosis_basesystem_ynh_single_version",
            {
                "package": package,
                "version": infos["version"],
                "repo": infos["repo"],
            },
        ) for package, infos in ynh_packages.items()]

        yield dict(
            meta={"test": "ynh_versions"},
            data={
                "main_version": ynh_core_version,
                "repo": ynh_packages["yunohost"]["repo"],
            },
            status="INFO" if consistent_versions else "ERROR",
            summary="diagnosis_basesystem_ynh_main_version"
            if consistent_versions else
            "diagnosis_basesystem_ynh_inconsistent_versions",
            details=ynh_version_details,
        )

        if self.is_vulnerable_to_meltdown():
            yield dict(
                meta={"test": "meltdown"},
                status="ERROR",
                summary="diagnosis_security_vulnerable_to_meltdown",
                details=["diagnosis_security_vulnerable_to_meltdown_details"],
            )

        bad_sury_packages = list(self.bad_sury_packages())
        if bad_sury_packages:
            cmd_to_fix = "apt install --allow-downgrades " + " ".join([
                "%s=%s" % (package, version)
                for package, version in bad_sury_packages
            ])
            yield dict(
                meta={"test": "packages_from_sury"},
                data={"cmd_to_fix": cmd_to_fix},
                status="WARNING",
                summary="diagnosis_package_installed_from_sury",
                details=["diagnosis_package_installed_from_sury_details"],
            )

        if self.backports_in_sources_list():
            yield dict(
                meta={"test": "backports_in_sources_list"},
                status="WARNING",
                summary="diagnosis_backports_in_sources_list",
            )
Code Example #24
File: test_filesystem.py Project: zheel/moulinette
def test_write_to_existing_file():

    assert os.path.exists(TMP_TEST_FILE)
    write_to_file(TMP_TEST_FILE, "yolo\nswag")
    assert read_file(TMP_TEST_FILE) == "yolo\nswag"
Code Example #25
File: test_filesystem.py Project: zheel/moulinette
def test_read_file_badpermissions():

    switch_to_non_root_user()
    with pytest.raises(MoulinetteError):
        read_file(TMP_TEST_FILE)
Code Example #26
File: test_filesystem.py Project: zheel/moulinette
def test_read_file_badfile():

    with pytest.raises(MoulinetteError):
        read_file(TMP_TEST_FILE + "nope")
Code Example #27
File: log.py Project: trogeat/yunohost
def log_show(path,
             number=None,
             share=False,
             filter_irrelevant=False,
             with_suboperations=False):
    """
    Display a log file enriched with metadata if any.

    If the file_name is not an absolute path, it will try to search the file in
    the unit operations log path (see OPERATIONS_PATH).

    Argument:
        file_name
        number
        share
    """

    if share:
        filter_irrelevant = True

    if filter_irrelevant:
        filters = [
            r"set [+-]x$",
            r"set [+-]o xtrace$",
            r"local \w+$",
            r"local legacy_args=.*$",
            r".*Helper used in legacy mode.*",
            r"args_array=.*$",
            r"local -A args_array$",
            r"ynh_handle_getopts_args",
            r"ynh_script_progression",
        ]
    else:
        filters = []

    def _filter_lines(lines, filters=[]):

        filters = [re.compile(f) for f in filters]
        return [
            line for line in lines
            if not any(f.search(line.strip()) for f in filters)
        ]

    # Normalize log/metadata paths and filenames
    abs_path = path
    log_path = None
    if not path.startswith("/"):
        abs_path = os.path.join(OPERATIONS_PATH, path)

    if os.path.exists(abs_path) and not path.endswith(METADATA_FILE_EXT):
        log_path = abs_path

    if abs_path.endswith(METADATA_FILE_EXT) or abs_path.endswith(LOG_FILE_EXT):
        base_path = "".join(os.path.splitext(abs_path)[:-1])
    else:
        base_path = abs_path
    base_filename = os.path.basename(base_path)
    md_path = base_path + METADATA_FILE_EXT
    if log_path is None:
        log_path = base_path + LOG_FILE_EXT

    if not os.path.exists(md_path) and not os.path.exists(log_path):
        raise YunohostValidationError("log_does_exists", log=path)

    infos = {}

    # If it's a unit operation, display the name and the description
    if base_path.startswith(CATEGORIES_PATH):
        infos["description"] = _get_description_from_name(base_filename)
        infos["name"] = base_filename

    if share:
        from yunohost.utils.yunopaste import yunopaste

        content = ""
        if os.path.exists(md_path):
            content += read_file(md_path)
            content += "\n============\n\n"
        if os.path.exists(log_path):
            actual_log = read_file(log_path)
            content += "\n".join(_filter_lines(actual_log.split("\n"),
                                               filters))

        url = yunopaste(content)

        logger.info(m18n.n("log_available_on_yunopaste", url=url))
        if msettings.get("interface") == "api":
            return {"url": url}
        else:
            return

    # Display metadata if it exists
    if os.path.exists(md_path):
        try:
            metadata = read_yaml(md_path)
        except MoulinetteError as e:
            error = m18n.n("log_corrupted_md_file", md_file=md_path, error=e)
            if os.path.exists(log_path):
                logger.warning(error)
            else:
                raise YunohostError(error)
        else:
            infos["metadata_path"] = md_path
            infos["metadata"] = metadata

            if "log_path" in metadata:
                log_path = metadata["log_path"]

            if with_suboperations:

                def suboperations():
                    try:
                        log_start = _get_datetime_from_name(base_filename)
                    except ValueError:
                        return

                    for filename in os.listdir(OPERATIONS_PATH):

                        if not filename.endswith(METADATA_FILE_EXT):
                            continue

                        # We first restrict the search to a ~48h time window to limit the number
                        # of .yml files we look into
                        try:
                            date = _get_datetime_from_name(filename)
                        except ValueError:
                            continue
                        if (date < log_start) or (
                                date > log_start + timedelta(hours=48)):
                            continue

                        try:
                            submetadata = read_yaml(
                                os.path.join(OPERATIONS_PATH, filename))
                        except Exception:
                            continue

                        if submetadata and submetadata.get(
                                "parent") == base_filename:
                            yield {
                                "name":
                                filename[:-len(METADATA_FILE_EXT)],
                                "description":
                                _get_description_from_name(
                                    filename[:-len(METADATA_FILE_EXT)]),
                                "success":
                                submetadata.get("success", "?"),
                            }

                metadata["suboperations"] = list(suboperations())

    # Display logs if they exist
    if os.path.exists(log_path):
        from yunohost.service import _tail

        if number and filters:
            logs = _tail(log_path, int(number * 4))
        elif number:
            logs = _tail(log_path, int(number))
        else:
            logs = read_file(log_path).split("\n")  # split into lines, like _tail() does
        logs = _filter_lines(logs, filters)
        if number:
            logs = logs[-number:]
        infos["log_path"] = log_path
        infos["logs"] = logs

    return infos
Code Example #28
File: test_filesystem.py Project: zheel/moulinette
def test_append_to_existing_file():

    assert os.path.exists(TMP_TEST_FILE)
    append_to_file(TMP_TEST_FILE, "yolo\nswag")
    assert read_file(TMP_TEST_FILE) == "foo\nbar\nyolo\nswag"
Code Example #29
def dyndns_update(
    operation_logger,
    dyn_host="dyndns.yunohost.org",
    domain=None,
    key=None,
    ipv4=None,
    ipv6=None,
    force=False,
    dry_run=False,
):
    """
    Update IP on DynDNS platform

    Keyword argument:
        domain -- Full domain to update
        dyn_host -- Dynette DNS server to inform
        key -- Public DNS key
        ipv4 -- IP address to send
        ipv6 -- IPv6 address to send

    """
    # Get old ipv4/v6

    old_ipv4, old_ipv6 = (None, None)  # (default values)

    # If domain is not given, try to guess it from keys available...
    if domain is None:
        (domain, key) = _guess_current_dyndns_domain(dyn_host)

    if domain is None:
        raise YunohostValidationError('dyndns_no_domain_registered')

    # If key is not given, pick the first file we find with the domain given
    else:
        if key is None:
            keys = glob.glob(
                "/etc/yunohost/dyndns/K{0}.+*.private".format(domain))

            if not keys:
                raise YunohostValidationError("dyndns_key_not_found")

            key = keys[0]

    # Extract 'host', e.g. 'nohost.me' from 'foo.nohost.me'
    host = domain.split(".")[1:]
    host = ".".join(host)

    logger.debug("Building zone update file ...")

    lines = [
        "server %s" % dyn_host,
        "zone %s" % host,
    ]

    def resolve_domain(domain, rdtype):

        # FIXME make this work for IPv6-only hosts too..
        ok, result = dig(dyn_host, "A")
        dyn_host_ip = result[0] if ok == "ok" and len(result) else None
        if not dyn_host_ip:
            raise YunohostError("Failed to resolve %s" % dyn_host)

        ok, result = dig(domain, rdtype, resolvers=[dyn_host_ip])
        if ok == "ok":
            return result[0] if len(result) else None
        elif result[0] == "Timeout":
            logger.debug(
                "Timed-out while trying to resolve %s record for %s using %s" %
                (rdtype, domain, dyn_host))
        else:
            return None

        logger.debug("Falling back to external resolvers")
        ok, result = dig(domain, rdtype, resolvers="force_external")
        if ok == "ok":
            return result[0] if len(result) else None
        elif result[0] == "Timeout":
            logger.debug(
                "Timed-out while trying to resolve %s record for %s using external resolvers : %s"
                % (rdtype, domain, result))
        else:
            return None

        raise YunohostError("Failed to resolve %s for %s" % (rdtype, domain),
                            raw_msg=True)

    old_ipv4 = resolve_domain(domain, "A")
    old_ipv6 = resolve_domain(domain, "AAAA")

    # Get current IPv4 and IPv6
    ipv4_ = get_public_ip()
    ipv6_ = get_public_ip(6)

    if ipv4 is None:
        ipv4 = ipv4_

    if ipv6 is None:
        ipv6 = ipv6_

    logger.debug("Old IPv4/v6 are (%s, %s)" % (old_ipv4, old_ipv6))
    logger.debug("Requested IPv4/v6 are (%s, %s)" % (ipv4, ipv6))

    # no need to update
    if (not force and not dry_run) and (old_ipv4 == ipv4 and old_ipv6 == ipv6):
        logger.info("No updated needed.")
        return
    else:
        operation_logger.related_to.append(("domain", domain))
        operation_logger.start()
        logger.info("Updated needed, going on...")

    dns_conf = _build_dns_conf(domain)

    # Delete custom DNS records, we don't support them (have to explicitly
    # authorize them on dynette)
    for category in dns_conf.keys():
        if category not in ["basic", "mail", "xmpp", "extra"]:
            del dns_conf[category]

    # Delete the old records for all domain/subdomains

    # every dns_conf.values() is a list of :
    # [{"name": "...", "ttl": "...", "type": "...", "value": "..."}]
    for records in dns_conf.values():
        for record in records:
            action = "update delete {name}.{domain}.".format(domain=domain,
                                                             **record)
            action = action.replace(" @.", " ")
            lines.append(action)

    # Add the new records for all domain/subdomains

    for records in dns_conf.values():
        for record in records:
            # (For some reason) here we want the format where the entire, full
            # domain is shown explicitly every time, not just "muc" or "@"; it
            # should be muc.the.domain.tld. or the.domain.tld
            if record["value"] == "@":
                record["value"] = domain
            record["value"] = record["value"].replace(";", r"\;")

            action = "update add {name}.{domain}. {ttl} {type} {value}".format(
                domain=domain, **record)
            action = action.replace(" @.", " ")
            lines.append(action)

    lines += ["show", "send"]

    # Write the actions to do to update to a file, to be able to pass it
    # to nsupdate as argument
    write_to_file(DYNDNS_ZONE, "\n".join(lines))

    logger.debug("Now pushing new conf to DynDNS host...")

    if not dry_run:
        try:
            command = ["/usr/bin/nsupdate", "-k", key, DYNDNS_ZONE]
            subprocess.check_call(command)
        except subprocess.CalledProcessError:
            raise YunohostError("dyndns_ip_update_failed")

        logger.success(m18n.n("dyndns_ip_updated"))
    else:
        print(read_file(DYNDNS_ZONE))
        print("")
        print(
            "Warning: dry run, this is only the generated config, it won't be applied"
        )
Code Example #30
File: test_filesystem.py Project: zheel/moulinette
def test_write_to_file_with_a_list():

    assert os.path.exists(TMP_TEST_FILE)
    write_to_file(TMP_TEST_FILE, ["yolo", "swag"])
    assert read_file(TMP_TEST_FILE) == "yolo\nswag"
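For completeness, the write-side behaviour implied by the tests in this listing: write_to_file accepts either a string or a list of lines (joined with "\n", as the last test shows), and append_to_file appends to whatever is already there, creating the file if needed, without inserting a separator of its own. A minimal sketch consistent with those expectations (names suffixed with _sketch to mark them as illustrative, not moulinette's code):

def write_to_file_sketch(path, content):
    # A list of lines is joined with "\n"; a plain string is written as-is.
    if isinstance(content, list):
        content = "\n".join(content)
    with open(path, "w") as f:
        f.write(content)


def append_to_file_sketch(path, content):
    # Opening in "a" mode creates the file if missing and otherwise appends
    # verbatim, which matches test_append_to_new_file / _existing_file above.
    with open(path, "a") as f:
        f.write(content)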