Example #1
def test_read_yaml_cannot_read(test_yaml, mocker):
    error = "foobar"

    mocker.patch("yaml.safe_load", side_effect=Exception(error))
    with pytest.raises(MoulinetteError) as exception:
        read_yaml(str(test_yaml))

    translation = m18n.g("corrupted_yaml", ressource=str(test_yaml), error=error)
    expected_msg = translation.format(ressource=str(test_yaml), error=error)
    assert expected_msg in str(exception)
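Note: a minimal sketch of what a read_yaml helper with the error behaviour exercised above could look like, assuming only yaml.safe_load plus an error wrapper; the actual moulinette implementation, its MoulinetteError class and m18n catalog are not reproduced here.

import yaml


class CorruptedYamlError(Exception):
    """Hypothetical stand-in for MoulinetteError raised on unparsable YAML."""


def read_yaml(file_path):
    # Read the raw file content, then let yaml.safe_load parse it.
    with open(file_path, "r") as f:
        content = f.read()
    try:
        return yaml.safe_load(content)
    except Exception as e:
        # Wrap any parsing failure into a domain-specific error that mentions
        # the resource and the original error, as the test above expects.
        raise CorruptedYamlError(
            "corrupted_yaml: ressource=%s, error=%s" % (file_path, e)
        ) from e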
Example #2
        def generate_cache():

            logger.debug("generating cache for actions map")

            # Read actions map from yaml file
            actionsmap = read_yaml(actionsmap_yml)

            if not actionsmap["_global"].get("cache", True):
                return actionsmap

            # Delete old cache files
            for old_cache in glob.glob(
                f"{actionsmap_yml_dir}/.{actionsmap_yml_file}.*.pkl"
            ):
                os.remove(old_cache)

            # at installation, the cache dir might not exist yet
            dir_ = os.path.dirname(actionsmap_pkl)
            if not os.path.isdir(dir_):
                os.makedirs(dir_)

            # Cache actions map into pickle file
            with open(actionsmap_pkl, "wb") as f:
                pickle.dump(actionsmap, f)

            return actionsmap
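Note: a minimal sketch of the cache-consuming side that such a generate_cache() would typically pair with (hypothetical helper, not the actual moulinette ActionsMap code): reuse the pickled cache when it exists, otherwise regenerate it from the YAML source.

import os
import pickle


def load_actionsmap(actionsmap_pkl, generate_cache):
    # Prefer the pickled cache when it is already on disk; fall back to the
    # generate_cache() callback, which reads the YAML and rebuilds the cache.
    if os.path.exists(actionsmap_pkl):
        with open(actionsmap_pkl, "rb") as f:
            return pickle.load(f)
    return generate_cache()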
Example #3
File: tools.py Project: trogeat/yunohost
def tools_migrations_state():
    """
    Show current migration state
    """
    if not os.path.exists(MIGRATIONS_STATE_PATH):
        return {"migrations": {}}

    return read_yaml(MIGRATIONS_STATE_PATH)
Example #4
def test_write_list_to_yaml(tmp_path):
    new_file = tmp_path / "newfile.yaml"

    dummy_list = ["foo", "bar", "baz"]
    write_to_yaml(str(new_file), dummy_list)

    _yaml = read_yaml(str(new_file))
    assert _yaml == ["foo", "bar", "baz"]
Example #5
    def run(self, *args):

        from yunohost.utils.ldap import _get_ldap_interface

        ldap = _get_ldap_interface()

        existing_perms_raw = ldap.search(
            "ou=permission,dc=yunohost,dc=org", "(objectclass=permissionYnh)", ["cn"]
        )
        existing_perms = [perm["cn"][0] for perm in existing_perms_raw]

        # Add SSH and SFTP permissions
        ldap_map = read_yaml(
            "/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml"
        )

        if "sftp.main" not in existing_perms:
            ldap.add(
                "cn=sftp.main,ou=permission",
                ldap_map["depends_children"]["cn=sftp.main,ou=permission"],
            )

        if "ssh.main" not in existing_perms:
            ldap.add(
                "cn=ssh.main,ou=permission",
                ldap_map["depends_children"]["cn=ssh.main,ou=permission"],
            )

            # Add a bash terminal to each user
            users = ldap.search(
                "ou=users,dc=yunohost,dc=org",
                filter="(loginShell=*)",
                attrs=["dn", "uid", "loginShell"],
            )
            for user in users:
                if user["loginShell"][0] == "/bin/false":
                    dn = user["dn"][0].replace(",dc=yunohost,dc=org", "")
                    ldap.update(dn, {"loginShell": ["/bin/bash"]})
                else:
                    user_permission_update(
                        "ssh.main", add=user["uid"][0], sync_perm=False
                    )

            permission_sync_to_user()

            # Somehow this is needed, otherwise PAM doesn't seem to forget about
            # the old loginShell value
            subprocess.call(["nscd", "-i", "passwd"])

        if (
            "/etc/ssh/sshd_config" in manually_modified_files()
            and os.system(
                "grep -q '^ *AllowGroups\\|^ *AllowUsers' /etc/ssh/sshd_config"
            )
            != 0
        ):
            logger.error(m18n.n("diagnosis_sshd_config_insecure"))
Example #6
File: legacy.py Project: grenagit/yunohost
    def migrate_LDAP_db():

        logger.info(m18n.n("migration_0011_update_LDAP_database"))

        from yunohost.utils.ldap import _get_ldap_interface
        ldap = _get_ldap_interface()

        ldap_map = read_yaml(
            '/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml')

        try:
            SetupGroupPermissions.remove_if_exists("ou=permission")
            SetupGroupPermissions.remove_if_exists('ou=groups')

            attr_dict = ldap_map['parents']['ou=permission']
            ldap.add('ou=permission', attr_dict)

            attr_dict = ldap_map['parents']['ou=groups']
            ldap.add('ou=groups', attr_dict)

            attr_dict = ldap_map['children']['cn=all_users,ou=groups']
            ldap.add('cn=all_users,ou=groups', attr_dict)

            attr_dict = ldap_map['children']['cn=visitors,ou=groups']
            ldap.add('cn=visitors,ou=groups', attr_dict)

            for rdn, attr_dict in ldap_map['depends_children'].items():
                ldap.add(rdn, attr_dict)
        except Exception as e:
            raise YunohostError("migration_0011_LDAP_update_failed", error=e)

        logger.info(m18n.n("migration_0011_create_group"))

        # Create a group for each yunohost user
        user_list = ldap.search(
            'ou=users,dc=yunohost,dc=org',
            '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))',
            ['uid', 'uidNumber'])
        for user_info in user_list:
            username = user_info['uid'][0]
            ldap.update(
                'uid=%s,ou=users' % username, {
                    'objectClass': [
                        'mailAccount', 'inetOrgPerson', 'posixAccount',
                        'userPermissionYnh'
                    ]
                })
            user_group_create(username,
                              gid=user_info['uidNumber'][0],
                              primary_group=True,
                              sync_perm=False)
            user_group_update(groupname='all_users',
                              add=username,
                              force=True,
                              sync_perm=False)
Example #7
File: 24-mail.py Project: trogeat/yunohost
    def check_blacklist(self):
        """
        Check against blacklist DNS servers with dig.
        This check is run on the IPs and domains we could use to send mail.
        """

        dns_blacklists = read_yaml(DEFAULT_DNS_BLACKLIST)
        for item in self.ips + self.mail_domains:
            for blacklist in dns_blacklists:
                item_type = "domain"
                if ":" in item:
                    item_type = "ipv6"
                elif re.match(r"^\d+\.\d+\.\d+\.\d+$", item):
                    item_type = "ipv4"

                if not blacklist[item_type]:
                    continue

                # Build the query for DNSBL
                subdomain = item
                if item_type != "domain":
                    rev = dns.reversename.from_address(item)
                    subdomain = str(rev.split(3)[0])
                query = subdomain + "." + blacklist["dns_server"]

                # Do the DNS Query
                status, _ = dig(query, "A")
                if status != "ok":
                    continue

                # Try to get the reason
                details = []
                status, answers = dig(query, "TXT")
                reason = "-"
                if status == "ok":
                    reason = ", ".join(answers)
                    details.append("diagnosis_mail_blacklist_reason")

                details.append("diagnosis_mail_blacklist_website")

                yield dict(
                    meta={
                        "test": "mail_blacklist",
                        "item": item,
                        "blacklist": blacklist["dns_server"],
                    },
                    data={
                        "blacklist_name": blacklist["name"],
                        "blacklist_website": blacklist["website"],
                        "reason": reason,
                    },
                    status="ERROR",
                    summary="diagnosis_mail_blacklist_listed_by",
                    details=details,
                )
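Note: to illustrate how the DNSBL query above is assembled (hypothetical IP and blacklist server, using dnspython's dns.reversename exactly as the code does): the address is turned into its reverse-lookup name, the in-addr.arpa / ip6.arpa suffix is stripped with split(3), and the blacklist's DNS server is appended.

import dns.reversename

# Hypothetical inputs for illustration only.
item = "203.0.113.7"
blacklist_dns_server = "dnsbl.example.org"

rev = dns.reversename.from_address(item)        # 7.113.0.203.in-addr.arpa.
subdomain = str(rev.split(3)[0])                 # 7.113.0.203
query = subdomain + "." + blacklist_dns_server   # 7.113.0.203.dnsbl.example.org
print(query)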
Example #8
    def check_blacklist(self):
        """
        Check against blacklist DNS servers with dig.
        This check is run on the IPs and domains we could use to send mail.
        """

        dns_blacklists = read_yaml(DEFAULT_DNS_BLACKLIST)
        for item in self.ips + self.mail_domains:
            for blacklist in dns_blacklists:
                item_type = "domain"
                if ":" in item:
                    item_type = 'ipv6'
                elif re.match(r'^\d+\.\d+\.\d+\.\d+$', item):
                    item_type = 'ipv4'

                if not blacklist[item_type]:
                    continue

                # Build the query for DNSBL
                subdomain = item
                if item_type != "domain":
                    rev = dns.reversename.from_address(item)
                    subdomain = str(rev.split(3)[0])
                query = subdomain + '.' + blacklist['dns_server']

                # Do the DNS Query
                status, _ = dig(query, 'A')
                if status != 'ok':
                    continue

                # Try to get the reason
                details = []
                status, answers = dig(query, 'TXT')
                reason = "-"
                if status == 'ok':
                    reason = ', '.join(answers)
                    details.append("diagnosis_mail_blacklist_reason")

                details.append("diagnosis_mail_blacklist_website")

                yield dict(meta={
                    "test": "mail_blacklist",
                    "item": item,
                    "blacklist": blacklist["dns_server"]
                },
                           data={
                               'blacklist_name': blacklist['name'],
                               'blacklist_website': blacklist['website'],
                               'reason': reason
                           },
                           status="ERROR",
                           summary='diagnosis_mail_blacklist_listed_by',
                           details=details)
Example #9
def test_write_dict_to_yaml(tmp_path):
    new_file = tmp_path / "newfile.yaml"

    dummy_dict = {"foo": 42, "bar": ["a", "b", "c"]}
    write_to_yaml(str(new_file), dummy_dict)
    _yaml = read_yaml(str(new_file))

    assert "foo" in _yaml.keys()
    assert "bar" in _yaml.keys()

    assert _yaml["foo"] == 42
    assert _yaml["bar"] == ["a", "b", "c"]
Example #10
File: log.py Project: trogeat/yunohost
                def suboperations():
                    try:
                        log_start = _get_datetime_from_name(base_filename)
                    except ValueError:
                        return

                    for filename in os.listdir(OPERATIONS_PATH):

                        if not filename.endswith(METADATA_FILE_EXT):
                            continue

                        # We first restrict the search to a ~48h time window to limit
                        # the number of .yml files we look into
                        try:
                            date = _get_datetime_from_name(filename)
                        except ValueError:
                            continue
                        if (date < log_start) or (
                                date > log_start + timedelta(hours=48)):
                            continue

                        try:
                            submetadata = read_yaml(
                                os.path.join(OPERATIONS_PATH, filename))
                        except Exception:
                            continue

                        if submetadata and submetadata.get(
                                "parent") == base_filename:
                            yield {
                                "name": filename[:-len(METADATA_FILE_EXT)],
                                "description": _get_description_from_name(
                                    filename[:-len(METADATA_FILE_EXT)]
                                ),
                                "success": submetadata.get("success", "?"),
                            }
Example #11
def _diagnosis_read_configuration():
    if not os.path.exists(DIAGNOSIS_CONFIG_FILE):
        return {}

    return read_yaml(DIAGNOSIS_CONFIG_FILE)
Example #12
File: log.py Project: trogeat/yunohost
def log_list(limit=None, with_details=False, with_suboperations=False):
    """
    List available logs

    Keyword argument:
        limit -- Maximum number of logs
        with_details -- Include details (e.g. if the operation was a success).
        Likely to increase the command time as it needs to open and parse the
        metadata file for each log...
        with_suboperations -- Include operations that are not the "main"
        operation but are sub-operations triggered by another ongoing
        operation (e.g. initializing groups/permissions when installing an app)
    """

    operations = {}

    logs = [
        x for x in os.listdir(OPERATIONS_PATH) if x.endswith(METADATA_FILE_EXT)
    ]
    logs = list(reversed(sorted(logs)))

    if limit is not None:
        logs = logs[:limit]

    for log in logs:

        base_filename = log[:-len(METADATA_FILE_EXT)]
        md_path = os.path.join(OPERATIONS_PATH, log)

        entry = {
            "name": base_filename,
            "path": md_path,
            "description": _get_description_from_name(base_filename),
        }

        try:
            entry["started_at"] = _get_datetime_from_name(base_filename)
        except ValueError:
            pass

        try:
            metadata = read_yaml(md_path) or {}  # Make sure this is a dict, not None
        except Exception as e:
            # If we can't read the yaml for some reason, report an error and ignore this entry...
            logger.error(
                m18n.n("log_corrupted_md_file", md_file=md_path, error=e))
            continue

        if with_details:
            entry["success"] = metadata.get("success", "?")
            entry["parent"] = metadata.get("parent")

        if with_suboperations:
            entry["parent"] = metadata.get("parent")
            entry["suboperations"] = []
        elif metadata.get("parent") is not None:
            continue

        operations[base_filename] = entry

    # When displaying suboperations, we build a tree-like structure in which each
    # operation has a "suboperations" list, and each suboperation may itself have
    # its own "suboperations" list, and so on.
    if with_suboperations:
        suboperations = [
            o for o in operations.values() if o["parent"] is not None
        ]
        for suboperation in suboperations:
            parent = operations.get(suboperation["parent"])
            if not parent:
                continue
            parent["suboperations"].append(suboperation)
        operations = [o for o in operations.values() if o["parent"] is None]
    else:
        operations = [o for o in operations.values()]

    operations = list(reversed(sorted(operations, key=lambda o: o["name"])))
    # Reverse the order of logs when in CLI: it is more comfortable to read
    # (avoids unnecessary scrolling)
    is_api = msettings.get("interface") == "api"
    if not is_api:
        operations = list(reversed(operations))

    return {"operation": operations}
Example #13
File: log.py Project: trogeat/yunohost
def log_show(path,
             number=None,
             share=False,
             filter_irrelevant=False,
             with_suboperations=False):
    """
    Display a log file enriched with metadata if any.

    If the file_name is not an absolute path, it will try to search the file in
    the unit operations log path (see OPERATIONS_PATH).

    Argument:
        file_name
        number
        share
    """

    if share:
        filter_irrelevant = True

    if filter_irrelevant:
        filters = [
            r"set [+-]x$",
            r"set [+-]o xtrace$",
            r"local \w+$",
            r"local legacy_args=.*$",
            r".*Helper used in legacy mode.*",
            r"args_array=.*$",
            r"local -A args_array$",
            r"ynh_handle_getopts_args",
            r"ynh_script_progression",
        ]
    else:
        filters = []

    def _filter_lines(lines, filters=[]):

        filters = [re.compile(f) for f in filters]
        return [
            line for line in lines
            if not any(f.search(line.strip()) for f in filters)
        ]

    # Normalize log/metadata paths and filenames
    abs_path = path
    log_path = None
    if not path.startswith("/"):
        abs_path = os.path.join(OPERATIONS_PATH, path)

    if os.path.exists(abs_path) and not path.endswith(METADATA_FILE_EXT):
        log_path = abs_path

    if abs_path.endswith(METADATA_FILE_EXT) or abs_path.endswith(LOG_FILE_EXT):
        base_path = "".join(os.path.splitext(abs_path)[:-1])
    else:
        base_path = abs_path
    base_filename = os.path.basename(base_path)
    md_path = base_path + METADATA_FILE_EXT
    if log_path is None:
        log_path = base_path + LOG_FILE_EXT

    if not os.path.exists(md_path) and not os.path.exists(log_path):
        raise YunohostValidationError("log_does_exists", log=path)

    infos = {}

    # If it's a unit operation, display the name and the description
    if base_path.startswith(CATEGORIES_PATH):
        infos["description"] = _get_description_from_name(base_filename)
        infos["name"] = base_filename

    if share:
        from yunohost.utils.yunopaste import yunopaste

        content = ""
        if os.path.exists(md_path):
            content += read_file(md_path)
            content += "\n============\n\n"
        if os.path.exists(log_path):
            actual_log = read_file(log_path)
            content += "\n".join(_filter_lines(actual_log.split("\n"),
                                               filters))

        url = yunopaste(content)

        logger.info(m18n.n("log_available_on_yunopaste", url=url))
        if msettings.get("interface") == "api":
            return {"url": url}
        else:
            return

    # Display metadata if it exists
    if os.path.exists(md_path):
        try:
            metadata = read_yaml(md_path)
        except MoulinetteError as e:
            error = m18n.n("log_corrupted_md_file", md_file=md_path, error=e)
            if os.path.exists(log_path):
                logger.warning(error)
            else:
                raise YunohostError(error)
        else:
            infos["metadata_path"] = md_path
            infos["metadata"] = metadata

            if "log_path" in metadata:
                log_path = metadata["log_path"]

            if with_suboperations:

                def suboperations():
                    try:
                        log_start = _get_datetime_from_name(base_filename)
                    except ValueError:
                        return

                    for filename in os.listdir(OPERATIONS_PATH):

                        if not filename.endswith(METADATA_FILE_EXT):
                            continue

                        # We first restrict the search to a ~48h time window to limit
                        # the number of .yml files we look into
                        try:
                            date = _get_datetime_from_name(filename)
                        except ValueError:
                            continue
                        if (date < log_start) or (
                                date > log_start + timedelta(hours=48)):
                            continue

                        try:
                            submetadata = read_yaml(
                                os.path.join(OPERATIONS_PATH, filename))
                        except Exception:
                            continue

                        if submetadata and submetadata.get(
                                "parent") == base_filename:
                            yield {
                                "name": filename[:-len(METADATA_FILE_EXT)],
                                "description": _get_description_from_name(
                                    filename[:-len(METADATA_FILE_EXT)]
                                ),
                                "success": submetadata.get("success", "?"),
                            }

                metadata["suboperations"] = list(suboperations())

    # Display logs if they exist
    if os.path.exists(log_path):
        from yunohost.service import _tail

        if number and filters:
            logs = _tail(log_path, int(number * 4))
        elif number:
            logs = _tail(log_path, int(number))
        else:
            logs = read_file(log_path)
        logs = _filter_lines(logs, filters)
        if number:
            logs = logs[-number:]
        infos["log_path"] = log_path
        infos["logs"] = logs

    return infos
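Note: a small self-contained illustration of the path normalization done at the top of log_show, assuming METADATA_FILE_EXT is ".yml" (consistent with the ".yml" comments above) and LOG_FILE_EXT is ".log"; the operation path is hypothetical.

import os

METADATA_FILE_EXT = ".yml"  # consistent with the comments above
LOG_FILE_EXT = ".log"       # assumed value

# Hypothetical absolute path passed by a caller, pointing at the metadata file.
abs_path = "/var/log/yunohost/operations/20210301-120000-app_install.yml"

# Strip the known extension to get the base path, then derive both siblings.
base_path = "".join(os.path.splitext(abs_path)[:-1])
base_filename = os.path.basename(base_path)  # 20210301-120000-app_install
md_path = base_path + METADATA_FILE_EXT      # .../20210301-120000-app_install.yml
log_path = base_path + LOG_FILE_EXT          # .../20210301-120000-app_install.log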
Example #14
File: legacy.py Project: grenagit/yunohost
def translate_legacy_rules_in_ssowant_conf_json_persistent():

    persistent_file_name = "/etc/ssowat/conf.json.persistent"
    if not os.path.exists(persistent_file_name):
        return

    # Ugly hack to try not to miserably fail the migration
    persistent = read_yaml(persistent_file_name)

    legacy_rules = [
        "skipped_urls", "unprotected_urls", "protected_urls", "skipped_regex",
        "unprotected_regex", "protected_regex"
    ]

    if not any(legacy_rule in persistent for legacy_rule in legacy_rules):
        return

    if not isinstance(persistent.get("permissions"), dict):
        persistent["permissions"] = {}

    skipped_urls = persistent.get("skipped_urls", []) + [
        "re:" + r for r in persistent.get("skipped_regex", [])
    ]
    protected_urls = persistent.get("protected_urls", []) + [
        "re:" + r for r in persistent.get("protected_regex", [])
    ]
    unprotected_urls = persistent.get("unprotected_urls", []) + [
        "re:" + r for r in persistent.get("unprotected_regex", [])
    ]

    known_users = list(user_list()["users"].keys())

    for legacy_rule in legacy_rules:
        if legacy_rule in persistent:
            del persistent[legacy_rule]

    if skipped_urls:
        persistent["permissions"]['custom_skipped'] = {
            "users": [],
            "label":
            "Custom permissions - skipped",
            "show_tile":
            False,
            "auth_header":
            False,
            "public":
            True,
            "uris":
            skipped_urls + persistent["permissions"].get("custom_skipped",
                                                         {}).get("uris", []),
        }

    if unprotected_urls:
        persistent["permissions"]['custom_unprotected'] = {
            "users": [],
            "label":
            "Custom permissions - unprotected",
            "show_tile":
            False,
            "auth_header":
            True,
            "public":
            True,
            "uris":
            unprotected_urls + persistent["permissions"].get(
                "custom_unprotected", {}).get("uris", []),
        }

    if protected_urls:
        persistent["permissions"]['custom_protected'] = {
            "users":
            known_users,
            "label":
            "Custom permissions - protected",
            "show_tile":
            False,
            "auth_header":
            True,
            "public":
            False,
            "uris":
            protected_urls + persistent["permissions"].get(
                "custom_protected", {}).get("uris", []),
        }

    write_to_json(persistent_file_name, persistent, sort_keys=True, indent=4)

    logger.warning(
        "Yunohost automatically translated some legacy rules in /etc/ssowat/conf.json.persistent to match the new permission system"
    )
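Note: the code above reads a JSON file (/etc/ssowat/conf.json.persistent) with read_yaml; this works because yaml.safe_load also parses JSON documents, YAML being (for this purpose) a superset of JSON. A tiny illustration with hypothetical content:

import yaml

# JSON content goes through the YAML loader just fine, which is what makes
# reading conf.json.persistent with read_yaml possible.
persistent = yaml.safe_load('{"skipped_urls": ["/foo"], "permissions": {}}')
assert persistent["skipped_urls"] == ["/foo"]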
Example #15
def test_read_yaml(test_yaml):
    content = read_yaml(str(test_yaml))
    assert "foo" in content.keys()
    assert content["foo"] == "bar"
Example #16
    def migrate_LDAP_db():

        logger.info(m18n.n("migration_0011_update_LDAP_database"))

        from yunohost.utils.ldap import _get_ldap_interface

        ldap = _get_ldap_interface()

        ldap_map = read_yaml(
            "/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml"
        )

        try:
            SetupGroupPermissions.remove_if_exists("ou=permission")
            SetupGroupPermissions.remove_if_exists("ou=groups")

            attr_dict = ldap_map["parents"]["ou=permission"]
            ldap.add("ou=permission", attr_dict)

            attr_dict = ldap_map["parents"]["ou=groups"]
            ldap.add("ou=groups", attr_dict)

            attr_dict = ldap_map["children"]["cn=all_users,ou=groups"]
            ldap.add("cn=all_users,ou=groups", attr_dict)

            attr_dict = ldap_map["children"]["cn=visitors,ou=groups"]
            ldap.add("cn=visitors,ou=groups", attr_dict)

            for rdn, attr_dict in ldap_map["depends_children"].items():
                ldap.add(rdn, attr_dict)
        except Exception as e:
            raise YunohostError("migration_0011_LDAP_update_failed", error=e)

        logger.info(m18n.n("migration_0011_create_group"))

        # Create a group for each yunohost user
        user_list = ldap.search(
            "ou=users,dc=yunohost,dc=org",
            "(&(objectclass=person)(!(uid=root))(!(uid=nobody)))",
            ["uid", "uidNumber"],
        )
        for user_info in user_list:
            username = user_info["uid"][0]
            ldap.update(
                "uid=%s,ou=users" % username,
                {
                    "objectClass": [
                        "mailAccount",
                        "inetOrgPerson",
                        "posixAccount",
                        "userPermissionYnh",
                    ]
                },
            )
            user_group_create(
                username,
                gid=user_info["uidNumber"][0],
                primary_group=True,
                sync_perm=False,
            )
            user_group_update(
                groupname="all_users", add=username, force=True, sync_perm=False
            )