Example #1
def test_deb_package_contains_expected_conffiles(host: Host, deb: Path):
    """
    Ensures the `securedrop-app-code` package declares only whitelisted
    `conffiles`. Several files in `/etc/` would automatically be marked
    conffiles, which would break unattended updates to critical package
    functionality such as AppArmor profiles. This test validates overrides
    in the build logic to unset those conffiles.
    """
    # For the securedrop-app-code package:
    if deb.name.startswith("securedrop-app-code"):
        mktemp = host.run("mktemp -d")
        tmpdir = mktemp.stdout.strip()
        # The `--raw-extract` flag includes `DEBIAN/` dir with control files
        host.run("dpkg-deb --raw-extract {} {}".format(deb, tmpdir))
        conffiles_path = os.path.join(tmpdir, "DEBIAN", "conffiles")
        f = host.file(conffiles_path)

        assert f.is_file
        # Ensure that the entirety of the file lists only the logo as conffile;
        # effectively ensures e.g. AppArmor profiles are not conffiles.
        conffiles = f.content_string.rstrip()
        assert conffiles == "/var/www/securedrop/static/i/logo.png"

    # For the securedrop-config package, we want to ensure there are no
    # conffiles so securedrop_additions.sh is squashed every time
    if deb.name.startswith("securedrop-config"):
        c = host.run("dpkg-deb -I {}".format(deb))
        assert "conffiles" not in c.stdout
Example #2
def test_server_created(host: Host) -> None:
    host_funcs = HostFuncsAdapter(host=host, user_name="spire")
    print(f"Test Infra Host: {host} -- {host_funcs}")
    server_dirs = spire_server_info_cmd.ServerDirs(
        config_dir="/home/spire/spire-server/conf",
        data_dir="/home/spire/spire-server/data",
        install_dir="/home/spire/spire-server/",
        service_dir="/home/spire/.config/systemd/user/",
        log_dir="/home/spire/spire-server/logs",
        service_name="spire_server")
    info = spire_server_info_cmd.SpireServerInfo(
        run_command=host_funcs.run_command_with_ansible,
        log_func=host_funcs.no_log,
        server_dirs=server_dirs,
        service_name="spire_server",
        service_scope="user",
        registration_uds_path="/tmp/spire-registration.sock",
        expected_version=test_data.spire_version,
        file_exists_func=host_funcs.file_exists)
    print(f"server_info:{info}")
    assert ("server-installed", *info.is_installed()) == ("server-installed",True, None) \
            and ("service-installed", *info.is_service_installed()) == ("service-installed",True, None) \
            and ("service-enabled", *info.is_service_enabled()) == ("service-enabled",True, None) \
            and ("service-running", *info.is_service_running()) == ("service-running",True, None) \
            and ("server-healthy", *info.is_healthy()) == ("server-healthy",True, None) \
            and info.version == (test_data.spire_version, None) \
            and (info.service_scope, info.service_scope_issue) == (Scope.scope_user, None) \
            , ["should have been installed, enabled and healthy", info]

    spire_server_create_ansible_result: CommandResult = host.run(
        "cat /tmp/spire_server_creation_result.json")
    assert spire_server_create_ansible_result.succeeded and spire_server_create_ansible_result.stdout
    ansible_res_json: Dict[str, str] = json.loads(
        spire_server_create_ansible_result.stdout)
    print(f"ansible_res_json={ansible_res_json}")
Example #3
def test_config_package_contains_expected_files(host: Host) -> None:
    """
    Inspect the package contents to ensure all config files are included in
    the package.
    """
    if SECUREDROP_TARGET_DISTRIBUTION == "xenial":
        wanted_files = [
            "/etc/cron-apt/action.d/9-remove",
            "/etc/profile.d/securedrop_additions.sh",
        ]
    else:
        wanted_files = [
            "/etc/profile.d/securedrop_additions.sh",
            "/opt/securedrop/20auto-upgrades",
            "/opt/securedrop/50unattended-upgrades",
            "/opt/securedrop/reboot-flag",
        ]
    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_config"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example #4
def test_build_deb_packages(host: Host, deb: Path) -> None:
    """
    Sanity check the built Debian packages for Control field
    values and general package structure.
    """
    deb_package = host.file(str(deb))
    assert deb_package.is_file
Example #5
def test_deb_package_lintian(host: Host, deb: Path, tag: str):
    """
    Ensures lintian likes our Debian packages.
    """
    c = host.run("lintian --tags {} --no-tag-display-limit {}".format(
        tag, deb))
    assert len(c.stdout) == 0
Example #6
def run_ansible(host: Host, playbook: str, parameter_values: Dict, run_configuration: Dict = MappingProxyType({})) \
        -> Dict:
    """
    TODO
    :param host:
    :param playbook:
    :param parameter_values:
    :param run_configuration:
    :return:
    """
    run_configuration = dict(**DEFAULT_RUN_CONFIGURATION, **run_configuration)
    # Pop rather than get: `extra_vars` is passed explicitly to `host.ansible` below,
    # so leaving it in `run_configuration` would raise "got multiple values for keyword argument".
    extra_vars = run_configuration.pop("extra_vars", {})

    # Work around the "this task 'X' has extra params, which is only allowed in the following modules..." error,
    # which occurs with complex arguments (lists, dicts).
    # e.g. https://www.reddit.com/r/ansible/comments/72ntlu/ansible_adhoc_commands_with_dict_keys/dnn1zn9/
    #
    # The workaround is implied by:
    # https://github.com/philpep/testinfra/blob/4b7f67541bc85e88817ddcccbd91670cb05fcbbb/testinfra/modules/ansible.py#L96-L101
    for key, value in copy(parameter_values).items():
        if isinstance(value, dict) or isinstance(value, list):
            placeholder_name = generate_random_string()
            extra_vars[placeholder_name] = value
            parameter_values[key] = "{{ %s }}" % (placeholder_name, )

    # For some reason module args are passed through as a string. JSON dump did not work
    parameter_arguments = " ".join(f"{key}={value}" for key, value in parameter_values.items())

    result = host.ansible(playbook, parameter_arguments, **run_configuration, extra_vars=extra_vars)
    # `host.ansible` doesn't offer a good way of distinguishing success and failure. Assuming that `msg` in the output
    # (required by Ansible's `fail_json`) can be used to detect failure
    if "msg" in result:
        raise AnsibleRunError(result["msg"])
    return result
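
# Hedged usage sketch (not from the original source): the module name and
# arguments below are hypothetical, chosen only to illustrate how run_ansible
# moves dict/list parameters into `extra_vars` behind Jinja placeholders.
def test_run_ansible_usage_sketch(host: Host) -> None:
    result = run_ansible(
        host,
        "ansible.builtin.uri",
        {
            "url": "http://127.0.0.1:8080/",
            # dict value: replaced with "{{ <placeholder> }}" and passed via extra_vars
            "headers": {"Accept": "application/json"},
        },
    )
    # run_ansible raises AnsibleRunError on failure, so reaching this point
    # means the module ran; `msg` would only be present on failure.
    assert "msg" not in result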
Example #7
def test_system_time(host: Host) -> None:
    assert not host.package("ntp").is_installed
    assert not host.package("ntpdate").is_installed

    s = host.service("systemd-timesyncd")
    assert s.is_running
    assert s.is_enabled
    assert not s.is_masked

    # File will be touched on every successful synchronization,
    # see 'man systemd-timesyncd'
    assert host.file("/run/systemd/timesync/synchronized").exists

    c = host.run("timedatectl show")
    assert "NTP=yes" in c.stdout
    assert "NTPSynchronized=yes" in c.stdout
Example #8
def test_system_time(host: Host) -> None:
    if host.system_info.codename == "xenial":
        assert host.package("ntp").is_installed
        assert host.package("ntpdate").is_installed

        # TODO: The staging setup timing is too erratic for the
        # following check. If we do want to reinstate it before
        # dropping Xenial support, it should be done in a loop to give
        # ntpd time to sync after the machines are created.

        # c = host.run("ntpq -c rv")
        # assert "leap_none" in c.stdout
        # assert "sync_ntp" in c.stdout
        # assert "refid" in c.stdout
    else:
        assert not host.package("ntp").is_installed
        assert not host.package("ntpdate").is_installed

        s = host.service("systemd-timesyncd")
        assert s.is_running
        assert s.is_enabled
        assert not s.is_masked

        # File will be touched on every successful synchronization,
        # see 'man systemd-timesyncd'
        assert host.file("/run/systemd/timesync/synchronized").exists

        c = host.run("timedatectl show")
        assert "NTP=yes" in c.stdout
        assert "NTPSynchronized=yes" in c.stdout
Example #9
def test_deb_package_control_fields_homepage(host: Host, deb: Path):
    # The `--field` option will display all fields if none are specified.
    c = host.run("dpkg-deb --field {}".format(deb))
    # The OSSEC source packages will have a different homepage;
    # all other packages should set securedrop.org as homepage.
    if deb.name.startswith("ossec-"):
        assert "Homepage: http://ossec.net" in c.stdout
    else:
        assert "Homepage: https://securedrop.org" in c.stdout
Example #10
def test_jinja_files_not_present(host: Host, deb: Path):
    """
    Make sure that jinja (.j2) files were not copied over
    as-is into the debian packages.
    """

    c = host.run("dpkg-deb --contents {}".format(deb))
    # There shouldn't be any files with a .j2 ending
    assert not re.search(r"^.*\.j2$", c.stdout, re.M)
Example #11
def resolve_symlink(host: Host,
                    file_: Union[str, GNUFile],
                    resolve_depth: int = 20) -> str:
    """
    Resolve symlink until actual file or fail in case if symlink is broken.

    Parameters
    ----------
    host:           Host

    file_:          str | GNUFile
        Input symlink to resolve
    resolve_depth:  int
        The depth of symlink resolution (failsafe if symlink is circular)

    Returns
    -------
    GNUFile
        Path to the resolved file

    """
    resolve_items = []
    if isinstance(file_, GNUFile):
        initial_file = file_.path
        new_file = file_
    else:
        initial_file = file_
        new_file = host.file(file_)

    while new_file.exists and new_file.is_symlink and resolve_depth > 0:
        new_path = new_file.linked_to
        resolve_items.append(f'{new_file.path} is linked to {new_path}')
        new_file = host.file(new_path)
        if not new_file.is_symlink:
            return new_file.path
        resolve_depth -= 1

    resolve_out = '\n'.join(resolve_items)
    raise ValueError(f'Broken or circular symlink found: {initial_file}\n'
                     f'Full resolution output:\n{resolve_out}')
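
# Hedged usage sketch (not from the original source): the path is hypothetical
# and only shows how resolve_symlink follows a chain of links to a regular file.
def test_resolve_symlink_usage_sketch(host: Host) -> None:
    # On Debian/Ubuntu, /usr/bin/editor is usually a symlink chain managed by
    # update-alternatives; resolve it and check the final target is a real file.
    target = resolve_symlink(host, "/usr/bin/editor")
    resolved = host.file(target)
    assert resolved.is_file
    assert not resolved.is_symlink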
Example #12
def test_deb_packages_appear_installable(host: Host, deb: Path) -> None:
    """
    Confirms that a dry-run of installation reports no errors.
    Simple check for valid Debian package structure, but not thorough.
    When run on a malformed package, `dpkg` will report:

       dpkg-deb: error: `foo.deb' is not a debian format archive

    Testing application behavior is left to the functional tests.
    """

    package_name = extract_package_name_from_filepath(str(deb))
    assert deb.name.startswith(package_name)

    # sudo is required to call `dpkg --install`, even as dry-run.
    with host.sudo():
        c = host.run("dpkg --install --dry-run {}".format(deb))
        assert "Selecting previously unselected package {}".format(
            package_name) in c.stdout
        regex = "Preparing to unpack [./]+{} ...".format(re.escape(deb.name))
        assert re.search(regex, c.stdout, re.M)
        assert c.rc == 0
Example #13
def test_grsec_metapackage(host: Host):
    """
    Sanity checks on the securedrop-grsec metapackage. Mostly checks
    for presence of PaX flags hook and sysctl settings.
    Does not validate file contents, just presence.
    """

    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_grsec"]))
    contents = c.stdout
    # Custom sysctl options should be present
    assert re.search(r"^.*\./etc/sysctl.d/30-securedrop.conf$", contents, re.M)
    # Post-install kernel hook for managing PaX flags must exist.
    assert re.search(r"^.*\./etc/kernel/postinst.d/paxctl-grub$", contents,
                     re.M)
Example #14
def test_config_package_contains_expected_files(host: Host) -> None:
    """
    Inspect the package contents to ensure all config files are included in
    the package.
    """
    wanted_files = [
        "/etc/cron-apt/action.d/9-remove",
        "/etc/profile.d/securedrop_additions.sh",
    ]
    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_config"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example #15
def test_config_package_contains_expected_files(host: Host) -> None:
    """
    Inspect the package contents to ensure all config files are included in
    the package.
    """
    wanted_files = [
        "/etc/profile.d/securedrop_additions.sh",
        "/opt/securedrop/20auto-upgrades",
        "/opt/securedrop/50unattended-upgrades",
        "/opt/securedrop/reboot-flag",
    ]
    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_config"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example #16
def test_deb_package_control_fields(host: Host, deb: Path) -> None:
    """
    Ensure Debian Control fields are populated as expected in the package.
    These checks are rather superficial, and don't actually confirm that the
    .deb files are not broken. At a later date, consider integration tests
    that actually use these built files during an Ansible provisioning run.
    """
    package_name = extract_package_name_from_filepath(str(deb))
    # The `--field` option will display all fields if none are specified.
    c = host.run("dpkg-deb --field {}".format(deb))

    assert "Maintainer: SecureDrop Team <*****@*****.**>" in c.stdout
    # The securedrop-config package is architecture independent
    if package_name == "securedrop-config":
        assert "Architecture: all" in c.stdout
    else:
        assert "Architecture: amd64" in c.stdout

    assert "Package: {}".format(package_name) in c.stdout
    assert c.rc == 0
Example #17
def test_ossec_binaries_are_present_agent(host: Host):
    """
    Inspect the package contents to ensure all ossec agent binaries are properly
    included in the package.
    """
    wanted_files = [
        "/var/ossec/bin/agent-auth",
        "/var/ossec/bin/ossec-syscheckd",
        "/var/ossec/bin/ossec-agentd",
        "/var/ossec/bin/manage_agents",
        "/var/ossec/bin/ossec-control",
        "/var/ossec/bin/ossec-logcollector",
        "/var/ossec/bin/util.sh",
        "/var/ossec/bin/ossec-execd",
    ]
    c = host.run("dpkg-deb -c {}".format(deb_paths["ossec_agent"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example #18
def test_ossec_binaries_are_present_server(host: Host):
    """
    Inspect the package contents to ensure all ossec server binaries are properly
    included in the package.
    """
    wanted_files = [
        "/var/ossec/bin/ossec-maild",
        "/var/ossec/bin/ossec-remoted",
        "/var/ossec/bin/ossec-syscheckd",
        "/var/ossec/bin/ossec-makelists",
        "/var/ossec/bin/ossec-logtest",
        "/var/ossec/bin/syscheck_update",
        "/var/ossec/bin/ossec-reportd",
        "/var/ossec/bin/ossec-agentlessd",
        "/var/ossec/bin/manage_agents",
        "/var/ossec/bin/rootcheck_control",
        "/var/ossec/bin/ossec-control",
        "/var/ossec/bin/ossec-dbd",
        "/var/ossec/bin/ossec-csyslogd",
        "/var/ossec/bin/ossec-regex",
        "/var/ossec/bin/agent_control",
        "/var/ossec/bin/ossec-monitord",
        "/var/ossec/bin/clear_stats",
        "/var/ossec/bin/ossec-logcollector",
        "/var/ossec/bin/list_agents",
        "/var/ossec/bin/verify-agent-conf",
        "/var/ossec/bin/syscheck_control",
        "/var/ossec/bin/util.sh",
        "/var/ossec/bin/ossec-analysisd",
        "/var/ossec/bin/ossec-execd",
        "/var/ossec/bin/ossec-authd",
    ]
    c = host.run("dpkg-deb --contents {}".format(deb_paths["ossec_server"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example #19
def test_control_helper_files_are_present(host: Host):
    """
    Inspect the package info to get a list of helper scripts
    that should be shipped with the package, e.g. postinst, prerm, etc.
    Necessary due to package build logic retooling.

    Example output from package info, for reference:

      $ dpkg-deb --info securedrop-app-code_0.12.0~rc1_amd64.deb
      new debian package, version 2.0.
      size 13583186 bytes: control archive=11713 bytes.
           62 bytes,     2 lines      conffiles
          657 bytes,    10 lines      control
        26076 bytes,   298 lines      md5sums
         5503 bytes,   159 lines   *  postinst             #!/bin/bash

    Note that the actual output will have trailing whitespace, removed
    from this text description to satisfy linters.
    """
    wanted_files = [
        "conffiles",
        "config",
        "control",
        "postinst",
        "postrm",
        "preinst",
        "prerm",
        "templates",
    ]
    c = host.run("dpkg-deb --info {}".format(deb_paths["securedrop_app_code"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^\s+?\d+ bytes,\s+\d+ lines[\s*]+" + wanted_file + r"\s+.*$",
            c.stdout,
            re.M,
        )
Example #20
def test_package_installed(host: Host):
    host.run_expect([0], 'whereis podman')
Example #21
def fn(host, sudo=True):
    return Host.get_host(
        f"ansible://{host}?ansible_inventory={request.config.option.ansible_inventory}",
        sudo=sudo,
    )
Example #22
def test_assert_all_containers_are_stopped_and_removed(host: Host):
    with host.sudo():
        host.run_expect([0], 'podman container stop -a')
        host.run_expect([0], 'podman container prune -f')
Example #23
def test_agent_created(host: Host, agent_data_dir_local: str) -> None:
    host_funcs = HostFuncsAdapter(host)

    print(f"Test Infra Host: {host}")
    # Copy the data to the local machine because certificate handling
    # requires the files to be local.
    host_funcs.docker_copy_agent_svid_der(agent_data_dir_local)
    dirs = AgentDirs(
        config_dir="/etc/spire-agent",
        data_dir=agent_data_dir_local,
        install_dir="/opt/spire-agent",
        service_name="spire_agent",
        log_dir="/var/log/spire",
        service_dir="/etc/systemd/system",
    )
    agent_info = SpireAgentInfo(run_command=host_funcs.run_command,
                                log_func=host_funcs.no_log,
                                dirs=dirs,
                                service_scope="system",
                                socket_path="/tmp/agent.sock",
                                expected_version=test_data.spire_version,
                                file_exists_func=host_funcs.file_exists)
    print(f"agent_info:{agent_info}")
    assert ("agent-installed", *agent_info.is_agent_installed()) == ("agent-installed",True, None) \
            and ("service-installed", *agent_info.is_service_installed()) == ("service-installed",True, None) \
            and ("service-enabled", *agent_info.is_service_enabled()) == ("service-enabled",True, None) \
            and ("service-running", *agent_info.is_service_running()) == ("service-running",True, None) \
            and ("agent-healthy", *agent_info.is_agent_healthy()) == ("agent-healthy",True, None) \
            and agent_info.version == (test_data.spire_version, None) \
            , ["should have been installed, enabled and healthy", agent_info]

    spire_agent_create_ansible_result: CommandResult = host.run(
        "cat /tmp/spire_agent_creation_result.json")
    assert spire_agent_create_ansible_result.succeeded and spire_agent_create_ansible_result.stdout
    ansible_res_json: Dict[str, str] = json.loads(
        spire_agent_create_ansible_result.stdout)
    print(f"ansible_res_json={ansible_res_json}")
    agent_spiffe_id_sn_and_issue = agent_info.get_agent_spiffe_id_and_sertial_number()
    assert (ansible_res_json.get("actual_spire_agent_spiffe_id"),
            ansible_res_json.get("actual_spire_agent_serial_number"),
            ansible_res_json.get("actual_spire_agent_get_info_issue")
            ) == agent_spiffe_id_sn_and_issue

    spire_agent_service_name = "spire_agent"
    spire_agent_service_filename = f"{spire_agent_service_name}.service"
    agent_health_res: CommandResult = host.run("%s %s", dirs.path_executable,
                                               "healthcheck")
    agent_srv_running_res: CommandResult = host.run(
        "systemctl is-active %s", spire_agent_service_filename)
    agent_srv_enabled_res: CommandResult = host.run(
        "systemctl is-enabled %s", spire_agent_service_filename)


    assert  (agent_health_res.succeeded and "Agent is healthy" in agent_health_res.stdout) \
            and  (agent_srv_enabled_res.succeeded and "enabled" == str(agent_srv_enabled_res.stdout).strip() ) \
            and  (agent_srv_running_res.succeeded and "active" == str(agent_srv_running_res.stdout).strip() )

    spire_server_install_dir = "/opt/spire/"
    spire_service_bin_path = os.path.join(spire_server_install_dir, "bin",
                                          "spire-server")
    cmd = " ".join([
        spire_service_bin_path, "entry", "show", "-parentID",
        agent_spiffe_id_sn_and_issue[0], "-selector",
        f"spiffe_id:{agent_spiffe_id_sn_and_issue[0]}"
    ])
    host_spire_server: Host = ansible_runner.get_host("spire_server")
    print(f"host_spire_server:{host_spire_server}")
    cresult: CommandResult = host_spire_server.run(cmd)
    assert cresult.succeeded, f"""Fail to run show entry:
                                cmd={cmd},
                                result={cresult}
                                """
    outcome = spire_server_entry_cmd.SpireServerEntryShowOutcome(
        cresult.rc, cresult.stdout, cresult.stderr)
    assert outcome.entries is not None and len(
        outcome.entries) == 1, f"Should have had exactly one entry: {outcome}"
    entry: spire_server_entry_cmd.RegistrationEntry = outcome.entries[0]
    assert "spiffe://example.org/agent/local1" == entry.get("spiffe_id")
Example #24
def test_default_packages(host: Host) -> None:
    p = host.package("git")
    assert p.is_installed
Example #25
def test_httpd_container(host: Host):
    with host.sudo():
        host.run_expect([0], 'podman image pull docker.io/httpd:2.4.39')
        host.run_expect(
            [0],
            'podman container create --name httpd-test -p 127.0.0.1:8080:80 httpd:2.4.39',  # noqa: E501
        )
        host.run_expect([0], 'podman container start httpd-test')
    host.run_expect([0], 'sleep 2')

    curl_works = host.run_expect([0],
                                 "curl 'http://127.0.0.1:8080/' 2>/dev/null")
    assert 'It works!' in curl_works.stdout

    with host.sudo():
        host.run_expect([0], 'podman container stop httpd-test')
        host.run_expect([0], 'podman container start httpd-test')
        host.run_expect([0], 'podman container stop httpd-test')
Example #26
def test_default_command(host: Host) -> None:
    f = host.file("/usr/bin/git")
    assert f.is_file
Example #27
def test_shinobi_ui_available(host: Host, testvars: Dict):
    shinobi = host.addr(SHINOBI_HOST)
    assert shinobi.port(testvars["shinobi_host_port"]).is_reachable
Example #28
def test_container_basics(host: Host):
    with host.sudo():
        host.run_expect([0], 'podman image pull docker.io/hello-world')
        run_hello_world = host.run_expect([0], 'podman run --rm hello-world')
        assert 'Hello from Docker!' in run_hello_world.stdout