Example no. 1
def test_deb_package_contains_expected_conffiles(host: Host, deb: Path):
    """
    Ensures the `securedrop-app-code` package declares only whitelisted
    `conffiles`. Several files in `/etc/` would automatically be marked
    conffiles, which would break unattended updates to critical package
    functionality such as AppArmor profiles. This test validates overrides
    in the build logic to unset those conffiles.
    """
    # For the securedrop-app-code package:
    if deb.name.startswith("securedrop-app-code"):
        mktemp = host.run("mktemp -d")
        tmpdir = mktemp.stdout.strip()
        # The `--raw-extract` flag includes `DEBIAN/` dir with control files
        host.run("dpkg-deb --raw-extract {} {}".format(deb, tmpdir))
        conffiles_path = os.path.join(tmpdir, "DEBIAN", "conffiles")
        f = host.file(conffiles_path)

        assert f.is_file
        # Ensure that the entirety of the file lists only the logo as conffile;
        # effectively ensures e.g. AppArmor profiles are not conffiles.
        conffiles = f.content_string.rstrip()
        assert conffiles == "/var/www/securedrop/static/i/logo.png"

    # For the securedrop-config package, we want to ensure there are no
    # conffiles so securedrop_additions.sh is squashed every time
    if deb.name.startswith("securedrop-config"):
        c = host.run("dpkg-deb -I {}".format(deb))
        assert "conffiles" not in c.stdout
Example no. 2
def test_server_created(host: Host) -> None:
    host_funcs = HostFuncsAdapter(host=host, user_name="spire")
    print(f"Test Infra Host: {host} -- {host_funcs}")
    server_dirs = spire_server_info_cmd.ServerDirs(
        config_dir="/home/spire/spire-server/conf",
        data_dir="/home/spire/spire-server/data",
        install_dir="/home/spire/spire-server/",
        service_dir="/home/spire/.config/systemd/user/",
        log_dir="/home/spire/spire-server/logs",
        service_name="spire_server")
    info = spire_server_info_cmd.SpireServerInfo(
        run_command=host_funcs.run_command_with_ansible,
        log_func=host_funcs.no_log,
        server_dirs=server_dirs,
        service_name="spire_server",
        service_scope="user",
        registration_uds_path="/tmp/spire-registration.sock",
        expected_version=test_data.spire_version,
        file_exists_func=host_funcs.file_exists)
    print(f"server_info:{info}")
    assert ("server-installed", *info.is_installed()) == ("server-installed",True, None) \
            and ("service-installed", *info.is_service_installed()) == ("service-installed",True, None) \
            and ("service-enabled", *info.is_service_enabled()) == ("service-enabled",True, None) \
            and ("service-running", *info.is_service_running()) == ("service-running",True, None) \
            and ("server-healthy", *info.is_healthy()) == ("server-healthy",True, None) \
            and info.version == (test_data.spire_version, None) \
            and (info.service_scope, info.service_scope_issue) == (Scope.scope_user, None) \
            , ["should have been installed, enabled and healthy", info]

    spire_server_create_ansible_result: CommandResult = host.run(
        "cat /tmp/spire_server_creation_result.json")
    assert spire_server_create_ansible_result.succeeded and spire_server_create_ansible_result.stdout
    ansible_res_json: Dict[str, str] = json.loads(
        spire_server_create_ansible_result.stdout)
    print(f"ansible_res_json={ansible_res_json}")
Example no. 3
def test_system_time(host: Host) -> None:
    if host.system_info.codename == "xenial":
        assert host.package("ntp").is_installed
        assert host.package("ntpdate").is_installed

        # TODO: The staging setup timing is too erratic for the
        # following check. If we do want to reinstate it before
        # dropping Xenial support, it should be done in a loop to give
        # ntpd time to sync after the machines are created.

        # c = host.run("ntpq -c rv")
        # assert "leap_none" in c.stdout
        # assert "sync_ntp" in c.stdout
        # assert "refid" in c.stdout
    else:
        assert not host.package("ntp").is_installed
        assert not host.package("ntpdate").is_installed

        s = host.service("systemd-timesyncd")
        assert s.is_running
        assert s.is_enabled
        assert not s.is_masked

        # File will be touched on every successful synchronization,
        # see 'man systemd-timesyncd'
        assert host.file("/run/systemd/timesync/synchronized").exists

        c = host.run("timedatectl show")
        assert "NTP=yes" in c.stdout
        assert "NTPSynchronized=yes" in c.stdout
Example no. 4
def test_deb_package_lintian(host: Host, deb: Path, tag: str):
    """
    Ensures lintian likes our Debian packages.
    """
    c = host.run("lintian --tags {} --no-tag-display-limit {}".format(
        tag, deb))
    assert len(c.stdout) == 0
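
The `tag` argument is parametrized elsewhere in the suite; one plausible way to supply it is a fixture over the lintian tags the project wants to fail on. The tag list below is illustrative only, not the project's actual list:

import pytest

# Hypothetical lintian tags to enforce; the real suite defines its own list.
LINTIAN_TAGS = [
    "non-standard-file-perm",
    "package-contains-vcs-control-file",
]


@pytest.fixture(params=LINTIAN_TAGS)
def tag(request) -> str:
    return request.param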
Example no. 5
def test_config_package_contains_expected_files(host: Host) -> None:
    """
    Inspect the package contents to ensure all config files are included in
    the package.
    """
    if SECUREDROP_TARGET_DISTRIBUTION == "xenial":
        wanted_files = [
            "/etc/cron-apt/action.d/9-remove",
            "/etc/profile.d/securedrop_additions.sh",
        ]
    else:
        wanted_files = [
            "/etc/profile.d/securedrop_additions.sh",
            "/opt/securedrop/20auto-upgrades",
            "/opt/securedrop/50unattended-upgrades",
            "/opt/securedrop/reboot-flag",
        ]
    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_config"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
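
Several of these tests read package locations from a module-level `deb_paths` mapping that is not shown in the snippets. A sketch of how that mapping might be assembled, assuming package filenames follow the usual `<name>_<version>+<distro>_<arch>.deb` convention (the build directory and version strings are placeholders):

import os

# Placeholder values; the real suite derives these from its own configuration.
SECUREDROP_TARGET_DISTRIBUTION = os.environ.get("SECUREDROP_TARGET_DISTRIBUTION", "focal")
build_dir = "/tmp/securedrop-build"
app_version = "2.0.0"
config_version = "0.1.4"

deb_paths = {
    "securedrop_app_code": os.path.join(
        build_dir,
        "securedrop-app-code_{}+{}_amd64.deb".format(app_version, SECUREDROP_TARGET_DISTRIBUTION),
    ),
    "securedrop_config": os.path.join(
        build_dir,
        "securedrop-config_{}+{}_all.deb".format(config_version, SECUREDROP_TARGET_DISTRIBUTION),
    ),
}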
Example no. 6
def test_jinja_files_not_present(host: Host, deb: Path):
    """
    Make sure that jinja (.j2) files were not copied over
    as-is into the debian packages.
    """

    c = host.run("dpkg-deb --contents {}".format(deb))
    # There shouldn't be any files with a .j2 ending
    assert not re.search(r"^.*\.j2$", c.stdout, re.M)
Example no. 7
def test_deb_package_control_fields_homepage(host: Host, deb: Path):
    # The `--field` option will display all fields if none are specified.
    c = host.run("dpkg-deb --field {}".format(deb))
    # The OSSEC source packages will have a different homepage;
    # all other packages should set securedrop.org as homepage.
    if deb.name.startswith("ossec-"):
        assert "Homepage: http://ossec.net" in c.stdout
    else:
        assert "Homepage: https://securedrop.org" in c.stdout
Example no. 8
def test_grsec_metapackage(host: Host):
    """
    Sanity checks on the securedrop-grsec metapackage. Mostly checks
    for presence of PaX flags hook and sysctl settings.
    Does not validate file contents, just presence.
    """

    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_grsec"]))
    contents = c.stdout
    # Custom sysctl options should be present
    assert re.search(r"^.*\./etc/sysctl.d/30-securedrop.conf$", contents, re.M)
    # Post-install kernel hook for managing PaX flags must exist.
    assert re.search(r"^.*\./etc/kernel/postinst.d/paxctl-grub$", contents,
                     re.M)
Example no. 9
def test_system_time(host: Host) -> None:
    assert not host.package("ntp").is_installed
    assert not host.package("ntpdate").is_installed

    s = host.service("systemd-timesyncd")
    assert s.is_running
    assert s.is_enabled
    assert not s.is_masked

    # File will be touched on every successful synchronization,
    # see 'man systemd-timesyncd'
    assert host.file("/run/systemd/timesync/synchronized").exists

    c = host.run("timedatectl show")
    assert "NTP=yes" in c.stdout
    assert "NTPSynchronized=yes" in c.stdout
Example no. 10
def test_config_package_contains_expected_files(host: Host) -> None:
    """
    Inspect the package contents to ensure all config files are included in
    the package.
    """
    wanted_files = [
        "/etc/cron-apt/action.d/9-remove",
        "/etc/profile.d/securedrop_additions.sh",
    ]
    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_config"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example no. 11
def test_config_package_contains_expected_files(host: Host) -> None:
    """
    Inspect the package contents to ensure all config files are included in
    the package.
    """
    wanted_files = [
        "/etc/profile.d/securedrop_additions.sh",
        "/opt/securedrop/20auto-upgrades",
        "/opt/securedrop/50unattended-upgrades",
        "/opt/securedrop/reboot-flag",
    ]
    c = host.run("dpkg-deb --contents {}".format(
        deb_paths["securedrop_config"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example no. 12
def test_deb_package_control_fields(host: Host, deb: Path) -> None:
    """
    Ensure Debian Control fields are populated as expected in the package.
    These checks are rather superficial, and don't actually confirm that the
    .deb files are not broken. At a later date, consider integration tests
    that actually use these built files during an Ansible provisioning run.
    """
    package_name = extract_package_name_from_filepath(str(deb))
    # The `--field` option will display all fields if none are specified.
    c = host.run("dpkg-deb --field {}".format(deb))

    assert "Maintainer: SecureDrop Team <*****@*****.**>" in c.stdout
    # The securedrop-config package is architecture independent
    if package_name == "securedrop-config":
        assert "Architecture: all" in c.stdout
    else:
        assert "Architecture: amd64" in c.stdout

    assert "Package: {}".format(package_name) in c.stdout
    assert c.rc == 0
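
`extract_package_name_from_filepath` is a helper defined elsewhere in the suite. A minimal sketch of what it could do, assuming package filenames follow the standard `<name>_<version>_<arch>.deb` layout (the real helper may be more elaborate):

import os


def extract_package_name_from_filepath(filepath: str) -> str:
    """Return the package name from a Debian package filename.

    Assumes the conventional '<name>_<version>_<arch>.deb' naming, e.g.
    'securedrop-config_0.1.4+focal_all.deb' -> 'securedrop-config'.
    """
    return os.path.basename(filepath).split("_")[0]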
Example no. 13
def test_ossec_binaries_are_present_server(host: Host):
    """
    Inspect the package contents to ensure all ossec server binaries are properly
    included in the package.
    """
    wanted_files = [
        "/var/ossec/bin/ossec-maild",
        "/var/ossec/bin/ossec-remoted",
        "/var/ossec/bin/ossec-syscheckd",
        "/var/ossec/bin/ossec-makelists",
        "/var/ossec/bin/ossec-logtest",
        "/var/ossec/bin/syscheck_update",
        "/var/ossec/bin/ossec-reportd",
        "/var/ossec/bin/ossec-agentlessd",
        "/var/ossec/bin/manage_agents",
        "/var/ossec/bin/rootcheck_control",
        "/var/ossec/bin/ossec-control",
        "/var/ossec/bin/ossec-dbd",
        "/var/ossec/bin/ossec-csyslogd",
        "/var/ossec/bin/ossec-regex",
        "/var/ossec/bin/agent_control",
        "/var/ossec/bin/ossec-monitord",
        "/var/ossec/bin/clear_stats",
        "/var/ossec/bin/ossec-logcollector",
        "/var/ossec/bin/list_agents",
        "/var/ossec/bin/verify-agent-conf",
        "/var/ossec/bin/syscheck_control",
        "/var/ossec/bin/util.sh",
        "/var/ossec/bin/ossec-analysisd",
        "/var/ossec/bin/ossec-execd",
        "/var/ossec/bin/ossec-authd",
    ]
    c = host.run("dpkg-deb --contents {}".format(deb_paths["ossec_server"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example no. 14
def test_ossec_binaries_are_present_agent(host: Host):
    """
    Inspect the package contents to ensure all ossec agent binaries are properly
    included in the package.
    """
    wanted_files = [
        "/var/ossec/bin/agent-auth",
        "/var/ossec/bin/ossec-syscheckd",
        "/var/ossec/bin/ossec-agentd",
        "/var/ossec/bin/manage_agents",
        "/var/ossec/bin/ossec-control",
        "/var/ossec/bin/ossec-logcollector",
        "/var/ossec/bin/util.sh",
        "/var/ossec/bin/ossec-execd",
    ]
    c = host.run("dpkg-deb -c {}".format(deb_paths["ossec_agent"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^.* .{}$".format(wanted_file),
            c.stdout,
            re.M,
        )
Example no. 15
def test_deb_packages_appear_installable(host: Host, deb: Path) -> None:
    """
    Confirms that a dry-run of installation reports no errors.
    Simple check for valid Debian package structure, but not thorough.
    When run on a malformed package, `dpkg` will report:

       dpkg-deb: error: `foo.deb' is not a debian format archive

    Testing application behavior is left to the functional tests.
    """

    package_name = extract_package_name_from_filepath(str(deb))
    assert deb.name.startswith(package_name)

    # sudo is required to call `dpkg --install`, even as dry-run.
    with host.sudo():
        c = host.run("dpkg --install --dry-run {}".format(deb))
        assert "Selecting previously unselected package {}".format(
            package_name) in c.stdout
        regex = "Preparing to unpack [./]+{} ...".format(re.escape(deb.name))
        assert re.search(regex, c.stdout, re.M)
        assert c.rc == 0
Example no. 16
def test_control_helper_files_are_present(host: Host):
    """
    Inspect the package info to get a list of helper scripts
    that should be shipped with the package, e.g. postinst, prerm, etc.
    Necessary due to package build logic retooling.

    Example output from package info, for reference:

      $ dpkg-deb --info securedrop-app-code_0.12.0~rc1_amd64.deb
      new debian package, version 2.0.
      size 13583186 bytes: control archive=11713 bytes.
           62 bytes,     2 lines      conffiles
          657 bytes,    10 lines      control
        26076 bytes,   298 lines      md5sums
         5503 bytes,   159 lines   *  postinst             #!/bin/bash

    Note that the actual output will have trailing whitespace, removed
    from this text description to satisfy linters.
    """
    wanted_files = [
        "conffiles",
        "config",
        "control",
        "postinst",
        "postrm",
        "preinst",
        "prerm",
        "templates",
    ]
    c = host.run("dpkg-deb --info {}".format(deb_paths["securedrop_app_code"]))
    for wanted_file in wanted_files:
        assert re.search(
            r"^\s+?\d+ bytes,\s+\d+ lines[\s*]+" + wanted_file + r"\s+.*$",
            c.stdout,
            re.M,
        )
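
As a design note, the same check could be expressed without regexes over the human-readable `--info` listing: `dpkg-deb --ctrl-tarfile` streams the control archive, and listing its members yields the helper script names directly. A sketch, assuming `tar` is available on the test host (helper name is hypothetical):

def control_members(host, deb_path):
    """List control-archive members (conffiles, postinst, ...) of a .deb package."""
    c = host.run("dpkg-deb --ctrl-tarfile {} | tar -t".format(deb_path))
    return [m.lstrip("./") for m in c.stdout.split() if m != "./"]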
Example no. 17
def test_agent_created(host: Host, agent_data_dir_local: str) -> None:
    host_funcs = HostFuncsAdapter(host)

    print(f"Test Infra Host: {host}")
    # Copy data to the local machine because the certificates require
    # the files to be local.
    host_funcs.docker_copy_agent_svid_der(agent_data_dir_local)
    dirs = AgentDirs(
        config_dir="/etc/spire-agent",
        data_dir=agent_data_dir_local,
        install_dir="/opt/spire-agent",
        service_name="spire_agent",
        log_dir="/var/log/spire",
        service_dir="/etc/systemd/system",
    )
    agent_info = SpireAgentInfo(run_command=host_funcs.run_command,
                                log_func=host_funcs.no_log,
                                dirs=dirs,
                                service_scope="system",
                                socket_path="/tmp/agent.sock",
                                expected_version=test_data.spire_version,
                                file_exists_func=host_funcs.file_exists)
    print(f"agent_info:{agent_info}")
    assert ("agent-installed", *agent_info.is_agent_installed()) == ("agent-installed",True, None) \
            and ("service-installed", *agent_info.is_service_installed()) == ("service-installed",True, None) \
            and ("service-enabled", *agent_info.is_service_enabled()) == ("service-enabled",True, None) \
            and ("service-running", *agent_info.is_service_running()) == ("service-running",True, None) \
            and ("agent-healthy", *agent_info.is_agent_healthy()) == ("agent-healthy",True, None) \
            and agent_info.version == (test_data.spire_version, None) \
            , ["should have been installed, enabled and healthy", agent_info]

    spire_agent_create_ansible_result: CommandResult = host.run(
        "cat /tmp/spire_agent_creation_result.json")
    assert spire_agent_create_ansible_result.succeeded and spire_agent_create_ansible_result.stdout
    ansible_res_json: Dict[str, str] = json.loads(
        spire_agent_create_ansible_result.stdout)
    print(f"ansible_res_json={ansible_res_json}")
    agent_spiffe_id_sn_and_issue = agent_info.get_agent_spiffe_id_and_sertial_number()
    assert (ansible_res_json.get("actual_spire_agent_spiffe_id"),
            ansible_res_json.get("actual_spire_agent_serial_number"),
            ansible_res_json.get("actual_spire_agent_get_info_issue")
            ) == agent_spiffe_id_sn_and_issue

    spire_agent_service_name = "spire_agent"
    spire_agent_service_filename = f"{spire_agent_service_name}.service"
    agent_health_res: CommandResult = host.run("%s %s", dirs.path_executable,
                                               "healthcheck")
    agent_srv_running_res: CommandResult = host.run(
        "systemctl is-active %s", spire_agent_service_filename)
    agent_srv_enabled_res: CommandResult = host.run(
        "systemctl is-enabled %s", spire_agent_service_filename)

    assert (agent_health_res.succeeded and "Agent is healthy" in agent_health_res.stdout) \
        and (agent_srv_enabled_res.succeeded and "enabled" == str(agent_srv_enabled_res.stdout).strip()) \
        and (agent_srv_running_res.succeeded and "active" == str(agent_srv_running_res.stdout).strip())

    spire_server_install_dir = "/opt/spire/"
    spire_service_bin_path = os.path.join(spire_server_install_dir, "bin",
                                          "spire-server")
    cmd = " ".join([
        spire_service_bin_path, "entry", "show", "-parentID",
        agent_spiffe_id_sn_and_issue[0], "-selector",
        f"spiffe_id:{agent_spiffe_id_sn_and_issue[0]}"
    ])
    host_spire_server: Host = ansible_runner.get_host("spire_server")
    print(f"host_spire_server:{host_spire_server}")
    cresult: CommandResult = host_spire_server.run(cmd)
    assert cresult.succeeded, f"""Fail to run show entry:
                                cmd={cmd},
                                result={cresult}
                                """
    outcome = spire_server_entry_cmd.SpireServerEntryShowOutcome(
        cresult.rc, cresult.stdout, cresult.stderr)
    assert outcome.entries is not None and len(
        outcome.entries) == 1, f"Should have had exactly one entry: {outcome}"
    entry: spire_server_entry_cmd.RegistrationEntry = outcome.entries[0]
    assert "spiffe://example.org/agent/local1" == entry.get("spiffe_id")