Example 1
# Presumably used as a context manager; the decorator (and `import contextlib`)
# would live in the original module.
@contextlib.contextmanager
def directory(path):
    # Create the directory via the middleware API...
    call('filesystem.mkdir', path)

    try:
        yield path
    finally:
        # ...and always remove it over SSH when the block exits
        ssh(f'rm -rf {path}')
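
All of these examples assume a pair of integration-test helpers imported at module level: `call`, which invokes a TrueNAS middleware API method, and `ssh`, which runs a shell command on the appliance and returns its output. The import path and usage below are assumptions based on common patterns, not confirmed by this listing:

# Presumed module-level imports shared by all examples
# from middlewared.test.integration.utils import call, ssh
# import contextlib, json, os, re, socket, time
# import pytest, requests

# Hypothetical usage of the `directory` context manager above
# (the path is illustrative):
with directory('/mnt/tank/testdir') as path:
    ssh(f'touch {path}/file')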
Example 2
def test_system_general_ui_rollback():
    try:
        # Apply changes that will be rolled back automatically unless we check in
        call("system.general.update", {
            "ui_port": 81,
            "rollback_timeout": 20,
            "ui_restart_delay": 3
        })

        # Wait for changes to be automatically applied
        time.sleep(10)

        # Ensure that changes were applied and the UI is now inaccessible
        with pytest.raises(requests.ConnectionError):
            requests.get(url(), timeout=10)

        # Ensure that the UI is now reachable on the new port
        assert requests.get(url() + ":81", timeout=10).status_code == 200

        # Ensure that the check-in timeout is ticking down
        assert 3 <= int(ssh("midclt call system.general.checkin_waiting").strip()) < 10

        # Wait for changes to be automatically rolled back
        time.sleep(10)

        # Ensure that the UI is now accessible
        assert requests.get(url(), timeout=10).status_code == 200
    except Exception:
        # Bring things back to normal via SSH in case of any error
        ssh("midclt call system.general.update '{\"ui_port\": 80}'")
        ssh("midclt call system.general.ui_restart 0")
        time.sleep(10)
        raise
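
For reference, a hedged reading of the timing this test relies on (exact middleware semantics are inferred from the assertions, not stated in the source):

# t = 0 s   system.general.update() accepted; the 20 s rollback timer starts
# t ≈ 3 s   ui_restart_delay elapses; the UI restarts on port 81
# t = 10 s  first sleep ends: the UI answers on :81, checkin_waiting counts down
# t ≈ 20 s  rollback_timeout expires with no check-in; old settings are restored
# t > 20 s  second sleep ends: the UI answers on port 80 again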
Example 3
# Presumably decorated with @contextlib.contextmanager (or @pytest.fixture) in the original module
def outdated_pool():
    with another_pool() as pool:
        device = pool["topology"]["data"][0]["path"]
        ssh(f"zpool export {pool['name']}")
        ssh(f"zpool create test -o altroot=/mnt -o feature@sha512=disabled -f {device}"
            )
        yield pool
Example 4
def test_changing_hostname():
    current_hostname = call('network.configuration.config')['hostname']

    call('network.configuration.update', {'hostname': NEW_HOSTNAME})
    assert ssh('hostname').strip() == NEW_HOSTNAME

    call('network.configuration.update', {'hostname': current_hostname})
    assert ssh('hostname').strip() == current_hostname
Example 5
def test_s3_attachment_delegate__works_for_poor_s3_configuration(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as test_dataset:
        old_path = "/mnt/unavailable-pool/s3"
        ssh(f"mkdir -p {old_path}")
        try:
            call("datastore.update", "services.s3", 1, {"s3_disks": old_path})
            assert call("pool.dataset.attachments", test_dataset) == []
        finally:
            ssh(f"rm -rf {old_path}")
Example 6
def test_snapshot(request, has_zvol_sibling):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        ssh(f"mkdir -p /mnt/{ds}/dir1/dir2")
        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/dir1/dir2/blob bs=1M count=1")

        if has_zvol_sibling:
            ssh(f"zfs create -V 1gb {pool}/zvol")

        try:
            with local_s3_task({
                "path": f"/mnt/{ds}/dir1/dir2",
                "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 200}],  # So it'll take 5 seconds
                "snapshot": True,
            }) as task:
                job_id = call("cloudsync.sync", task["id"])

                time.sleep(2.5)

                ps_ax = ssh("ps ax | grep rclone")

                call("core.job_wait", job_id, job=True)

                assert re.search(rf"rclone .+ /mnt/{ds}/.zfs/snapshot/cloud_sync-[0-9]+-[0-9]+/dir1/dir2", ps_ax)

            time.sleep(1)

            assert call("zfs.snapshot.query", [["dataset", "=", ds]]) == []
        finally:
            if has_zvol_sibling:
                ssh(f"zfs destroy -r {pool}/zvol")
Example 7
def test__sets_up_keeps_existing_data():
    setup_stage0()
    ssh("mkdir -p /var/db/collectd/rrd/journal")
    ssh("sh -c 'echo 1 > /var/db/collectd/rrd/journal/file'")
    ssh("mkdir -p /var/db/collectd/rrd/localhost")
    ssh("sh -c 'echo 1 > /var/db/collectd/rrd/localhost/file'")

    assert_reporting_setup()

    assert ssh("cat /var/db/collectd/rrd/journal/file") == "1\n"
    assert ssh("cat /var/db/collectd/rrd/localhost/file") == "1\n"
Example 8
# Presumably decorated with @contextlib.contextmanager in the original module
def anonymous_ftp_server(config=None):
    config = config or {}

    with dataset("anonftp") as ds:
        path = f"/mnt/{ds}"
        ssh(f"chmod 777 {path}")
        with ftp_server({
            "onlyanonymous": True,
            "anonpath": path,
            **config,
        }):
            yield SimpleNamespace(dataset=ds, username="******", password="")
Example 9
def test_system_general_ui_allowlist():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect((host(), 1))  # connect() for UDP doesn't send packets
    local_ip = s.getsockname()[0]

    try:
        protected_endpoints = (
            "/_download",
            "/_upload",
            "/_plugins",
            "/api/docs",
            "/api/v2.0",
            "/progress",
            "/vm/display",
            "/websocket",
        )

        # Ensure we are testing endpoints that do not give 403 by default
        for endpoint in protected_endpoints:
            r = requests.get(url() + endpoint, timeout=10)
            assert r.status_code != 403

        # Set `ui_allowlist` to IP we are using
        call("system.general.update", {"ui_allowlist": [local_ip]})
        call("system.general.ui_restart", 0)
        time.sleep(10)

        # Check everything still works
        for endpoint in protected_endpoints:
            r = requests.get(url() + endpoint, timeout=10)
            assert r.status_code != 403

        # Set it to an invalid IP
        call("system.general.update", {"ui_allowlist": ["8.8.8.8"]})
        call("system.general.ui_restart", 0)
        time.sleep(10)

        # Ensure we are still able to open the UI
        r = requests.get(url(), timeout=10)
        assert r.status_code == 200

        # Ensure that we can't access API
        for endpoint in protected_endpoints:
            r = requests.get(url() + endpoint, timeout=10)
            assert r.status_code == 403
    finally:
        # The API may no longer be accessible to us, so bring things back to normal via SSH
        ssh("midclt call system.general.update '{\"ui_allowlist\": []}'")
        ssh("midclt call system.general.ui_restart 0")
        time.sleep(10)
Example 10
def test__sets_up_from_invalid_link():
    ssh(f"rm -rf {rrd_mount()}/*")
    ssh("rm -rf /var/db/collectd")
    ssh("mkdir /var/db/collectd")
    ssh("ln -s /mnt /var/db/collectd/rrd")

    assert_reporting_setup()
Example 11
def test_lock_queue_size():
    try:
        with mock("test.test1", """
            from middlewared.service import job
            
            @job(lock="test", lock_queue_size=1)
            def mock(self, job, *args):
                with open("/tmp/test", "a") as f:
                    f.write("a\\n")
            
                import time
                time.sleep(5)
        """):
            j1 = call("test.test1")
            j2 = call("test.test1")
            j3 = call("test.test1")
            j4 = call("test.test1")

            call("core.job_wait", j1, job=True)
            call("core.job_wait", j2, job=True)
            call("core.job_wait", j3, job=True)
            call("core.job_wait", j4, job=True)

            assert ssh("cat /tmp/test") == "a\na\n"

            assert j3 == j2
            assert j4 == j2
    finally:
        with contextlib.suppress(FileNotFoundError):
            os.unlink("/tmp/test")
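
The `mock` helper used here and in Example 15 temporarily replaces a middleware method with the supplied source and undoes the replacement on exit. A minimal sketch of how such a helper could be written; the `test.set_mock`/`test.remove_mock` endpoints are an assumption, not confirmed API:

import contextlib

@contextlib.contextmanager
def mock(method, source):
    # Install a temporary implementation for `method` (hypothetical endpoint)
    call("test.set_mock", method, source)
    try:
        yield
    finally:
        # Always remove it again, even if the test body raised (hypothetical endpoint)
        call("test.remove_mock", method)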
Example 12
def test_s3_attachment_delegate__works(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as test_dataset:
        ssh(f"mkdir /mnt/{test_dataset}/s3_root")

        with s3_server(f"{test_dataset}/s3_root"):
            assert call("pool.dataset.attachments", test_dataset) == [{
                "type":
                "S3",
                "service":
                "s3",
                "attachments": [test_dataset]
            }]

            call("pool.dataset.delete", test_dataset)

            assert not call("service.started", "s3")
Example 13
def test_09_timezone_choices():
    timezones_dic = call('system.general.timezone_choices')
    result = ssh('timedatectl list-timezones')
    missing = []
    for timezone in filter(bool, result.split('\n')):
        if timezone not in timezones_dic:
            missing.append(timezone)
    assert missing == []
Example 14
def test_run_onetime__exclude_mountpoint_property(request, exclude_mountpoint_property):
    depends(request, ["pool_04"], scope="session")
    with dataset("src") as src:
        with dataset("src/legacy") as src_legacy:
            ssh(f"zfs set mountpoint=legacy {src_legacy}")
            ssh(f"zfs snapshot -r {src}@2022-01-01-00-00-00")

            try:
                call("replication.run_onetime", {
                    "direction": "PUSH",
                    "transport": "LOCAL",
                    "source_datasets": [src],
                    "target_dataset": f"{pool}/dst",
                    "recursive": True,
                    "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                    "retention_policy": "NONE",
                    "replicate": True,
                    "readonly": "IGNORE",
                    "exclude_mountpoint_property": exclude_mountpoint_property
                }, job=True)

                mountpoint = ssh(f"zfs get -H -o value mountpoint {pool}/dst/legacy").strip()
                if exclude_mountpoint_property:
                    assert mountpoint == f"/mnt/{pool}/dst/legacy"
                else:
                    assert mountpoint == "legacy"
            finally:
                ssh(f"zfs destroy -r {pool}/dst", check=False)
Example 15
def test_private_params_do_not_leak_to_logs():
    with mock("test.test1", """
        from middlewared.service import accepts
        from middlewared.schema import Dict, Str

        @accepts(Dict("test", Str("password", private=True)))
        async def mock(self, args):
            raise Exception()
    """):
        log_before = ssh("cat /var/log/middlewared.log")

        with client(py_exceptions=False) as c:
            with pytest.raises(Exception):
                c.call("test.test1", {"password": "******"})

        log = ssh("cat /var/log/middlewared.log")[len(log_before):]
        assert "Exception while calling test.test1(*[{'password': '******'}])" in log
Example 16
def test_abort(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/blob bs=1M count=1")

        with local_s3_task({
            "path": f"/mnt/{ds}",
            "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 100}],  # So it'll take 10 seconds
            "snapshot": True,
        }) as task:
            job_id = call("cloudsync.sync", task["id"])

            time.sleep(2.5)

            call("core.job_abort", job_id)

            time.sleep(1)

            assert "rclone" not in ssh("ps ax")
            assert call("cloudsync.query", [["id", "=", task["id"]]], {"get": True})["job"]["state"] == "ABORTED"
Example 17
def test_filesystem__file_tail_follow__grouping():
    ssh("echo > /tmp/file_tail_follow.txt")

    with client() as c:
        received = []

        def append(event_type, **kwargs):
            received.append((time.monotonic(), kwargs["fields"]["data"]))

        c.subscribe("filesystem.file_tail_follow:/tmp/file_tail_follow.txt", append)

        ssh("for i in `seq 1 200`; do echo test >> /tmp/file_tail_follow.txt; sleep 0.01; done")

        # Let things settle down
        time.sleep(1)

        received = received[1:]  # Initial file contents
        # We were sending this for 2-3 seconds so we should have received 4-6 blocks with 0.5 sec interval
        assert 4 <= len(received) <= 6, str(received)
        # All blocks should have been received uniformly in time
        assert all(
            0.4 <= b2[0] - b1[0] <= 1.0
            for b1, b2 in zip(received[:-1], received[1:])
        ), str(received)
        # All blocks should contain roughly the same amount of data
        assert all(
            30 <= len(block[1].split("\n")) <= 60
            for block in received[:-1]
        ), str(received)

        # One single send
        ssh("echo finish >> /tmp/file_tail_follow.txt")

        time.sleep(1)
        assert received[-1][1] == "finish\n"
Example 18
def test_exclude_recycle_bin(request):
    depends(request, ["pool_04"], scope="session")
    with local_s3_task({
        "exclude": ["$RECYCLE.BIN/"],
    }) as task:
        ssh(f'mkdir {task["path"]}/\'$RECYCLE.BIN\'')
        ssh(f'touch {task["path"]}/\'$RECYCLE.BIN\'/garbage')
        ssh(f'touch {task["path"]}/file')

        run_task(task)

        assert ssh(f'ls /mnt/{pool}/cloudsync_remote/bucket') == 'file\n'
Example 19
def assert_reporting_setup():
    assert call("reporting.setup")

    assert ssh("[ -L /var/db/collectd/rrd ] && echo OK").strip() == "OK"
    assert ssh("readlink /var/db/collectd/rrd").strip() == rrd_mount()

    hostname = call("reporting.hostname")
    assert set(ssh("ls -1 /var/db/collectd/rrd/").split()) - {"journal"} == {"localhost", hostname}
    assert ssh("[ -d /var/db/collectd/rrd/localhost ] && echo OK").strip() == "OK"
    assert ssh(f"[ -L /var/db/collectd/rrd/{hostname} ] && echo OK").strip() == "OK"
    assert ssh(f"readlink /var/db/collectd/rrd/{hostname}").strip() == "/var/db/collectd/rrd/localhost"
Example 20
def test_system_general_ui_checkin():
    try:
        # Apply changes that will be rolled back automatically unless we check in
        call("system.general.update", {
            "ui_port": 81,
            "rollback_timeout": 20,
            "ui_restart_delay": 3
        })

        # Wait for changes to be automatically applied
        time.sleep(10)

        # Check in our new settings
        assert ssh("midclt call system.general.checkin")

        # A check-in should no longer be pending
        assert ssh("midclt call system.general.checkin_waiting").strip() == "null"
    finally:
        # Bring things back to normal via SSH
        ssh("midclt call system.general.update '{\"ui_port\": 80}'")
        ssh("midclt call system.general.ui_restart 0")
        time.sleep(10)
Example 21
def test_pool_dataset_unlock_recursive():
    key = "0" * 32
    try:
        ssh(f"echo -n '{key}' > /tmp/key")
        ssh(f"zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///tmp/key {pool}/test")
        ssh(f"zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///tmp/key {pool}/test/nested")
        ssh(f"echo TEST > /mnt/{pool}/test/nested/file")
        ssh("rm /tmp/key")
        ssh(f"zfs set readonly=on {pool}/test")
        ssh(f"zfs set readonly=on {pool}/test/nested")
        ssh(f"zfs unmount {pool}/test")
        ssh(f"zfs unload-key -r {pool}/test")

        result = call("pool.dataset.unlock", f"{pool}/test", {
            "recursive": True,
            "datasets": [
                {
                    "name": f"{pool}/test",
                    "key": key.encode("ascii").hex(),
                    "recursive": True,
                },
            ],
        }, job=True)
        assert not result["failed"]

        assert not call("pool.dataset.get_instance", f"{pool}/test")["locked"]
        assert not call("pool.dataset.get_instance", f"{pool}/test/nested")["locked"]

        # Ensure the child dataset is mounted
        assert ssh(f"cat /mnt/{pool}/test/nested/file") == "TEST\n"

        # Ensure the keys are stored in the database to be able to unlock the datasets after reboot
        assert call("datastore.query", "storage.encrypteddataset", [["name", "=", f"{pool}/test"]])
        assert call("datastore.query", "storage.encrypteddataset", [["name", "=", f"{pool}/test/nested"]])
    finally:
        call("pool.dataset.delete", f"{pool}/test", {"recursive": True})
Example 22
def setup_stage0():
    # Ensures `/var/db/collectd/rrd` is a proper system dataset link
    ssh(f"rm -rf {rrd_mount()}/*")
    ssh("rm -rf /var/db/collectd")
    ssh("mkdir /var/db/collectd")
    ssh(f"ln -s {rrd_mount()} /var/db/collectd/rrd")
Example 23
def assert_serial_port_configuration(ports):
    for port, enabled in ports.items():
        is_enabled = ssh(f'systemctl is-enabled serial-getty@{port}.service', False).strip() == 'enabled'
        assert is_enabled is enabled, f'{port!r} enabled assertion failed: {is_enabled!r} != {enabled!r}'
        is_active = ssh(f'systemctl is-active --quiet serial-getty@{port}.service', False, True)['return_code'] == 0
        assert is_active is enabled, f'{port!r} active assertion failed: {is_active!r} != {enabled!r}'
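
A hypothetical invocation of this helper; the port names are illustrative, not taken from the source:

assert_serial_port_configuration({'ttyS0': True, 'ttyS1': False})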
Example 24
def test_get_disk_info():
    global CONTROL
    CONTROL = {
        i['name']: i
        for i in json.loads(ssh('lsblk -bJ -o NAME,SIZE'))['blockdevices']
    }
Example 25
def test__sets_up_from_scratch():
    ssh(f"rm -rf {rrd_mount()}/*")
    ssh("rm -rf /var/db/collectd")

    assert_reporting_setup()
Example 26
def test__sets_up_removes_invalid_directory():
    setup_stage0()
    ssh("mkdir -p /var/db/collectd/rrd/invalidhostname.invaliddomain/data")

    assert_reporting_setup()
Example 27
def test__sets_up_removes_localhost_symlink():
    setup_stage0()
    ssh("ln -s /mnt /var/db/collectd/rrd/localhost")

    assert_reporting_setup()
Example 28
def test__sets_up_with_already_existing_directory():
    ssh(f"rm -rf {rrd_mount()}/*")
    ssh("rm -rf /var/db/collectd")
    ssh("mkdir -p /var/db/collectd/rrd/some-data")

    assert_reporting_setup()
Example 29
def test__sets_up_removes_invalid_symlink():
    setup_stage0()
    ssh("ln -s /mnt /var/db/collectd/rrd/invalidhostname.invaliddomain")

    assert_reporting_setup()
Example 30
def test_device_get_disks_size():
    boot_disk = call('boot.get_disks')[0]
    fdisk_size = int(ssh(f'fdisk -s /dev/{boot_disk}').strip()) * 1024
    assert call('device.get_disks')[boot_disk]['size'] == fdisk_size