Example #1
0
def test_system_general_ui_rollback():
    """Verify automatic rollback of a UI port change.

    Moves the web UI to port 81 with a 20-second rollback timeout; since we
    never check in the change, the middleware must revert to the old port on
    its own.  On any failure the old port is restored over SSH so later tests
    are not left with an unreachable UI.
    """
    try:
        # Apply changes that we will deliberately never check in.
        call("system.general.update", {
            "ui_port": 81,
            "rollback_timeout": 20,
            "ui_restart_delay": 3
        })

        # Wait for changes to be automatically applied
        # (ui_restart_delay is 3s; 10s gives plenty of margin).
        time.sleep(10)

        # Ensure that changes were applied and the UI is now inaccessible
        # on the original port.
        with pytest.raises(requests.ConnectionError):
            requests.get(url(), timeout=10)

        # Additionally ensure that the UI is still working on the new port.
        assert requests.get(url() + ":81", timeout=10).status_code == 200

        # Ensure that the check-in timeout is ticking back: with a 20s
        # rollback_timeout and ~10s elapsed, the remaining time should be
        # somewhere in [3, 10).
        assert 3 <= int(
            ssh("midclt call system.general.checkin_waiting").strip()) < 10

        # Wait for changes to be automatically rolled back.
        time.sleep(10)

        # Ensure that the UI is accessible on the original port again.
        assert requests.get(url(), timeout=10).status_code == 200
    except Exception:
        # Bring things back to normal via SSH in case of any error, then
        # re-raise so the failure is still reported.
        ssh("midclt call system.general.update '{\"ui_port\": 80}'")
        ssh("midclt call system.general.ui_restart 0")
        time.sleep(10)
        raise
Example #2
0
def test_systemdataset_migrate_error(request):
    """Exporting a pool holding the system dataset must surface a
    reconfiguration failure from ``systemdataset.update``.

    On HA this test will fail with the error below if failover is enabled:
    [ENOTSUP] Disable failover before exporting last pool on system.
    Therefore failover is disabled for the duration of the test and
    re-enabled afterwards.
    """
    # NOTE: in the original code this docstring was placed *after* the
    # depends() call, making it a no-op string expression; it is now a real
    # docstring.
    depends(request, ["pool_04"], scope="session")

    # Disable Failover so the export is permitted on HA systems.
    if ha is True:
        results = PUT('/failover/', {"disabled": True, "master": True})
        assert results.status_code == 200, results.text

    pool = call("pool.query", [["name", "=", pool_name]], {"get": True})

    # Mock systemdataset.update to always fail, so pool.export hits the
    # "reconfiguration failed" path.
    with mock(
            "systemdataset.update", """\
        from middlewared.service import job, CallError

        @job()
        def mock(self, job, *args):
            raise CallError("Test error")
    """):
        with pytest.raises(ClientException) as e:
            call("pool.export", pool["id"], job=True)

        assert e.value.error == (
            "[EFAULT] This pool contains system dataset, but its reconfiguration failed: [EFAULT] Test error"
        )

    # Enable back Failover.
    if ha is True:
        results = PUT('/failover/', {"disabled": False, "master": True})
        assert results.status_code == 200, results.text
Example #3
0
def test_snapshot(request, has_zvol_sibling):
    """Verify that a cloud sync task with ``snapshot: True`` syncs from a
    temporary ZFS snapshot and cleans that snapshot up afterwards.

    ``has_zvol_sibling`` additionally creates a zvol next to the dataset —
    presumably to verify recursive snapshot handling tolerates zvols; confirm
    against the parametrization.
    """
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        # Create some data to sync (1 MiB blob in a nested directory).
        ssh(f"mkdir -p /mnt/{ds}/dir1/dir2")
        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/dir1/dir2/blob bs=1M count=1")

        if has_zvol_sibling:
            ssh(f"zfs create -V 1gb {pool}/zvol")

        try:
            with local_s3_task({
                "path": f"/mnt/{ds}/dir1/dir2",
                "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 200}],  # So it'll take 5 seconds
                "snapshot": True,
            }) as task:
                job_id = call("cloudsync.sync", task["id"])

                # Sample the process table mid-sync (the bwlimit above keeps
                # rclone running for roughly 5 seconds).
                time.sleep(2.5)

                ps_ax = ssh("ps ax | grep rclone")

                call("core.job_wait", job_id, job=True)

                # rclone must be reading from the .zfs/snapshot directory,
                # not from the live dataset.
                assert re.search(rf"rclone .+ /mnt/{ds}/.zfs/snapshot/cloud_sync-[0-9]+-[0-9]+/dir1/dir2", ps_ax)

            time.sleep(1)

            # The temporary cloud_sync-* snapshot must have been destroyed.
            assert call("zfs.snapshot.query", [["dataset", "=", ds]]) == []
        finally:
            if has_zvol_sibling:
                ssh(f"zfs destroy -r {pool}/zvol")
Example #4
0
def test_delete_retention(request):
    """Verify that deleting a periodic snapshot task with
    ``fixate_removal_date`` stamps an ``org.truenas:destroy_at_*`` property
    on affected snapshots and that the retention info is derived from it.
    """
    depends(request, ["pool_04"], scope="session")

    with dataset("snapshottask-retention-test-2") as ds:
        call("zettarepl.load_removal_dates")

        # Create a snapshot task with a 1-week lifetime.
        result = POST(
            "/pool/snapshottask/", {
                "dataset": ds,
                "recursive": True,
                "exclude": [],
                "lifetime_value": 1,
                "lifetime_unit": "WEEK",
                "naming_schema": "auto-%Y-%m-%d-%H-%M-1y",
                "schedule": {
                    "minute": "*",
                },
            })
        assert result.status_code == 200, result.text
        task_id = result.json()["id"]

        # Create a snapshot matching the task's naming schema.
        result = POST("/zfs/snapshot/", {
            "dataset": ds,
            "name": "auto-2021-04-12-06-30-1y",
        })
        assert result.status_code == 200, result.text

        # Deleting the task should affect the retention of exactly that
        # snapshot.
        result = POST(
            f"/pool/snapshottask/id/{task_id}/delete_will_change_retention_for/"
        )
        assert result.status_code == 200, result.text
        assert result.json() == {
            ds: ["auto-2021-04-12-06-30-1y"],
        }

        # Delete the task, fixating removal dates onto the snapshots.
        result = DELETE(f"/pool/snapshottask/id/{task_id}/", {
            "fixate_removal_date": True,
        })
        assert result.status_code == 200, result.text

        # Wait for the background fixate job to finish.
        results = GET(
            "/core/get_jobs/?method=pool.snapshottask.fixate_removal_date")
        job_status = wait_on_job(results.json()[-1]["id"], 180)
        assert job_status["state"] == "SUCCESS", str(job_status["results"])

        # The snapshot must carry a destroy_at property one week after its
        # creation date (2021-04-12 06:30 + 1 week = 2021-04-19 06:30).
        result = GET(
            f"/zfs/snapshot/?id={ds}@auto-2021-04-12-06-30-1y&extra.retention=true"
        )
        assert result.status_code == 200, result.text
        assert ([
            v for k, v in result.json()[0]["properties"].items()
            if k.startswith("org.truenas:destroy_at_")
        ][0]["value"] == "2021-04-19T06:30:00")
        # Retention must now be sourced from the property, reported as epoch
        # milliseconds.
        assert result.json()[0]["retention"] == {
            "datetime": {
                "$date": (datetime(2021, 4, 19, 6, 30) -
                          datetime(1970, 1, 1)).total_seconds() * 1000,
            },
            "source": "property",
        }
Example #5
0
def test_run_onetime__exclude_mountpoint_property(request, exclude_mountpoint_property):
    """Replicate a tree containing a legacy-mountpoint child and check how
    ``exclude_mountpoint_property`` affects the replicated mountpoint."""
    depends(request, ["pool_04"], scope="session")
    with dataset("src") as src:
        with dataset("src/legacy") as src_legacy:
            # Give the child a legacy mountpoint, then snapshot the tree.
            ssh(f"zfs set mountpoint=legacy {src_legacy}")
            ssh(f"zfs snapshot -r {src}@2022-01-01-00-00-00")

            try:
                payload = {
                    "direction": "PUSH",
                    "transport": "LOCAL",
                    "source_datasets": [src],
                    "target_dataset": f"{pool}/dst",
                    "recursive": True,
                    "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                    "retention_policy": "NONE",
                    "replicate": True,
                    "readonly": "IGNORE",
                    "exclude_mountpoint_property": exclude_mountpoint_property
                }
                call("replication.run_onetime", payload, job=True)

                actual = ssh(f"zfs get -H -o value mountpoint {pool}/dst/legacy").strip()
                # Excluding the property means the target inherits a normal
                # mountpoint; otherwise "legacy" is replicated verbatim.
                expected = (
                    f"/mnt/{pool}/dst/legacy"
                    if exclude_mountpoint_property
                    else "legacy"
                )
                assert actual == expected
            finally:
                ssh(f"zfs destroy -r {pool}/dst", check=False)
Example #6
0
def common_min_max_txg_snapshot_test(test_min_txg=False, test_max_txg=False):
    """Shared body for snapshot-query txg filtering tests.

    Creates 20 snapshots, then repeatedly queries with ``min_txg`` and/or
    ``max_txg`` extra args (depending on the flags) and asserts that the
    returned count matches the expected slice of the created snapshots.
    At least one of the two flags must be enabled.
    """
    # Reject a call that exercises neither filter (same check as before,
    # written directly instead of via all()/is comparisons).
    assert not (test_min_txg is False and test_max_txg is False)

    with dataset('test') as test_dataset:
        # Create the snapshots, recording each one's createtxg value.
        created_snaps = []
        total_snaps = 20
        for i in range(total_snaps):
            snap = call('zfs.snapshot.create', {
                'dataset': test_dataset,
                'name': f'snap_{i}'
            })
            created_snaps.append(int(snap['properties']['createtxg']['value']))

        # Sanity check: an unfiltered query sees all of them.
        assert call('zfs.snapshot.query', [['dataset', '=', test_dataset]],
                    {'count': True}) == len(created_snaps)

        # Use floor division (//) instead of int(x / 2): identical for these
        # non-negative counts and the idiomatic form.
        for i in range(total_snaps // 2 - 1):
            expected = created_snaps
            extra_args = {}
            if test_min_txg:
                # Drop the first i snapshots by asking for txg >= the
                # (i+1)-th snapshot's createtxg.
                expected = created_snaps[i:]
                extra_args['min_txg'] = expected[0]
            if test_max_txg:
                # Keep only the first half of the remaining snapshots.
                expected = expected[:len(expected) // 2]
                extra_args['max_txg'] = expected[-1]

            assert call('zfs.snapshot.query', [['dataset', '=', test_dataset]],
                        {
                            'count': True,
                            'extra': extra_args
                        }) == len(expected)
Example #7
0
def iscsi_extent(data):
    """Context fixture: create an iSCSI extent from *data*, yield it, and
    always delete it on exit."""
    created = call("iscsi.extent.create", data)
    try:
        yield created
    finally:
        # Cleanup runs even if the body raised.
        call("iscsi.extent.delete", created["id"])
Example #8
0
def test_create_replication(request, credentials, periodic_snapshot_tasks, req,
                            error):
    """Create a replication task from a parametrized request, expecting
    either a specific validation error or a successful create (in which case
    a restore task is also created and everything is cleaned up)."""
    depends(request, ["pool_04"], scope="session")

    # Resolve fixture references in the request payload to real ids.
    if "ssh_credentials" in req:
        req["ssh_credentials"] = credentials["id"]
    if "periodic_snapshot_tasks" in req:
        req["periodic_snapshot_tasks"] = [
            periodic_snapshot_tasks[key]["id"]
            for key in req["periodic_snapshot_tasks"]
        ]

    name = "".join(random.choice(string.ascii_letters) for _ in range(64))
    response = POST("/replication/", dict(BASE_REPLICATION, name=name, **req))

    if error:
        # Expect a validation failure naming the offending field.
        assert response.status_code == 422, response.text
        assert f"replication_create.{error}" in response.json(), response.text
        return

    assert response.status_code == 200, response.text
    task_id = response.json()["id"]

    # A restore task can be derived from the freshly created one.
    restore = POST(f"/replication/id/{task_id}/restore/", {
        "name": f"restore {name}",
        "target_dataset": "data/restore",
    })
    assert restore.status_code == 200, restore.text

    call("replication.delete", restore.json()["id"])
    call("replication.delete", task_id)
Example #9
0
def directory(path):
    """Context fixture: create *path* on the remote host, yield it, and
    remove it recursively on exit."""
    call('filesystem.mkdir', path)
    try:
        yield path
    finally:
        # Best-effort recursive removal, even if the body raised.
        ssh(f'rm -rf {path}')
Example #10
0
def test_custom_s3(request, credential_attributes, result):
    """Sync through a custom S3 credential configuration and verify the
    region that ends up in the generated rclone config."""
    depends(request, ["pool_04"], scope="session")

    cred_payload = {
        "name": "S3",
        "provider": "S3",
        "attributes": {
            "access_key_id": "test",
            "secret_access_key": "test",
            **credential_attributes,
        },
    }
    # Context managers are entered in the same order as the original
    # nested form: dataset, credential, task, mock_rclone.
    with dataset("test") as ds, credential(cred_payload) as c:
        task_payload = {
            "direction": "PUSH",
            "transfer_mode": "COPY",
            "path": f"/mnt/{ds}",
            "credentials": c["id"],
            "attributes": {
                "bucket": "bucket",
                "folder": "",
            },
        }
        with task(task_payload) as t, mock_rclone() as mr:
            call("cloudsync.sync", t["id"])

            # Give the mocked rclone a moment to record its config.
            time.sleep(2.5)

            assert mr.result["config"]["remote"]["region"] == "fr-par"
Example #11
0
def test_changing_hostname():
    """Set a new hostname, verify it took effect, then restore and verify
    the original one."""
    original = call('network.configuration.config')['hostname']

    call('network.configuration.update', {'hostname': NEW_HOSTNAME})
    assert ssh('hostname').strip() == NEW_HOSTNAME

    # Put the previous hostname back and confirm the round trip.
    call('network.configuration.update', {'hostname': original})
    assert ssh('hostname').strip() == original
Example #12
0
def test_empty_for_locked_root_dataset():
    """A locked, passphrase-encrypted root dataset must report no attached
    processes."""
    pool_options = {
        "encryption": True,
        "encryption_options": {
            "passphrase": "passphrase"
        }
    }
    with another_pool(pool_options):
        call("pool.dataset.lock", "test", job=True)
        assert call("pool.dataset.processes", "test") == []
Example #13
0
    def test_pruning_for_deleted_chart_release_images(request):
        # Pruning with `remove_unused_images` must delete the container
        # images that belonged to an already-deleted chart release.
        depends(request, ['setup_kubernetes'], scope='session')
        with chart_release("prune-test2", {"image": {"repository": "nginx"}}) as chart_release_data:
            # Record the release's images while it still exists; the count
            # must be non-zero for the post-prune check to be meaningful.
            container_images = chart_release_data["resources"]["container_images"]
            before_deletion_images = get_num_of_images(container_images)
            assert before_deletion_images != 0

        # The chart release is deleted when the context manager exits, so
        # its images are now unused and pruning must remove all of them.
        call("container.prune", {"remove_unused_images": True})
        assert get_num_of_images(container_images) == 0
Example #14
0
def test__disable_proactive_support_for_valid_alert_class(request):
    """Disabling proactive support for an alert class that supports it must
    succeed without raising."""
    depends(request, ["pool_04"], scope="session")
    payload = {
        "classes": {
            "ZpoolCapacityNotice": {
                "proactive_support": False,
            },
        },
    }
    call("alertclasses.update", payload)
Example #15
0
def test_disabling_serial_port():
    """For every serial port choice, disabling the serial console must be
    reflected in the returned config and in the actual port configuration."""
    ports = call('system.advanced.serial_port_choices')
    assert 'ttyS0' in ports, ports

    # The expected configuration is the same on every iteration; build it
    # once instead of re-creating the dict comprehension inside the loop.
    all_disabled = {p: False for p in ports}

    for port in ports:
        test_config = {'serialconsole': False, 'serialport': port}
        config = call('system.advanced.update', test_config)
        # The update result must echo back what we set.
        for k, v in test_config.items():
            assert config[k] == v, config
        assert_serial_port_configuration(all_disabled)
Example #16
0
def test_s3_attachment_delegate__works_for_poor_s3_configuration(request):
    """pool.dataset.attachments must not break when the S3 service points at
    a path on a pool that does not exist."""
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as test_dataset:
        stale_path = "/mnt/unavailable-pool/s3"
        ssh(f"mkdir -p {stale_path}")
        try:
            # Point the S3 service at the bogus location directly in the
            # datastore, then make sure attachment lookup still works.
            call("datastore.update", "services.s3", 1, {"s3_disks": stale_path})
            assert call("pool.dataset.attachments", test_dataset) == []
        finally:
            ssh(f"rm -rf {stale_path}")
Example #17
0
def test__nonexisting_alert_class():
    """Updating an unknown alert class must fail validation, pointing at the
    offending class key."""
    payload = {
        "classes": {
            "Invalid": {
                "level": "WARNING",
            },
        },
    }
    with pytest.raises(ValidationErrors) as ve:
        call("alertclasses.update", payload)

    assert ve.value.errors[0].attribute == "alert_class_update.classes.Invalid"
Example #18
0
def test_creating_root_ca():
    """An internally created root CA must be flagged as CA_type_internal."""
    params = {
        **get_cert_params(),
        'name': 'test_root_ca',
        'create_type': 'CA_CREATE_INTERNAL',
    }
    root_ca = call('certificateauthority.create', params)
    try:
        assert root_ca['CA_type_internal'] is True, root_ca
    finally:
        # Always remove the CA so later tests start clean.
        call('certificateauthority.delete', root_ca['id'])
Example #19
0
def credential(data):
    """Context fixture: create a cloudsync credential (name defaults to
    "Test"), yield it, and delete it on exit."""
    payload = {"name": "Test", **data}

    # Use a distinct local name so we do not shadow this function's name.
    created = call("cloudsync.credentials.create", payload)

    try:
        yield created
    finally:
        call("cloudsync.credentials.delete", created["id"])
Example #20
0
def test_errors(request, id, quota_type, error):
    """Setting a quota for a restricted id must be rejected with the
    expected error message."""
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        quota = {
            "quota_type": quota_type,
            "id": id,
            "quota_value": 5242880,
        }
        with pytest.raises(ValidationErrors) as ve:
            call("pool.dataset.set_quota", ds, [quota])

        expected = f"Setting {error} [0] is not permitted."
        assert ve.value.errors[0].errmsg == expected
Example #21
0
def test_importing_ca():
    """A CA imported from an existing certificate/key pair must be flagged
    as CA_type_existing."""
    with root_certificate_authority('root_ca_test') as root_ca:
        params = {
            'certificate': root_ca['certificate'],
            'privatekey': root_ca['privatekey'],
            'name': 'test_imported_ca',
            'create_type': 'CA_CREATE_IMPORTED',
        }
        imported_ca = call('certificateauthority.create', params)
        try:
            assert imported_ca['CA_type_existing'] is True, imported_ca
        finally:
            call('certificateauthority.delete', imported_ca['id'])
Example #22
0
def write_to_log(string):
    """Cause *string* to be written to the middleware log (via a mocked
    test.test1 method that logs it) and assert it appears only afterwards.
    """
    # Precondition: the marker string is not in the log yet, so its later
    # presence can only come from our mock call.
    assert string not in read_log()

    # The mock body embeds the marker via {string!r}; the surrounding text
    # is the runtime code that will be executed server-side.
    with mock(
            "test.test1", f"""
        from middlewared.service import lock

        async def mock(self, *args):
            self.logger.debug({string!r})
    """):
        call("test.test1")

    assert string in read_log()
Example #23
0
def test_creating_cert_from_root_ca():
    """A certificate signed by a root CA must be flagged as
    cert_type_internal."""
    with root_certificate_authority('root_ca_test') as root_ca:
        params = {
            'name': 'cert_test',
            'signedby': root_ca['id'],
            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
            **get_cert_params(),
        }
        cert = call('certificate.create', params, job=True)
        try:
            assert cert['cert_type_internal'] is True, cert
        finally:
            call('certificate.delete', cert['id'], job=True)
Example #24
0
def test_ca_imported_issuer_reported_correctly():
    """An imported CA must report 'external' as its issuer."""
    with root_certificate_authority('root_ca_test') as root_ca:
        params = {
            'certificate': root_ca['certificate'],
            'privatekey': root_ca['privatekey'],
            'name': 'test_imported_ca',
            'create_type': 'CA_CREATE_IMPORTED',
        }
        imported_ca = call('certificateauthority.create', params)
        try:
            assert imported_ca['issuer'] == 'external', imported_ca
        finally:
            call('certificateauthority.delete', imported_ca['id'])
Example #25
0
def test__normal_alert_class():
    """Updating a known alert class persists the settings and they are
    reflected verbatim in the stored config."""
    payload = {
        "classes": {
            "UPSBatteryLow": {
                "level": "CRITICAL",
                "policy": "IMMEDIATELY",
            },
        },
    }

    call("alertclasses.update", payload)

    # The stored config is the payload plus an id we don't care about.
    assert call("alertclasses.config") == {"id": ANY, **payload}
Example #26
0
def test__disable_proactive_support_for_invalid_alert_class(request):
    """Disabling proactive support for an alert class that does not support
    it must fail validation on that exact attribute."""
    depends(request, ["pool_04"], scope="session")
    payload = {
        "classes": {
            "UPSBatteryLow": {
                "proactive_support": False,
            },
        },
    }
    with pytest.raises(ValidationErrors) as ve:
        call("alertclasses.update", payload)

    attribute = ve.value.errors[0].attribute
    assert attribute == "alert_class_update.classes.UPSBatteryLow.proactive_support"
Example #27
0
def fixture1():
    """Fixture: build matching dataset trees on the main pool and on a
    second pool ("test"), snapshot both trees recursively twice, then yield.

    Context managers are entered in the same order as the original nested
    form, and snapshots are created in the same order.
    """
    with another_pool(), \
            dataset("test"), \
            dataset("test/test1"), \
            dataset("test/test2"), \
            dataset("test", pool="test"), \
            dataset("test/test1", pool="test"), \
            dataset("test/test2", pool="test"):
        # Two recursive snapshots per tree, main pool first.
        for target in (f"{pool}/test", "test/test"):
            for snap_name in ("snap-1", "snap-2"):
                call(
                    "zfs.snapshot.create",
                    {"dataset": target, "name": snap_name, "recursive": True},
                )
        yield
Example #28
0
def test_ca_intermediate_issuer_reported_correctly():
    """An intermediate CA must report its signing root CA as issuer."""
    with root_certificate_authority('root_ca_test') as root_ca:
        params = {
            **get_cert_params(),
            'signedby': root_ca['id'],
            'name': 'test_intermediate_ca',
            'create_type': 'CA_CREATE_INTERMEDIATE',
        }
        intermediate_ca = call('certificateauthority.create', params)
        # Re-fetch the root CA so the comparison uses its current state
        # (creating the intermediate may have modified it).
        root_ca = call('certificateauthority.get_instance', root_ca['id'])
        try:
            assert intermediate_ca['issuer'] == root_ca, intermediate_ca
        finally:
            call('certificateauthority.delete', intermediate_ca['id'])
Example #29
0
def test_creating_intermediate_ca():
    """An intermediate CA created under a root CA must be flagged as
    CA_type_intermediate."""
    with root_certificate_authority('root_ca_test') as root_ca:
        params = {
            **get_cert_params(),
            'signedby': root_ca['id'],
            'name': 'test_intermediate_ca',
            'create_type': 'CA_CREATE_INTERMEDIATE',
        }
        intermediate_ca = call('certificateauthority.create', params)
        try:
            assert intermediate_ca['CA_type_intermediate'] is True, intermediate_ca
        finally:
            call('certificateauthority.delete', intermediate_ca['id'])
Example #30
0
def dataset(name, data=None, pool=pool):
    """Context fixture: create ``{pool}/{name}``, yield the full dataset
    name, and recursively delete it on exit (tolerating a dataset that was
    already removed inside the body)."""
    full_name = f"{pool}/{name}"

    call("pool.dataset.create", {"name": full_name, **(data or {})})

    try:
        yield full_name
    finally:
        try:
            call("pool.dataset.delete", full_name, {"recursive": True})
        except InstanceNotFound:
            # The body may have deleted (or exported the pool holding) the
            # dataset already; that's fine.
            pass