def test_run_onetime__exclude_mountpoint_property(request, exclude_mountpoint_property):
    depends(request, ["pool_04"], scope="session")
    with dataset("src") as src:
        with dataset("src/legacy") as src_legacy:
            ssh(f"zfs set mountpoint=legacy {src_legacy}")
            ssh(f"zfs snapshot -r {src}@2022-01-01-00-00-00")

            try:
                call("replication.run_onetime", {
                    "direction": "PUSH",
                    "transport": "LOCAL",
                    "source_datasets": [src],
                    "target_dataset": f"{pool}/dst",
                    "recursive": True,
                    "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
                    "retention_policy": "NONE",
                    "replicate": True,
                    "readonly": "IGNORE",
                    "exclude_mountpoint_property": exclude_mountpoint_property,
                }, job=True)

                mountpoint = ssh(f"zfs get -H -o value mountpoint {pool}/dst/legacy").strip()
                if exclude_mountpoint_property:
                    assert mountpoint == f"/mnt/{pool}/dst/legacy"
                else:
                    assert mountpoint == "legacy"
            finally:
                ssh(f"zfs destroy -r {pool}/dst", check=False)
def common_min_max_txg_snapshot_test(test_min_txg=False, test_max_txg=False):
    # At least one of the two txg bounds must be exercised
    assert test_min_txg or test_max_txg

    with dataset('test') as test_dataset:
        created_snaps = []
        total_snaps = 20
        for i in range(total_snaps):
            created_snaps.append(int(call('zfs.snapshot.create', {
                'dataset': test_dataset,
                'name': f'snap_{i}',
            })['properties']['createtxg']['value']))

        assert call('zfs.snapshot.query', [['dataset', '=', test_dataset]], {'count': True}) == len(created_snaps)

        for i in range(total_snaps // 2 - 1):
            new_list = created_snaps
            extra_args = {}
            if test_min_txg:
                # Query from the i-th snapshot's txg upwards
                new_list = created_snaps[i:]
                extra_args['min_txg'] = new_list[0]
            if test_max_txg:
                # Cap the query at the txg of the midpoint snapshot
                new_list = new_list[:len(new_list) // 2]
                extra_args['max_txg'] = new_list[-1]

            assert call('zfs.snapshot.query', [['dataset', '=', test_dataset]], {
                'count': True,
                'extra': extra_args,
            }) == len(new_list)
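# A minimal usage sketch for the helper above, assuming it is driven by thin
# wrapper tests; the wrapper names are illustrative, not from this excerpt:
def test_snapshot_query_with_min_txg():
    common_min_max_txg_snapshot_test(test_min_txg=True)


def test_snapshot_query_with_max_txg():
    common_min_max_txg_snapshot_test(test_max_txg=True)


def test_snapshot_query_with_min_and_max_txg():
    common_min_max_txg_snapshot_test(test_min_txg=True, test_max_txg=True)


# `test_snapshot` below takes a `has_zvol_sibling` flag, so it is presumably
# parametrized; a plausible decorator (an assumption, not shown in this excerpt):
@pytest.mark.parametrize("has_zvol_sibling", [False, True])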
def test_snapshot(request, has_zvol_sibling):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        ssh(f"mkdir -p /mnt/{ds}/dir1/dir2")
        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/dir1/dir2/blob bs=1M count=1")

        if has_zvol_sibling:
            ssh(f"zfs create -V 1gb {pool}/zvol")

        try:
            with local_s3_task({
                "path": f"/mnt/{ds}/dir1/dir2",
                "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 200}],  # So it'll take 5 seconds
                "snapshot": True,
            }) as task:
                job_id = call("cloudsync.sync", task["id"])

                time.sleep(2.5)

                ps_ax = ssh("ps ax | grep rclone")

                call("core.job_wait", job_id, job=True)

                assert re.search(rf"rclone .+ /mnt/{ds}/.zfs/snapshot/cloud_sync-[0-9]+-[0-9]+/dir1/dir2", ps_ax)

                time.sleep(1)

                assert call("zfs.snapshot.query", [["dataset", "=", ds]]) == []
        finally:
            if has_zvol_sibling:
                ssh(f"zfs destroy -r {pool}/zvol")
def test__iscsi_extent__disk_choices(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test zvol", {
        "type": "VOLUME",
        "volsize": 1024000,
    }) as ds:
        # Make snapshots available for devices
        call("zfs.dataset.update", ds, {"properties": {"snapdev": {"parsed": "visible"}}})
        call("zfs.snapshot.create", {"dataset": ds, "name": "snap-1"})

        assert call("iscsi.extent.disk_choices") == {
            f'zvol/{ds.replace(" ", "+")}': f'{ds} (1000 KiB)',
            f'zvol/{ds.replace(" ", "+")}@snap-1': f'{ds}@snap-1 [ro]',
        }

        # Create new extent
        with iscsi_extent({
            "name": "test_extent",
            "type": "DISK",
            "disk": f"zvol/{ds.replace(' ', '+')}",
        }):
            # Verify that the zvol is no longer available in iSCSI disk choices
            assert call("iscsi.extent.disk_choices") == {
                f'zvol/{ds.replace(" ", "+")}@snap-1': f'{ds}@snap-1 [ro]',
            }
            # Verify that the zvol is not available in VM disk choices either
            assert call("vm.device.disk_choices") == {
                f'/dev/zvol/{ds.replace(" ", "+")}@snap-1': f'{ds}@snap-1',
            }
def test_delete_retention(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("snapshottask-retention-test-2") as ds:
        call("zettarepl.load_removal_dates")

        result = POST("/pool/snapshottask/", {
            "dataset": ds,
            "recursive": True,
            "exclude": [],
            "lifetime_value": 1,
            "lifetime_unit": "WEEK",
            "naming_schema": "auto-%Y-%m-%d-%H-%M-1y",
            "schedule": {
                "minute": "*",
            },
        })
        assert result.status_code == 200, result.text
        task_id = result.json()["id"]

        result = POST("/zfs/snapshot/", {
            "dataset": ds,
            "name": "auto-2021-04-12-06-30-1y",
        })
        assert result.status_code == 200, result.text

        result = POST(f"/pool/snapshottask/id/{task_id}/delete_will_change_retention_for/")
        assert result.status_code == 200, result.text
        assert result.json() == {
            ds: ["auto-2021-04-12-06-30-1y"],
        }

        result = DELETE(f"/pool/snapshottask/id/{task_id}/", {
            "fixate_removal_date": True,
        })
        assert result.status_code == 200, result.text

        results = GET("/core/get_jobs/?method=pool.snapshottask.fixate_removal_date")
        job_status = wait_on_job(results.json()[-1]["id"], 180)
        assert job_status["state"] == "SUCCESS", str(job_status["results"])

        result = GET(f"/zfs/snapshot/?id={ds}@auto-2021-04-12-06-30-1y&extra.retention=true")
        assert result.status_code == 200, result.text

        assert [
            v for k, v in result.json()[0]["properties"].items()
            if k.startswith("org.truenas:destroy_at_")
        ][0]["value"] == "2021-04-19T06:30:00"

        assert result.json()[0]["retention"] == {
            "datetime": {
                "$date": (datetime(2021, 4, 19, 6, 30) - datetime(1970, 1, 1)).total_seconds() * 1000,
            },
            "source": "property",
        }
def test_lock_sets_immutable_flag():
    with dataset('parent', encryption_props()) as parent_ds:
        with dataset('parent/child', encryption_props()) as child_ds:
            child_ds_mountpoint = os.path.join('/mnt', child_ds)
            assert call('filesystem.is_immutable', child_ds_mountpoint) is False, child_ds_mountpoint
            call('pool.dataset.lock', child_ds, job=True)
            assert call('filesystem.is_immutable', child_ds_mountpoint) is True, child_ds_mountpoint

            parent_mountpoint = os.path.join('/mnt', parent_ds)
            assert call('filesystem.is_immutable', parent_mountpoint) is False, parent_mountpoint
            call('pool.dataset.lock', parent_ds, job=True)
            assert call('filesystem.is_immutable', parent_mountpoint) is True, parent_mountpoint
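# `test_custom_s3` below takes `credential_attributes` and `result`; a plausible
# parametrization (the values are illustrative assumptions, not from this excerpt):
@pytest.mark.parametrize("credential_attributes,result", [
    ({"region": "fr-par"}, "fr-par"),
])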
def test_custom_s3(request, credential_attributes, result):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        with credential({
            "name": "S3",
            "provider": "S3",
            "attributes": {
                "access_key_id": "test",
                "secret_access_key": "test",
                **credential_attributes,
            },
        }) as c:
            with task({
                "direction": "PUSH",
                "transfer_mode": "COPY",
                "path": f"/mnt/{ds}",
                "credentials": c["id"],
                "attributes": {
                    "bucket": "bucket",
                    "folder": "",
                },
            }) as t:
                with mock_rclone() as mr:
                    call("cloudsync.sync", t["id"])

                    time.sleep(2.5)

                    # Compare against the parametrized expectation instead of a
                    # hard-coded region (the `result` argument was unused otherwise)
                    assert mr.result["config"]["remote"]["region"] == result
def test_s3_attachment_delegate__works_for_poor_s3_configuration(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as test_dataset:
        old_path = "/mnt/unavailable-pool/s3"
        ssh(f"mkdir -p {old_path}")
        try:
            call("datastore.update", "services.s3", 1, {"s3_disks": old_path})

            assert call("pool.dataset.attachments", test_dataset) == []
        finally:
            ssh(f"rm -rf {old_path}")
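# `test_errors` below is parametrized over (id, quota_type, error); plausible
# values, shaped to match the asserted message (illustrative assumptions only):
@pytest.mark.parametrize("id,quota_type,error", [
    (0, "USER", "user quota on uid"),
    (0, "GROUP", "group quota on gid"),
])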
def test_errors(request, id, quota_type, error):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        with pytest.raises(ValidationErrors) as ve:
            call("pool.dataset.set_quota", ds, [{
                "quota_type": quota_type,
                "id": id,
                "quota_value": 5242880,
            }])

        assert ve.value.errors[0].errmsg == f"Setting {error} [0] is not permitted."
def anonymous_ftp_server(config=None):
    config = config or {}
    with dataset("anonftp") as ds:
        path = f"/mnt/{ds}"
        ssh(f"chmod 777 {path}")
        with ftp_server({
            "onlyanonymous": True,
            "anonpath": path,
            **config,
        }):
            yield SimpleNamespace(dataset=ds, username="******", password="")
def fixture1():
    # Build identical dataset trees on the primary pool and on the second pool
    # created by another_pool(), then snapshot both trees recursively
    with another_pool():
        with dataset("test"):
            with dataset("test/test1"):
                with dataset("test/test2"):
                    with dataset("test", pool="test"):
                        with dataset("test/test1", pool="test"):
                            with dataset("test/test2", pool="test"):
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": f"{pool}/test", "name": "snap-1", "recursive": True},
                                )
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": f"{pool}/test", "name": "snap-2", "recursive": True},
                                )
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": "test/test", "name": "snap-1", "recursive": True},
                                )
                                call(
                                    "zfs.snapshot.create",
                                    {"dataset": "test/test", "name": "snap-2", "recursive": True},
                                )
                                yield
def test_unlock_unsets_immutable_flag():
    with dataset('parent', encryption_props()) as parent_ds:
        parent_mountpoint = os.path.join('/mnt', parent_ds)
        with dataset('parent/child', encryption_props()) as child_ds:
            child_ds_mountpoint = os.path.join('/mnt', child_ds)
            call('pool.dataset.lock', parent_ds, job=True)
            assert call('filesystem.is_immutable', parent_mountpoint) is True, parent_mountpoint

            # Recursive unlock with a wrong child passphrase: the parent unlocks,
            # but the child stays locked and keeps its immutable flag
            call('pool.dataset.unlock', parent_ds, {
                'datasets': [
                    {'name': parent_ds, 'passphrase': PASSPHRASE},
                    {'name': child_ds, 'passphrase': 'random'},
                ],
                'recursive': True,
            }, job=True)
            assert call('filesystem.is_immutable', parent_mountpoint) is False, parent_mountpoint
            assert call('filesystem.is_immutable', child_ds_mountpoint) is True, child_ds_mountpoint

            call('pool.dataset.unlock', child_ds, {
                'datasets': [{'name': child_ds, 'passphrase': PASSPHRASE}],
            }, job=True)
            assert call('filesystem.is_immutable', child_ds_mountpoint) is False, child_ds_mountpoint
def test_sync_onetime(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("cloudsync_local") as local_dataset:
        with local_s3_credential() as c:
            call("cloudsync.sync_onetime", {
                "direction": "PUSH",
                "transfer_mode": "COPY",
                "path": f"/mnt/{local_dataset}",
                "credentials": c["id"],
                "attributes": {
                    "bucket": "bucket",
                    "folder": "",
                },
                "snapshot": True,
            }, job=True)
def local_s3_credential(credential_params=None):
    credential_params = credential_params or {}
    with dataset("cloudsync_remote") as remote_dataset:
        with s3_server(remote_dataset) as s3:
            with credential({
                "provider": "S3",
                "attributes": {
                    "access_key_id": s3.access_key,
                    "secret_access_key": s3.secret_key,
                    "endpoint": "http://localhost:9000",
                    "skip_region": True,
                    **credential_params,
                },
            }) as c:
                yield c
def test_s3_attachment_delegate__works(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as test_dataset:
        ssh(f"mkdir /mnt/{test_dataset}/s3_root")
        with s3_server(f"{test_dataset}/s3_root"):
            assert call("pool.dataset.attachments", test_dataset) == [{
                "type": "S3",
                "service": "s3",
                "attachments": [test_dataset],
            }]

            call("pool.dataset.delete", test_dataset)

            assert not call("service.started", "s3")
def test__iscsi_extent__create_with_invalid_disk_with_whitespace(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test zvol", {
        "type": "VOLUME",
        "volsize": 1024000,
    }) as ds:
        with pytest.raises(ValidationErrors) as e:
            with iscsi_extent({
                "name": "test_extent",
                "type": "DISK",
                # The whitespace in the zvol name is deliberately not replaced with "+"
                "disk": f"zvol/{ds}",
            }):
                pass

        assert str(e.value) == (
            f"[EINVAL] iscsi_extent_create.disk: Device '/dev/zvol/{ds}' for volume '{ds}' does not exist\n"
        )
def local_s3_task(params=None, credential_params=None):
    params = params or {}
    credential_params = credential_params or {}
    with dataset("cloudsync_local") as local_dataset:
        with local_s3_credential(credential_params) as c:
            with task({
                "direction": "PUSH",
                "transfer_mode": "COPY",
                "path": f"/mnt/{local_dataset}",
                "credentials": c["id"],
                "attributes": {
                    "bucket": "bucket",
                    "folder": "",
                },
                **params,
            }) as t:
                yield t
def test_snapshot_count_alert(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("snapshot_count") as ds:
        with mock("pool.snapshottask.max_count", return_value=10):
            for i in range(10):
                call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})

            assert call("alert.run_source", "SnapshotCount") == []

            call("zfs.snapshot.create", {"dataset": ds, "name": "snap-10"})

            alert = call("alert.run_source", "SnapshotCount")[0]
            assert alert["text"] % alert["args"] == (
                "Dataset tank/snapshot_count has more snapshots (11) than recommended (10). Performance or "
                "functionality might degrade."
            )
def test_snapshot_total_count_alert(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("snapshot_count") as ds:
        base = call("zfs.snapshot.query", [], {"count": True})

        with mock("pool.snapshottask.max_total_count", return_value=base + 10):
            for i in range(10):
                call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})

            assert call("alert.run_source", "SnapshotCount") == []

            call("zfs.snapshot.create", {"dataset": ds, "name": "snap-10"})

            alert = call("alert.run_source", "SnapshotCount")[0]
            assert alert["text"] % alert["args"] == (
                f"Your system has more snapshots ({base + 11}) than recommended ({base + 10}). Performance or "
                "functionality might degrade."
            )
def ftp_server_with_user_account(config=None):
    config = config or {}
    with dataset("ftptest") as ds:
        with group({"name": "ftp"}) as g:
            with user({
                "username": "******",
                "group_create": True,
                "home": f"/mnt/{ds}",
                "full_name": "FTP Test",
                "password": "******",
                "groups": [g],
            }):
                with ftp_server({
                    "onlylocal": True,
                    **config,
                }):
                    yield SimpleNamespace(dataset=ds, username="******", password="******")
def test_abort(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test") as ds:
        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/blob bs=1M count=1")

        with local_s3_task({
            "path": f"/mnt/{ds}",
            "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 100}],  # So it'll take 10 seconds
            "snapshot": True,
        }) as task:
            job_id = call("cloudsync.sync", task["id"])

            time.sleep(2.5)

            call("core.job_abort", job_id)

            time.sleep(1)

            assert "rclone" not in ssh("ps ax")
            assert call("cloudsync.query", [["id", "=", task["id"]]], {"get": True})["job"]["state"] == "ABORTED"
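# `test_ftp_subfolder` below takes three boolean flags; a plausible
# parametrization (an assumption, not shown in this excerpt):
@pytest.mark.parametrize("anonymous", [False, True])
@pytest.mark.parametrize("defaultroot", [False, True])
@pytest.mark.parametrize("has_leading_slash", [False, True])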
def test_ftp_subfolder(request, anonymous, defaultroot, has_leading_slash):
    depends(request, ["pool_04"], scope="session")
    with dataset("cloudsync_local") as local_dataset:
        config = {"defaultroot": defaultroot}
        with (anonymous_ftp_server if anonymous else ftp_server_with_user_account)(config) as ftp:
            remote_dataset = ftp.dataset
            ssh(f"touch /mnt/{remote_dataset}/bad-file")
            ssh(f"mkdir /mnt/{remote_dataset}/data")
            ssh(f"touch /mnt/{remote_dataset}/data/another-bad-file")
            ssh(f"mkdir /mnt/{remote_dataset}/data/child")
            ssh(f"touch /mnt/{remote_dataset}/data/child/good-file")

            with credential({
                "name": "Test",
                "provider": "FTP",
                "attributes": {
                    "host": "localhost",
                    "port": 21,
                    "user": ftp.username,
                    "pass": ftp.password,
                },
            }) as c:
                folder = f"{'/' if has_leading_slash else ''}data/child"
                if not anonymous and not defaultroot:
                    # We have access to the FTP server root directory
                    if has_leading_slash:
                        # A path with a leading slash should be a complete path in this case
                        folder = f"/mnt/{ftp.dataset}/data/child"

                with task({
                    "direction": "PULL",
                    "transfer_mode": "MOVE",
                    "path": f"/mnt/{local_dataset}",
                    "credentials": c["id"],
                    "attributes": {
                        "folder": folder,
                    },
                }) as t:
                    run_task(t)

                    assert ssh(f'ls /mnt/{local_dataset}') == 'good-file\n'
def test__iscsi_extent__locked(request):
    depends(request, ["pool_04"], scope="session")
    with dataset("test zvol", {
        "type": "VOLUME",
        "volsize": 1024000,
        "inherit_encryption": False,
        "encryption": True,
        "encryption_options": {"passphrase": "testtest"},
    }) as ds:
        with iscsi_extent({
            "name": "test_extent",
            "type": "DISK",
            "disk": f"zvol/{ds.replace(' ', '+')}",
        }) as extent:
            assert not extent["locked"]

            call("pool.dataset.lock", ds, job=True)

            extent = call("iscsi.extent.get_instance", extent["id"])
            assert extent["locked"]