Example #1
def test_52_check_adjusting_threadpool_mode(request):
    """
    Verify that NFS thread pool configuration can be adjusted
    through private API endpoints.

    This request will fail if NFS server is still running.
    """
    supported_modes = ["AUTO", "PERCPU", "PERNODE", "GLOBAL"]
    payload = {'msg': 'method', 'method': None, 'params': []}

    for m in supported_modes:
        payload.update({'method': 'nfs.set_threadpool_mode', 'params': [m]})
        make_ws_request(ip, payload)

        payload.update({'method': 'nfs.get_threadpool_mode', 'params': []})
        res = make_ws_request(ip, payload)
        assert res['result'] == m, res
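A practical note on the caveat above: since the mode can only be changed
while NFS is stopped, a run of this test would be preceded by something
like the following sketch ('service.stop' and its parameters are an
assumption about the middleware API, mirroring the payload style above):

# Hedged sketch: stop the NFS service before adjusting the thread pool,
# since the docstring notes the calls fail while NFS is running.
stop_payload = {'msg': 'method', 'method': 'service.stop', 'params': ['nfs']}
res = make_ws_request(ip, stop_payload)
assert res.get('error') is None, res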
Example #2
def test_01_verify_fenced_is_running(request):
    payload = {
        'msg': 'method',
        'method': 'failover.fenced.run_info',
        'params': []
    }
    res = make_ws_request(IP, payload)
    assert res['result']['running'], res
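All of these examples lean on the same make_ws_request helper from the
suite's shared functions module. A minimal sketch of what it does,
assuming the middleware's /websocket endpoint and its 'connect'
handshake (authentication is omitted for brevity; the real helper lives
elsewhere in the test suite):

# Minimal sketch of the make_ws_request helper these tests assume.
import json
import uuid

from websocket import create_connection  # websocket-client package


def make_ws_request(ip, payload):
    ws = create_connection(f'ws://{ip}/websocket')
    try:
        # The server expects a 'connect' handshake before method calls.
        ws.send(json.dumps({'msg': 'connect', 'version': '1', 'support': ['1']}))
        ws.recv()

        # Tag the call with a unique id so the reply can be matched to it.
        ws.send(json.dumps(dict(payload, id=str(uuid.uuid4()))))
        return json.loads(ws.recv())
    finally:
        ws.close()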
Example #3
def test_45_check_setting_runtime_debug(request):
    """
    This validates that the private NFS debugging API works correctly.
    """
    depends(request, ["pool_04"], scope="session")
    disabled = {
        "NFS": ["NONE"],
        "NFSD": ["NONE"],
        "NLM": ["NONE"],
        "RPC": ["NONE"]
    }

    get_payload = {'msg': 'method', 'method': 'nfs.get_debug', 'params': []}
    set_payload = {
        'msg': 'method',
        'method': 'nfs.set_debug',
        'params': [["NFSD"], ["ALL"]]
    }
    res = make_ws_request(ip, get_payload)
    assert res['result'] == disabled, res

    make_ws_request(ip, set_payload)
    res = make_ws_request(ip, get_payload)
    assert res['result']['NFSD'] == ["ALL"], res

    set_payload['params'][1] = ["NONE"]
    make_ws_request(ip, set_payload)
    res = make_ws_request(ip, get_payload)
    assert res['result'] == disabled, res
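Since nfs.set_debug takes parallel lists of services and levels, the
set-then-restore pattern above generalizes naturally. A small sketch of
a helper built on the exact calls used in this test (the helper itself
is illustrative, not part of the suite):

# Hedged sketch: enable a debug level for some services, then restore
# the all-'NONE' state on exit, mirroring the test's own payloads.
from contextlib import contextmanager


@contextmanager
def nfs_debug(services, level):
    payload = {
        'msg': 'method',
        'method': 'nfs.set_debug',
        'params': [services, [level]]
    }
    make_ws_request(ip, payload)
    try:
        yield
    finally:
        payload['params'][1] = ["NONE"]
        make_ws_request(ip, payload)


# Usage: with nfs_debug(["NFSD"], "ALL"): ... exercise the server ...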
Example #4
def test_05_get_pool_disks(request, pool_data):
    depends(request, ["get_pool_id"])
    payload = {
        'msg': 'method',
        'method': 'pool.get_disks',
        'params': [pool_data['id']]
    }
    res = make_ws_request(ip, payload)
    assert isinstance(res['result'], list), res
    assert res['result'] and (set(res['result']) == set(tank_pool_disks)), res
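This test relies on a pool_data fixture and a tank_pool_disks global
defined elsewhere in the module. A minimal sketch of what they might
look like (the fixture body and the 'tank' lookup are assumptions; the
real values are populated by earlier tests in the file):

# Hedged sketch of the module-level pieces Example #4 assumes.
import pytest

tank_pool_disks = []  # filled in when the pool is created earlier on


@pytest.fixture(scope='module')
def pool_data():
    # Look up the pool once and share the record across the module.
    payload = {
        'msg': 'method',
        'method': 'pool.query',
        'params': [[['name', '=', 'tank']], {'get': True}]
    }
    res = make_ws_request(ip, payload)
    return res['result']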
Example #5
def test_08_test_pool_property_normalization(request):
    """
    middleware attempts to normalize certain ZFS dataset properties so that
    importing a foreign pool doesn't break our configuration. Currently we
    do this by resetting the mountpoint of datasets, and disabling sharenfs
    property. This test simultates such a situation by creating a test pool
    setting parameters that must be migrated, then exporting the pool and
    re-importing it. Once this is complete, we check whether properties
    have been set to their correct new values.
    """
    depends(request, ["pool_04"])
    global tp
    with another_pool() as tp:
        payload = {
            'msg': 'method',
            'method': 'zfs.dataset.update',
            'params': [
                tp['name'],
                {
                    'properties': {
                        'sharenfs': {'value': 'on'},
                    }
                },
            ]
        }
        # Setting sharenfs directly on the pool root is expected to fail.
        res = make_ws_request(ip, payload)
        error = res.get('error')
        assert error is not None, str(error)
        assert 'NFS share creation failed' in error['reason'], str(error['reason'])

        result = POST("/pool/dataset/", {"name": f"{tp['name']}/ds1"})
        assert result.status_code == 200, result.text

        # Repeat on the child dataset, also marking its mountpoint 'legacy';
        # this combination is accepted and will be normalized on re-import.
        payload['params'][0] += "/ds1"
        payload['params'][1]['properties'].update({
            'mountpoint': {'value': 'legacy'},
        })
        res = make_ws_request(ip, payload)
        error = res.get('error')
        assert error is None, str(error)

    res = GET("/pool/import_find")
    assert res.status_code == 200, res.text
    job_id = res.json()
    job_status = wait_on_job(job_id, 180)
    assert job_status['state'] == 'SUCCESS', str(job_status['results'])

    available = job_status['results']['result']
    assert len(available) == 1, res.text
    assert available[0]['name'] == tp['name'], res.text

    res = POST('/pool/import_pool', {'guid': available[0]['guid']})
    assert res.status_code == 200, res.text
    job_id = res.json()
    job_status = wait_on_job(job_id, 180)
    assert job_status['state'] == 'SUCCESS', str(job_status['results'])

    for ds in (f'{tp["name"]}/ds1', tp['name']):
        payload = {
            'msg': 'method',
            'method': 'zfs.dataset.query',
            'params': [[['id', '=', ds]], {
                'get': True,
                'extra': {'retrieve_children': False}
            }]
        }
        req = make_ws_request(ip, payload)
        error = req.get('error')
        assert error is None, str(error)

        # Normalization should have cleared the legacy mountpoint and
        # disabled sharenfs on both datasets.
        props = req['result']['properties']
        assert props['mountpoint']['value'] != 'legacy', str(props)
        assert props['sharenfs']['value'] == 'off', str(props)
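For reference, the normalization behavior this test exercises can be
summarized as a sketch (an illustration of the documented behavior, not
the middleware's actual import code; the default-mountpoint rule is an
assumption):

# Conceptual sketch: on import, foreign datasets get sharenfs disabled
# and any 'legacy' mountpoint reset to a default derived from the name.
def normalize_on_import(dataset):
    updates = {}
    props = dataset['properties']
    if props['sharenfs']['value'] != 'off':
        updates['sharenfs'] = {'value': 'off'}
    if props['mountpoint']['value'] == 'legacy':
        updates['mountpoint'] = {'value': f"/mnt/{dataset['id']}"}
    return updates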