Code example #1
def test_026_share_param_hosts(to_check, request):
    depends(request, ['CLUSTER_SMB_SHARE_CREATED'])

    rando_ips = ["192.168.0.240", "192.168.0.69"]
    payload = {to_check: rando_ips}
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/sharing/smb/id/{SMB_SHARE_ID}'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'smb.getparm',
            # share field names such as 'hostsallow'/'hostsdeny' map to the
            # smb.conf parameters 'hosts allow'/'hosts deny', hence the slice
            # past the 'hosts' prefix
            'params': [f'hosts {to_check[5:]}', "CL_SMB"]
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res

        assert res['result'] == rando_ips

    # clear the parameter again and verify the reset reaches every node
    payload = {to_check: []}
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/sharing/smb/id/{SMB_SHARE_ID}'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'smb.getparm',
            'params': [f'hosts {to_check[5:]}', "CL_SMB"]
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res

        assert res['result'] == []
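All of these examples rely on shared helpers from the cluster test framework that are not shown here: make_request, make_ws_request, wait_on_job, and module-level constants such as CLUSTER_IPS and SMB_SHARE_ID. As a rough orientation only, a minimal sketch of what the two request helpers might look like follows; the credentials, timeouts, and websocket handshake details are assumptions, not the project's actual implementation.

import json
import uuid

import requests
from websocket import create_connection  # pip install websocket-client

AUTH = ('root', 'testing')  # placeholder credentials


def make_request(method, url, data=None):
    # The tests above pass query filters and update payloads as JSON bodies
    # (even for GET requests), so forward `data` as the JSON body.
    return requests.request(method, url, json=data, auth=AUTH, timeout=60)


def make_ws_request(ip, payload):
    # Hypothetical sketch: the payloads above already carry 'msg' and
    # 'method', so only an 'id' has to be added before sending the call to
    # the middleware's legacy websocket endpoint.
    ws = create_connection(f'ws://{ip}/websocket')
    try:
        ws.send(json.dumps({'msg': 'connect', 'version': '1', 'support': ['1']}))
        ws.recv()  # handshake reply
        ws.send(json.dumps({'id': str(uuid.uuid4()), 'msg': 'method',
                            'method': 'auth.login', 'params': list(AUTH)}))
        ws.recv()  # login result
        ws.send(json.dumps(dict(payload, id=str(uuid.uuid4()))))
        return json.loads(ws.recv())
    finally:
        ws.close()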
Code example #2
def test_006_validate_kerberos_settings(ip, request):
    depends(request, ['JOINED_AD'])

    payload = {
        'query-filters': [["realm", "=", CLUSTER_ADS['DOMAIN'].upper()]],
        'query-options': {'get': True},
    }
    url = f'http://{ip}/api/v2.0/kerberos/realm'
    res = make_request('get', url, data=payload)
    assert res.status_code == 200, res.text

    payload = {
        'query-filters': [["name", "=", 'AD_MACHINE_ACCOUNT']],
        'query-options': {'get': True},
    }
    url = f'http://{ip}/api/v2.0/kerberos/keytab'
    res = make_request('get', url, data=payload)
    assert res.status_code == 200, res.text

    # check that kinit succeeded
    payload = {
        'msg': 'method',
        'method': 'kerberos.check_ticket',
    }
    res = make_ws_request(ip, payload)
    assert res.get('error') is None, res

    # check that keytab was generated
    payload = {
        'msg': 'method',
        'method': 'kerberos.keytab.kerberos_principal_choices',
    }
    res = make_ws_request(ip, payload)
    assert res.get('error') is None, res
    assert len(res['result']) != 0, res
Code example #3
def test_024_share_comment(request):
    depends(request, ['CLUSTER_SMB_SHARE_CREATED'])
    payload = {"comment": "test comment"}
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/sharing/smb/id/{SMB_SHARE_ID}'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'smb.getparm',
            'params': ["comment", "CL_SMB"]
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res
        assert res['result'] == 'test comment'

    payload = {"comment": ""}
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/sharing/smb/id/{SMB_SHARE_ID}'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'smb.getparm',
            'params': ["comment", "CL_SMB"]
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res
        assert res['result'] == ''
Code example #4
File: test_002_ldap.py Project: yaplej/freenas
def test_003_bind_ldap(request):
    depends(request, ['DS_LDAP_NETWORK_CONFIGURED'])

    payload = {
        "hostname": [CLUSTER_LDAP['HOSTNAME']],
        "basedn": CLUSTER_LDAP['BASEDN'],
        "binddn": CLUSTER_LDAP['BINDDN'],
        "bindpw": CLUSTER_LDAP['BINDPW'],
        "ssl": "ON",
        "enable": True
    }
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/ldap/'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json()['job_id'], CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'ldap.started',
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res

        url = f'http://{ip}/api/v2.0/ldap/get_state'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json() == 'HEALTHY'
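This test and the unbind test below block on wait_on_job, which is also defined elsewhere. A plausible sketch, assuming the helper polls the core/get_jobs endpoint until the job leaves its WAITING/RUNNING states; the real implementation may differ.

import time


class JobTimeOut(Exception):
    """Raised when a middleware job does not finish within the timeout."""


def wait_on_job(job_id, ip, timeout):
    # Hypothetical polling loop built on the make_request sketch above.
    url = f'http://{ip}/api/v2.0/core/get_jobs'
    payload = {'query-filters': [['id', '=', job_id]], 'query-options': {'get': True}}
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        job = make_request('get', url, data=payload).json()
        if job['state'] not in ('WAITING', 'RUNNING'):
            return job
        time.sleep(1)
    raise JobTimeOut(f'job {job_id} did not finish within {timeout} seconds')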
Code example #5
File: test_002_ldap.py Project: yaplej/freenas
def test_050_unbind_ldap(request):
    depends(request, ['BOUND_LDAP'])

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/ldap'
    payload = {
        "has_samba_schema": False,
        "enable": False,
    }
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json()['job_id'], CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    for ip in CLUSTER_IPS:
        url = f'http://{ip}/api/v2.0/ldap/get_state'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json() == 'DISABLED'

        payload = {
            'msg': 'method',
            'method': 'ldap.started',
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res
Code example #6
def test_023_check_streams_set(ip, request):
    depends(request, ['CLUSTER_SMB_SHARE_CREATED'])

    payload = {
        'msg': 'method',
        'method': 'smb.getparm',
        'params': ["vfs objects", "CL_SMB"]
    }
    res = make_ws_request(ip, payload)
    assert res.get('error') is None, res
    assert 'streams_xattr' in res['result']
Code example #7
def test_007_validate_dns_records_added(request):
    depends(request, ['JOINED_AD'])

    payload = {
        'msg': 'method',
        'method': 'dnsclient.forward_lookup',
        'params': [{"names": [f'truenas.{CLUSTER_ADS["DOMAIN"]}']}],
    }
    res = make_ws_request(CLUSTER_IPS[0], payload)
    assert res.get('error') is None, res
    answers = set([x['address'] for x in res['result']])
    assert set(PUBLIC_IPS) == answers
Code example #8
def test_025_share_param_check_bool(to_check, request):
    depends(request, ['CLUSTER_SMB_SHARE_CREATED'])

    default_val = BOOL_SMB_PARAMS[to_check]['default']
    smbconf_param = BOOL_SMB_PARAMS[to_check]['smbconf']
    payload = {to_check: not default_val}
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/sharing/smb/id/{SMB_SHARE_ID}'

    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'smb.getparm',
            'params': [smbconf_param, "CL_SMB"]
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res

        val = get_bool(res['result'])
        assert val is not default_val, f'IP: {ip}, param: {smbconf_param}, value: {res["result"]}'

    # restore the default value and verify the change reaches every node
    payload = {to_check: default_val}
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/sharing/smb/id/{SMB_SHARE_ID}'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'smb.getparm',
            'params': [smbconf_param, "CL_SMB"]
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res

        val = get_bool(res['result'])
        assert val is default_val, f'IP: {ip}, param: {smbconf_param}, value: {res["result"]}'
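test_025_share_param_check_bool is parametrized over a BOOL_SMB_PARAMS table and uses a get_bool helper, neither of which is shown. The sketch below shows their assumed shape; the field names and defaults are illustrative, not the suite's actual table.

# Illustrative only: maps a sharing.smb field to the smb.conf parameter it
# controls and to the field's default value.
BOOL_SMB_PARAMS = {
    'ro': {'smbconf': 'read only', 'default': False},
    'browsable': {'smbconf': 'browseable', 'default': True},
    'abe': {'smbconf': 'access based share enum', 'default': False},
}


def get_bool(value):
    # smb.getparm may hand back a native bool or an smb.conf-style string
    # such as "Yes"/"No"; normalize either form to a Python bool.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('yes', 'true', '1')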
Code example #9
def test_018_check_timemachine_unset(ip, request):
    depends(request, ['CLUSTER_SMB_SHARE_CREATED'])

    payload = {
        'msg': 'method',
        'method': 'sharing.smb.reg_showshare',
        'params': ["CL_SMB"]
    }
    res = make_ws_request(ip, payload)
    assert res.get('error') is None, res

    data = res['result']['parameters']
    assert not data['fruit:time machine']['parsed']
Code example #10
def test_001_check_initial_smb_config(request):
    payload = {
        'msg': 'method',
        'method': 'sharing.smb.reg_showshare',
        'params': ["GLOBAL"]
    }
    res = make_ws_request(CLUSTER_IPS[0], payload)
    assert res.get('error') is None, res
    data = res['result']['parameters']
    assert not data['server multi channel support']['parsed']
    assert not data['ntlm auth']['parsed']
    assert data['idmap config * : range']['raw'] == '90000001 - 100000000'
    assert data['server min protocol']['raw'] == 'SMB2_10'
    assert data['guest account']['raw'] == 'nobody'
Code example #11
def test_011_check_aapl_extension_enabled(ip, request):
    depends(request, ['CLUSTER_SMB_SHARE_CREATED'])

    payload = {
        'msg': 'method',
        'method': 'sharing.smb.reg_showshare',
        'params': ["CL_SMB"]
    }
    res = make_ws_request(ip, payload)
    assert res.get('error') is None, res
    data = res['result']['parameters']
    assert 'fruit' in data['vfs objects']['parsed'], data
    assert 'streams_xattr' in data['vfs objects']['parsed'], data
    assert data['fruit:resource']['parsed'] == 'stream'
    assert data['fruit:metadata']['parsed'] == 'stream'
Code example #12
def test_007_check_recycle_set(ip, request):
    depends(request, ['CLUSTER_SMB_SHARE_CREATED'])

    payload = {
        'msg': 'method',
        'method': 'sharing.smb.reg_showshare',
        'params': ["CL_SMB"]
    }
    res = make_ws_request(ip, payload)
    assert res.get('error') is None, res
    data = res['result']['parameters']
    assert 'recycle' in data['vfs objects']['parsed'], data
    assert data['recycle:keeptree']['parsed'], data
    assert data['recycle:versions']['parsed'], data
    assert data['recycle:touch']['parsed'], data
    assert data['recycle:directory_mode']['raw'] == '0777', data
    assert data['recycle:subdir_mode']['raw'] == '0700', data
    assert data['recycle:repository']['raw'] == '.recycle/%U', data
Code example #13
File: init_cluster.py Project: yaplej/freenas
def setup_zpool_and_datasets(ip):
    result = {'ERROR': ''}

    # query for existing zpool (clean CI run creates a zpool)
    print(f'Checking for existing zpools on {ip}')
    url = f'http://{ip}/api/v2.0/pool'
    ans = make_request('get', url)
    if ans.status_code != 200:
        result['ERROR'] = f'Invalid status code when checking for existing zpools: {ans.text}'
        return result

    # get the id of the existing pool and export it
    pool = ans.json()
    pool = pool[0] if pool else None
    if pool:
        url = f'http://{ip}/api/v2.0/pool/id/{pool["id"]}/export'
        ans = make_request('post', url)
        if ans.status_code != 200:
            result['ERROR'] = f'Invalid status code when exporting "{pool["name"]}" on {ip}: {ans.text}'
            return result
        try:
            print(f'Waiting on "{pool["name"]}" to be exported on {ip}')
            status = wait_on_job(ans.json(), ip, 20)
        except JobTimeOut:
            result['ERROR'] = f'Timed out waiting on "{pool["name"]}" to be exported on {ip}'
            return result
        else:
            if status['state'] != 'SUCCESS':
                result['ERROR'] = f'Exporting "{pool["name"]}" failed on {ip}'
                return result

    # wipe the disks to clean any remnants of previous zpools
    print(f'Wiping "{CLUSTER_INFO["ZPOOL_DISK"]}" on {ip}')
    url = f'http://{ip}/api/v2.0/disk/wipe'
    payload = {'dev': CLUSTER_INFO['ZPOOL_DISK'], 'mode': 'QUICK'}
    ans = make_request('post', url, data=payload)
    if ans.status_code != 200:
        result['ERROR'] = f'Invalid status code when wiping disk: {ans.status_code}:{ans.text}'
        return result
    try:
        print(
            f'Waiting for disk "{CLUSTER_INFO["ZPOOL_DISK"]}" on {ip} to be wiped'
        )
        status = wait_on_job(ans.json(), ip, 10)
    except JobTimeOut:
        result['ERROR'] = f'Timed out waiting for disk to be wiped on {ip}'
        return result
    else:
        if status['state'] != 'SUCCESS':
            result['ERROR'] = f'Wiping disk {CLUSTER_INFO["ZPOOL_DISK"]} failed on {ip}'
            return result

    # now create the zpool
    print(f'Creating zpool "{CLUSTER_INFO["ZPOOL"]}" on {ip}')
    url = f'http://{ip}/api/v2.0/pool'
    payload = {
        'name': CLUSTER_INFO['ZPOOL'],
        'encryption': False,
        'topology': {
            'data': [{
                'type': 'STRIPE',
                'disks': [CLUSTER_INFO['ZPOOL_DISK']]
            }]
        }
    }
    ans = make_request('post', url, data=payload)
    if ans.status_code != 200:
        result['ERROR'] = f'Failed to create zpool: "{CLUSTER_INFO["ZPOOL"]}" on {ip}:{ans.text}'
        return result
    try:
        print(
            f'Waiting on zpool "{CLUSTER_INFO["ZPOOL"]}" to be created on {ip}'
        )
        status = wait_on_job(ans.json(), ip, 30)
    except JobTimeOut:
        result['ERROR'] = f'Timed out waiting on zpool to be created on {ip}'
        return result
    else:
        if status['state'] != 'SUCCESS':
            result['ERROR'] = f'Creating zpool was a failure: {status["result"]} on {ip}'
            return result

    # now create the cluster datasets; we have to use the websocket API here
    # because these are "internal" datasets that normal API users are blocked
    # from creating through the public API, so the websocket call side-steps
    # the public API validation
    print(f'Creating dataset hierarchy "{DATASET_HIERARCHY}" on {ip}')
    payload = {
        'msg': 'method',
        'method': 'zfs.dataset.create',
        'params': [{
            'name': DATASET_HIERARCHY,
            'type': 'FILESYSTEM',
            'create_ancestors': True,
            'properties': {'acltype': 'posix'}
        }]
    }
    res = make_ws_request(ip, payload)
    if res.get('error', {}):
        result['ERROR'] = res['error'].get('reason', 'NO REASON GIVEN')
        return result

    # libzfs doesn't mount the youngest ancestor when you give it a
    # path of ancestors to be created all at once, so we have to mount
    # the youngest ancestor ourselves,
    # i.e. cargo/.glusterfs/gvol01/brick0 (brick0 needs to be mounted)
    print(f'Mounting dataset hierarchy "{DATASET_HIERARCHY}" on {ip}')
    payload = {
        'msg': 'method',
        'method': 'zfs.dataset.mount',
        'params': [DATASET_HIERARCHY],
    }
    res = make_ws_request(ip, payload)
    if res.get('error', {}):
        result['ERROR'] = res['error'].get('reason', 'NO REASON GIVEN')

    return result
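setup_zpool_and_datasets reports failures through the returned dict instead of raising, so callers are expected to check the 'ERROR' key themselves. A hypothetical driver loop follows; the NODE_IPS key is a placeholder for however the init script actually enumerates cluster nodes.

def setup_cluster_storage():
    # Hypothetical caller: run the storage setup on every node and stop at
    # the first failure, since later cluster steps depend on these datasets.
    for node_ip in CLUSTER_INFO['NODE_IPS']:  # placeholder key
        res = setup_zpool_and_datasets(node_ip)
        if res['ERROR']:
            raise RuntimeError(f'{node_ip}: {res["ERROR"]}')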