Code Example #1
def test_005_validate_cached_ad_accounts(ip, request):
    depends(request, ['DS_ACCOUNTS_CONFIGURED'])

    payload = {
        'query-filters': [["method", "=", "activedirectory.fill_cache"]],
        'query-options': {'order_by': ['-id']},
    }
    url = f'http://{ip}/api/v2.0/core/get_jobs'
    res = make_request('get', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json()[0]['id'], ip, 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    payload = {
        'query-filters': [["local", "=", False]],
        'query-options': {'extra': {"additional_information": ['DS']}},
    }
    url = f'http://{ip}/api/v2.0/user'
    res = make_request('get', url, data=payload)
    assert res.status_code == 200, res.text
    assert len(res.json()) != 0, 'No cached users'

    url = f'http://{ip}/api/v2.0/group'
    res = make_request('get', url, data=payload)
    assert res.status_code == 200, res.text
    assert len(res.json()) != 0, 'No cached groups'
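
All of these examples repeat the same submit-then-poll pattern: make a request, assert a 200 status, then block on the returned job with wait_on_job and assert the job reached SUCCESS. Below is a minimal sketch of that pattern as a shared helper, assuming the make_request, wait_on_job and JobTimeOut helpers used above; the run_and_wait name itself is hypothetical.

def run_and_wait(method, url, ip, data=None, timeout=300):
    """Submit a request and wait for the resulting job to finish (sketch)."""
    res = make_request(method, url, data=data)
    assert res.status_code == 200, res.text

    # some endpoints return the job id directly, others wrap it in a dict
    body = res.json()
    job_id = body['job_id'] if isinstance(body, dict) and 'job_id' in body else body

    try:
        status = wait_on_job(job_id, ip, timeout)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status
    return status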
Code Example #2
def add_peers():
    """
    Add the peers to the TSP (Trusted Storage Pool). We choose a single
    node to send these requests since glusterd coordinates the network
    requests to the other nodes.
    """
    nodes = [v for k, v in CLUSTER_INFO.items() if k in ('NODE_B_DNS', 'NODE_C_DNS')]
    for node in nodes:
        # start the job to add a peer
        ans = make_request('post', f'http://{CLUSTER_INFO["NODE_A_IP"]}/api/v2.0/gluster/peer', data={'hostname': node})
        assert ans.status_code == 200, ans.text

        # wait on the peer to be added
        try:
            status = wait_on_job(ans.json(), CLUSTER_INFO['NODE_A_IP'], 10)
        except JobTimeOut:
            assert False, JobTimeOut
        else:
            assert status['state'] == 'SUCCESS', status

    # query a node for the peers (it returns all peer information)
    ans = make_request('get', '/gluster/peer')
    assert ans.status_code == 200, ans.text
    # use casefold() for the hostname comparison since case does not matter,
    # but the resolvable names on the network might not match _exactly_ with what
    # was given to us in the config (i.e. DNS1.HOSTNAME.BLAH == DNS1.hostname.BLAH)
    assert set([i['hostname'].casefold() for i in ans.json()]) == set([i.casefold() for i in GPD]), ans.json()
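
The casefold comparison simply makes the hostname check case-insensitive, matching the example given in the comment:

assert 'DNS1.HOSTNAME.BLAH'.casefold() == 'DNS1.hostname.BLAH'.casefold()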
Code Example #3
def test_050_leave_activedirectory(request):
    depends(request, ['JOINED_AD'])

    payload = {
        "username": CLUSTER_ADS['USERNAME'],
        "password": CLUSTER_ADS['PASSWORD']
    }
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/activedirectory/leave/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json(), CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    for ip in CLUSTER_IPS:
        url = f'http://{ip}/api/v2.0/activedirectory/get_state'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json() == 'DISABLED'

        url = f'http://{ip}/api/v2.0/activedirectory/started'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json() is False
Code Example #4
def test_003_join_activedirectory(request):
    depends(request, ['DS_NETWORK_CONFIGURED'])

    payload = {
        "domainname": CLUSTER_ADS['DOMAIN'],
        "bindname": CLUSTER_ADS['USERNAME'],
        "bindpw": CLUSTER_ADS['PASSWORD'],
        "enable": True
    }
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/activedirectory/'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json()['job_id'], CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    # Need to wait a little for cluster state to settle down

    for ip in CLUSTER_IPS:
        url = f'http://{ip}/api/v2.0/activedirectory/started'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json()

        url = f'http://{ip}/api/v2.0/activedirectory/get_state'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json() == 'HEALTHY'
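
The "# Need to wait a little for cluster state to settle down" comment above is not backed by an actual delay, so the immediate HEALTHY assertion can race the join. A hedged sketch of a settle loop using the same make_request helper (the retry count and delay are arbitrary placeholders):

import time

def wait_for_ad_state(ip, expected='HEALTHY', retries=30, delay=10):
    """Poll activedirectory/get_state until it reports the expected state (sketch)."""
    for _ in range(retries):
        res = make_request('get', f'http://{ip}/api/v2.0/activedirectory/get_state')
        if res.status_code == 200 and res.json() == expected:
            return
        time.sleep(delay)
    assert False, f'{ip} never reached AD state {expected}'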
Code Example #5
def test_010_filesystem_setperm_recursive(request):
    depends(request, ['FS_BASIC_TEST_FILES_CREATED'])
    payload = {
        "path": CLUSTER_PATH,
        "mode": "777",
        "options": {
            "recursive": True
        }
    }
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/filesystem/setperm/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json(), CLUSTER_IPS[0], 5)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    payload = {'path': CLUSTER_PATH}
    url = f'http://{CLUSTER_IPS[1]}/api/v2.0/filesystem/listdir/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text
    data = res.json()
    for entry in data:
        mode = stat.S_IMODE(entry['mode']) & ~stat.S_IFDIR
        assert entry['acl'] is False
        assert f'{mode:03o}' == '777'
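
As a side note, stat.S_IMODE already strips the file-type bits from a raw st_mode value, so the extra "& ~stat.S_IFDIR" above does not change the result; a quick check:

import stat

# 0o40777 is a directory entry (S_IFDIR) with 777 permission bits
assert stat.S_IMODE(0o40777) == 0o777
assert stat.S_IMODE(0o40777) & ~stat.S_IFDIR == 0o777  # the extra mask is a no-op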
Code Example #6
File: test_002_ldap.py  Project: yaplej/freenas
def test_003_bind_ldap(request):
    depends(request, ['DS_LDAP_NETWORK_CONFIGURED'])

    payload = {
        "hostname": [CLUSTER_LDAP['HOSTNAME']],
        "basedn": CLUSTER_LDAP['BASEDN'],
        "binddn": CLUSTER_LDAP['BINDDN'],
        "bindpw": CLUSTER_LDAP['BINDPW'],
        "ssl": "ON",
        "enable": True
    }
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/ldap/'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json()['job_id'], CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    for ip in CLUSTER_IPS:
        payload = {
            'msg': 'method',
            'method': 'ldap.started',
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res

        url = f'http://{ip}/api/v2.0/ldap/get_state'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json() == 'HEALTHY'
Code Example #7
File: test_002_ldap.py  Project: yaplej/freenas
def test_050_unbind_ldap(request):
    depends(request, ['BOUND_LDAP'])

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/ldap'
    payload = {
        "has_samba_schema": False,
        "enable": False,
    }
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json()['job_id'], CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    for ip in CLUSTER_IPS:
        url = f'http://{ip}/api/v2.0/ldap/get_state'
        res = make_request('get', url)
        assert res.status_code == 200, f'ip: {ip}, res: {res.text}'
        assert res.json() == 'DISABLED'

        payload = {
            'msg': 'method',
            'method': 'ldap.started',
        }
        res = make_ws_request(ip, payload)
        assert res.get('error') is None, res
Code Example #8
File: test_002_ldap.py  Project: yaplej/freenas
def test_008_bind_ldap(request):
    depends(request, ['DS_LDAP_SMB_SHARE_CREATED', 'BOUND_LDAP'])

    payload = {
        "has_samba_schema": True,
    }
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/ldap/'
    res = make_request('put', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json()['job_id'], CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status
Code Example #9
def test_008_create_clustered_smb_share(request):
    depends(request, ['JOINED_AD'])
    global ds_smb_share_id
    global ds_wrk

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/filesystem/mkdir/'
    res = make_request('post', url, data=SHARE_FUSE_PATH)
    assert res.status_code == 200, res.text

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/user/get_user_obj/'
    payload = {"username": f'{CLUSTER_ADS["USERNAME"]}@{CLUSTER_ADS["DOMAIN"]}'}
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text
    user_obj = res.json()

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/filesystem/chown/'
    payload = {"path": SHARE_FUSE_PATH, "uid": user_obj["pw_uid"]}
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json(), CLUSTER_IPS[0], 300)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/sharing/smb/'
    payload = {
        "comment": "AD clustered SMB share",
        "path": '/ds_smb_share_01',
        "name": "DS_CL_SMB",
        "purpose": "NO_PRESET",
        "shadowcopy": False,
        "cluster_volname": CLUSTER_INFO["GLUSTER_VOLUME"]
    }

    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text
    ds_smb_share_id = res.json()['id']

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/smb'
    res = make_request('get', url)
    assert res.status_code == 200, res.text
    ds_wrk = res.json()['workgroup']
Code Example #10
def add_public_ips_to_ctdb():
    for priv_ip, pub_ip in zip(CLUSTER_IPS, PUBLIC_IPS):
        res = make_request('post', f'http://{priv_ip}/api/v2.0/ctdb/general/status', data={'all_nodes': False})
        this_node = res.json()[0]['pnn']

        payload = {
            'pnn': this_node,
            'ip': pub_ip,
            'netmask': CLUSTER_INFO['NETMASK'],
            'interface': CLUSTER_INFO['INTERFACE']
        }
        res = make_request('post', f'http://{priv_ip}/api/v2.0/ctdb/public/ips', data=payload)
        try:
            status = wait_on_job(res.json(), priv_ip, 5)
        except JobTimeOut:
            assert False, JobTimeOut
        else:
            assert status['state'] == 'SUCCESS', status
Code Example #11
def test_012_filesystem_setacl_nonrecursive(request):
    depends(request, ['FS_BASIC_TEST_FILES_CREATED'])

    payload = {"acl_type": "POSIX_RESTRICTED"}
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/filesystem/get_default_acl/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text

    to_set = res.json()

    payload = {
        "path": CLUSTER_PATH,
        "dacl": to_set,
        "acltype": "POSIX1E",
        "options": {
            "recursive": False
        }
    }

    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/filesystem/setacl/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json(), CLUSTER_IPS[0], 5)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    url = f'http://{CLUSTER_IPS[1]}/api/v2.0/filesystem/stat/'
    res = make_request('post', url, data=CLUSTER_PATH)
    assert res.status_code == 200, res.text
    data = res.json()
    assert data['acl'] is True

    payload = {'path': CLUSTER_PATH}
    url = f'http://{CLUSTER_IPS[1]}/api/v2.0/filesystem/listdir/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text
    data = res.json()
    for entry in data:
        assert entry['acl'] is False
Code Example #12
def test_011_filesystem_reset_mode(request):
    depends(request, ['FS_BASIC_TEST_FILES_CREATED'])

    payload = {
        "path": CLUSTER_PATH,
        "mode": "755",
        "options": {
            "recursive": True
        }
    }
    url = f'http://{CLUSTER_IPS[0]}/api/v2.0/filesystem/setperm/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text

    try:
        status = wait_on_job(res.json(), CLUSTER_IPS[0], 5)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status
Code Example #13
def test_008_filesystem_reset_owner(request):
    depends(request, ['FS_BASIC_TEST_FILES_CREATED'])

    payload = {"path": CLUSTER_PATH, "uid": 0, "options": {"recursive": True}}
    url = f'http://{CLUSTER_IPS[1]}/api/v2.0/filesystem/chown/'
    res = make_request('post', url, data=payload)
    assert res.status_code == 200, res.text
    try:
        status = wait_on_job(res.json(), CLUSTER_IPS[1], 5)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    url = f'http://{CLUSTER_IPS[1]}/api/v2.0/filesystem/stat/'
    res = make_request('post', url, data=CLUSTER_PATH)
    assert res.status_code == 200, res.text
    data = res.json()
    assert data['uid'] == 0
    assert data['gid'] == 0
Code Example #14
def create_volume():
    """Create and start the gluster volume."""
    gvol = CLUSTER_INFO['GLUSTER_VOLUME']
    payload = {
        'name': gvol,
        'bricks': [{'peer_name': hostname, 'peer_path': BRICK_PATH} for hostname in GPD],
        'force': True,
    }
    ans = make_request('post', '/gluster/volume', data=payload)
    assert ans.status_code == 200, ans.text

    # wait on the gluster volume to be created
    try:
        status = wait_on_job(ans.json(), CLUSTER_INFO['NODE_A_IP'], 20)
    except JobTimeOut:
        assert False, JobTimeOut
    else:
        assert status['state'] == 'SUCCESS', status

    # query a node for the volume
    payload = {'query-filters': [['name', '=', gvol]]}
    ans = make_request('get', '/gluster/volume', data=payload)
    assert ans.status_code == 200, ans.text
    assert len(ans.json()) > 0 and ans.json()[0]['id'] == gvol, ans.text
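
create_volume and add_peers lean on module-level configuration (CLUSTER_INFO, GPD, BRICK_PATH) that is not shown in these snippets. A rough sketch of the shapes they appear to assume, with only the key names taken from the examples and every literal value a placeholder:

# Placeholder values; only the key names come from the examples on this page.
CLUSTER_INFO = {
    'NODE_A_IP': '10.0.0.1',
    'NODE_B_DNS': 'node-b.example.internal',
    'NODE_C_DNS': 'node-c.example.internal',
    'NETMASK': '255.255.255.0',
    'INTERFACE': 'enp1s0',
    'ZPOOL': 'cargo',
    'ZPOOL_DISK': 'sdb',
    'GLUSTER_VOLUME': 'gvol01',
}
# GPD appears to hold the resolvable peer names used for peers and brick layout
GPD = ['node-a.example.internal', 'node-b.example.internal', 'node-c.example.internal']
# mirrors the brick path mentioned in the dataset comments of the last example
BRICK_PATH = '/mnt/cargo/.glusterfs/gvol01/brick0'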
Code Example #15
File: init_gluster.py  Project: wesleywwf/freenas
def add_peers():
    """
    Add the peers to the TSP (Trusted Storage Pool). We choose a single
    node to send these requests since glusterd coordinates the network
    requests to the other nodes.
    """
    nodes = [v for k, v in CLUSTER_INFO.items() if k in ('NODE_B_DNS', 'NODE_C_DNS')]
    for node in nodes:
        # start the job to add a peer
        ans = make_request('post', f'http://{CLUSTER_INFO["NODE_A_IP"]}/api/v2.0/gluster/peer', data={'hostname': node})
        assert ans.status_code == 200, ans.text

        # wait on the peer to be added
        try:
            status = wait_on_job(ans.json(), CLUSTER_INFO['NODE_A_IP'], 10)
        except JobTimeOut:
            assert False, JobTimeOut
        else:
            assert status['state'] == 'SUCCESS', status

    # query a node for the peers (it returns all peer information)
    ans = make_request('get', '/gluster/peer')
    assert ans.status_code == 200, ans.text
    assert set([i['hostname'] for i in ans.json()]) == set(HOSTNAMES), ans.json()
Code Example #16
File: init_cluster.py  Project: yaplej/freenas
def setup_zpool_and_datasets(ip):
    result = {'ERROR': ''}

    # query for existing zpool (clean CI run creates a zpool)
    print(f'Checking for existing zpools on {ip}')
    url = f'http://{ip}/api/v2.0/pool'
    ans = make_request('get', url)
    if ans.status_code != 200:
        result['ERROR'] = f'Invalid status code when checking for existing zpools: {ans.text}'
        return result

    # get the id of the existing pool and export it
    pool = ans.json()
    pool = pool[0] if pool else None
    if pool:
        url = f'http://{ip}/api/v2.0/pool/id/{pool["id"]}/export'
        ans = make_request('post', url)
        if ans.status_code != 200:
            result['ERROR'] = f'Invalid status code when exporting "{pool["name"]}" on {ip}: {ans.text}'
            return result
        try:
            print(f'Waiting on "{pool["name"]}" to be exported on {ip}')
            status = wait_on_job(ans.json(), ip, 20)
        except JobTimeOut:
            result['ERROR'] = f'Timed out waiting on "{pool["name"]}" to be exported on {ip}'
            return result
        else:
            if status['state'] != 'SUCCESS':
                result['ERROR'] = f'Exporting "{pool["name"]}" failed on {ip}'
                return result

    # wipe the disks to clean any remnants of previous zpools
    print(f'Wiping "{CLUSTER_INFO["ZPOOL_DISK"]}" on {ip}')
    url = f'http://{ip}/api/v2.0/disk/wipe'
    payload = {'dev': CLUSTER_INFO['ZPOOL_DISK'], 'mode': 'QUICK'}
    ans = make_request('post', url, data=payload)
    if ans.status_code != 200:
        result['ERROR'] = f'Invalid status code when wiping disk: {ans.status_code}:{ans.text}'
        return result
    try:
        print(
            f'Waiting for disk "{CLUSTER_INFO["ZPOOL_DISK"]}" on {ip} to be wiped'
        )
        status = wait_on_job(ans.json(), ip, 10)
    except JobTimeOut:
        result['ERROR'] = f'Timed out waiting for disk to be wiped on {ip}'
        return result
    else:
        if status['state'] != 'SUCCESS':
            result['ERROR'] = f'Wiping disk {CLUSTER_INFO["ZPOOL_DISK"]} failed on {ip}'
            return result

    # now create the zpool
    print(f'Creating zpool "{CLUSTER_INFO["ZPOOL"]}" on {ip}')
    url = f'http://{ip}/api/v2.0/pool'
    payload = {
        'name': CLUSTER_INFO['ZPOOL'],
        'encryption': False,
        'topology': {
            'data': [{
                'type': 'STRIPE',
                'disks': [CLUSTER_INFO['ZPOOL_DISK']]
            }]
        }
    }
    ans = make_request('post', url, data=payload)
    if ans.status_code != 200:
        result['ERROR'] = f'Failed to create zpool: "{CLUSTER_INFO["ZPOOL"]}" on {ip}:{ans.text}'
        return result
    try:
        print(
            f'Waiting on zpool "{CLUSTER_INFO["ZPOOL"]}" to be created on {ip}'
        )
        status = wait_on_job(ans.json(), ip, 30)
    except JobTimeOut:
        result['ERROR'] = f'Timed out waiting on zpool to be created on {ip}'
        return result
    else:
        if status['state'] != 'SUCCESS':
            result['ERROR'] = f'Creating zpool was a failure: {status["result"]} on {ip}'
            return result

    # now create the cluster datasets. We have to use the websocket API here
    # because we're creating "internal" datasets that normal API users are
    # prevented from creating through the public API, so the websocket call
    # side-steps the public API validation
    print(f'Creating dataset hierarchy "{DATASET_HIERARCHY}" on {ip}')
    payload = {
        'msg': 'method',
        'method': 'zfs.dataset.create',
        'params': [{
            'name': DATASET_HIERARCHY,
            'type': 'FILESYSTEM',
            'create_ancestors': True,
            'properties': {'acltype': 'posix'}
        }]
    }
    res = make_ws_request(ip, payload)
    if res.get('error', {}):
        result['ERROR'] = res['error'].get('reason', 'NO REASON GIVEN')
        return result

    # libzfs doesn't mount the youngest ancestor when you give it a
    # path of ancestors to be created all at once. This means we have
    # to mount the youngest ancestor ourselves
    # i.e. cargo/.glusterfs/gvol01/brick0 (brick0 needs to be mounted)
    print(f'Mounting dataset hierarchy "{DATASET_HIERARCHY}" on {ip}')
    payload = {
        'msg': 'method',
        'method': 'zfs.dataset.mount',
        'params': [DATASET_HIERARCHY],
    }
    res = make_ws_request(ip, payload)
    if res.get('error', {}):
        result['ERROR'] = res['error'].get('reason', 'NO REASON GIVEN')

    return result
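
setup_zpool_and_datasets reports failures through the returned dict instead of raising, so the caller has to check result['ERROR'] itself. A minimal usage sketch, assuming the CLUSTER_IPS list used in the other examples (the setup_all_nodes name is hypothetical):

def setup_all_nodes():
    """Hypothetical driver: prepare every node and fail fast on the first error."""
    for ip in CLUSTER_IPS:
        result = setup_zpool_and_datasets(ip)
        assert not result['ERROR'], result['ERROR']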