Пример #1
0
 def subtest_volume_group_clone_operations(self):
     """Clone the previously created volume group and record the new IDs."""
     clone_name = util.random_name('cli_test_volume_group_clone')
     # Source the clone from the volume group created earlier in the suite.
     clone_source = json.dumps({
         'type': 'volumeGroupId',
         'volumeGroupId': self.volume_group
     })
     create_params = [
         'volume-group', 'create',
         '--availability-domain', util.availability_domain(),
         '--compartment-id', util.COMPARTMENT_ID,
         '--display-name', clone_name,
         '--source-details', clone_source
     ]
     self.volume_group_clone, self.volume_clones = self.volume_group_operations_internal(
         clone_name, create_params)
Пример #2
0
def gitlab_trigger(project_and_pipeline, runner, config_file, config_profile):
    """Fixture: create a GitLab trigger on the build pipeline, yield its OCID,
    then delete the trigger during teardown."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_gitlab_trigger_fixture.yml'):
        # create trigger
        project_id = project_and_pipeline[0]
        build_pipeline_id = project_and_pipeline[1]
        display_name = util.random_name('cli_devops_build_test_trigger')
        # Single action: run the build pipeline for GitLab push/PR events
        # on the feature -> master ref pair.
        trigger_actions = [{
            'type': 'TRIGGER_BUILD_PIPELINE',
            'filter': {
                'triggerSource': 'GITLAB',
                'events':
                ['PUSH', 'PULL_REQUEST_UPDATED', 'PULL_REQUEST_REOPENED'],
                'include': {
                    'headRef': 'feature',
                    'baseRef': 'master'
                }
            },
            'buildPipelineId': build_pipeline_id
        }]

        create_params = [
            'devops', 'trigger', 'create-gitlab-trigger', '--display-name',
            display_name, '--project-id', project_id, '--actions',
            json.dumps(trigger_actions)
        ]
        create_result = invoke(runner, config_file, config_profile,
                               create_params)
        util.validate_response(create_result)
        trigger_id = json.loads(create_result.output)['data']['id']
        wait_until(['devops', 'trigger', 'get', '--trigger-id', trigger_id],
                   'ACTIVE', 300)
    yield trigger_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_gitlab_trigger_fixture_cleanup.yml'):
        # delete trigger
        delete_params = [
            'devops', 'trigger', 'delete', '--trigger-id', trigger_id,
            '--force'
        ]
        delete_result = invoke(runner, config_file, config_profile,
                               delete_params)
        util.validate_response(delete_result)
Пример #3
0
def deliver_artifact_stage(project_and_pipeline, build_stage, runner,
                           config_file, config_profile):
    """Fixture: create a deliver-artifact stage that follows the build stage,
    yield its OCID, and delete the stage during teardown."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_deliver_artifact_stage_fixture.yml'):
        # create deliver artifact stage
        pipeline_id = project_and_pipeline[1]
        stage_name = util.random_name(
            'cli_devops_build_test_deliver_artifact_stage')
        # One placeholder artifact to deliver.
        artifacts = {
            'items': [{
                'artifactId': 'artifactId',
                'artifactName': 'artifactName'
            }]
        }
        # The new stage runs after the build stage supplied by the fixture.
        predecessors = {'items': [{'id': build_stage}]}
        create_params = [
            'devops', 'build-pipeline-stage', 'create-deliver-artifact-stage',
            '--build-pipeline-id', pipeline_id, '--display-name',
            stage_name, '--deliver-artifact-collection',
            json.dumps(artifacts),
            '--stage-predecessor-collection',
            json.dumps(predecessors)
        ]
        create_result = invoke(runner, config_file, config_profile,
                               create_params)
        util.validate_response(create_result)
        deliver_artifact_stage_id = json.loads(
            create_result.output)['data']['id']
        wait_until([
            'devops', 'build-pipeline-stage', 'get', '--stage-id',
            deliver_artifact_stage_id
        ], 'ACTIVE', 300)
    yield deliver_artifact_stage_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_deliver_artifact_stage_fixture_cleanup.yml'):
        # delete deliver artifact stage
        delete_params = [
            'devops', 'build-pipeline-stage', 'delete', '--stage-id',
            deliver_artifact_stage_id, '--force'
        ]
        delete_result = invoke(runner, config_file, config_profile,
                               delete_params)
        util.validate_response(delete_result)
Пример #4
0
    def subtest_windows_instance_operations(self):
        """Launch a Windows VM, read its initial credentials, terminate it."""
        display_name = util.random_name('cli_test_instance')
        image_id = util.windows_vm_image()
        shape = 'VM.Standard1.1'

        launch_result = self.invoke([
            'compute', 'instance', 'launch', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', display_name,
            '--subnet-id', self.subnet_ocid, '--image-id', image_id, '--shape',
            shape
        ])
        self.windows_instance_ocid = util.find_id_in_response(
            launch_result.output)
        util.validate_response(launch_result, expect_etag=True)

        # Wait until the instance is booted before asking for credentials.
        util.wait_until([
            'compute', 'instance', 'get', '--instance-id',
            self.windows_instance_ocid
        ],
                        'RUNNING',
                        max_wait_seconds=600)

        get_result = self.invoke([
            'compute', 'instance', 'get', '--instance-id',
            self.windows_instance_ocid
        ])
        util.validate_response(get_result, expect_etag=True)

        creds_result = self.invoke([
            'compute', 'instance', 'get-windows-initial-creds',
            '--instance-id', self.windows_instance_ocid
        ])
        util.validate_response(creds_result)

        credentials = json.loads(creds_result.output)['data']
        assert credentials['username'] == 'opc'
        assert 'password' in credentials

        terminate_result = self.invoke([
            'compute', 'instance', 'terminate', '--instance-id',
            self.windows_instance_ocid, '--force'
        ])
        util.validate_response(terminate_result)
Пример #5
0
    def subtest_user_operations(self):
        """Exercise create / list / update / update-state / get for a user."""
        self.user_name = util.random_name('cli_test_user')
        self.user_description = 'Created by CLI identity tests.'

        create_result = self.invoke([
            'user', 'create', '--compartment-id', util.TENANT_ID, '--name',
            self.user_name, '--description', self.user_description
        ])
        self.user_ocid = util.find_id_in_response(create_result.output)
        self.validate_response(create_result,
                               extra_validation=self.validate_user,
                               expect_etag=True)

        # Listing works both with an explicit compartment and without one.
        list_result = self.invoke([
            'user', 'list', '--compartment-id', util.TENANT_ID, '--limit',
            '1000'
        ])
        self.validate_response(list_result,
                               extra_validation=self.validate_user)

        list_result = self.invoke(['user', 'list', '--limit', '1000'])
        self.validate_response(list_result,
                               extra_validation=self.validate_user)

        self.user_description = 'UPDATED ' + self.user_description
        update_result = self.invoke([
            'user', 'update', '--user-id', self.user_ocid, '--description',
            self.user_description
        ])
        self.validate_response(update_result,
                               extra_validation=self.validate_user,
                               expect_etag=True)

        state_result = self.invoke([
            'user', 'update-user-state', '--user-id', self.user_ocid,
            '--blocked', 'false'
        ])
        self.validate_response(state_result,
                               extra_validation=self.validate_user,
                               expect_etag=True)

        get_result = self.invoke(['user', 'get', '--user-id', self.user_ocid])
        self.validate_response(get_result,
                               extra_validation=self.validate_user,
                               expect_etag=True)
Пример #6
0
def test_deployment_update(runner, config_file, config_profile):
    """Verify updating a deployment changes its display name and specification.

    NOTE(review): this test is currently disabled -- it returns immediately
    (see the TODO below), so everything after the early `return` is dead
    code.  The dead code also references `api_gateway_and_deployment`,
    which is not a parameter of this function; re-enabling the test will
    require restoring that fixture in the signature.
    """
    # TODO: remove this -- added this return on 8/16/2019 b/c tests were failing.
    return
    # ---- unreachable below this point until the early return is removed ----
    api_deployment_id = api_gateway_and_deployment[1]
    fn_func_id = api_gateway_and_deployment[2]

    # Snapshot the deployment before the update so we can diff afterwards.
    get_params = [
        'api-gateway', 'deployment', 'get', '--deployment-id',
        api_deployment_id
    ]

    result = invoke(runner, config_file, config_profile, get_params)
    util.validate_response(result)
    api_deployment = json.loads(result.output)['data']

    # Update both the display name and the full API specification.
    params = [
        'api-gateway', 'deployment', 'update', '--deployment-id',
        api_deployment_id, '--display-name',
        util.random_name('deployment',
                         insert_underscore=False), '--specification',
        build_full_api_specification('https://cloud.oracle.com', fn_func_id,
                                     fn_func_id), '--force'
    ]

    update_result = invoke(runner, config_file, config_profile, params)
    util.validate_response(update_result)

    # Updates are asynchronous; wait for the deployment to settle.
    util.wait_until([
        'api-gateway', 'deployment', 'get', '--deployment-id',
        api_deployment_id
    ],
                    'ACTIVE',
                    max_wait_seconds=300)

    result = invoke(runner, config_file, config_profile, get_params)
    util.validate_response(result)
    api_deployment_updated = json.loads(result.output)['data']

    assert api_deployment['display-name'] != api_deployment_updated['display-name'], \
        "Deployment's display name should have been updated"

    assert json.dumps(api_deployment['specification']) != json.dumps(api_deployment_updated['specification']), \
        "Deployment's specification should have been updated"
Пример #7
0
def test_topic_crud(runner, config_file, config_profile):
    """Create, update, and list an ONS topic; always delete it afterwards."""
    topic_id = None
    try:
        # Create Topic
        create_params = [
            'ons', 'topic', 'create', '--name',
            util.random_name('topic_name'), '-c', util.COMPARTMENT_ID,
            '--description', 'A description of the topic'
        ]
        create_result = invoke(runner, config_file, config_profile,
                               create_params)
        util.validate_response(create_result)
        topic_id = json.loads(create_result.output)['data']['topic-id']
        util.wait_until(['ons', 'topic', 'get', '--topic-id', topic_id],
                        'ACTIVE',
                        max_wait_seconds=600)

        # Update topic
        update_result = invoke(runner, config_file, config_profile, [
            'ons', 'topic', 'update', '--topic-id', topic_id, '--description',
            'new description'
        ])
        util.validate_response(update_result)
        updated = json.loads(update_result.output)['data']
        assert updated['description'] == 'new description'

        # List all topics
        list_result = invoke(runner, config_file, config_profile, [
            'ons', 'topic', 'list', '--compartment-id', util.COMPARTMENT_ID
        ])
        util.validate_response(list_result)
        assert len(json.loads(list_result.output)['data']) > 0
    finally:
        # Clean up only if the topic was actually created.
        if topic_id:
            delete_result = invoke(runner, config_file, config_profile, [
                'ons', 'topic', 'delete', '--topic-id', topic_id, '--force'
            ])
            util.validate_response(delete_result)
Пример #8
0
    def subtest_ip_sec_connection_operations(self):
        """CRUD plus config/status reads for an IPSec connection."""
        # Skip entirely if an earlier subtest found no DRG capacity.
        if hasattr(self, 'drg_capacity_issue'):
            print('Unable to execute subtest_ip_sec_connection_operations as a DRG is not available')
            return

        ipsc_name = util.random_name('cli_test_ipsc')
        # Example static routes shipped with the CLI extended help text.
        routes = util.remove_outer_quotes(oci_cli_virtual_network.virtualnetwork_cli_extended.network_create_ip_sec_connection_static_routes_example)

        create_result = self.invoke([
            'ip-sec-connection', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--display-name', ipsc_name,
            '--cpe-id', self.cpe_ocid,
            '--drg-id', self.drg_ocid,
            '--static-routes', routes
        ])
        # Service limits can block IPSec creation; bail out gracefully.
        if 'Limit tenant-ipsec-vpn-connection' in create_result.output:
            self.drg_capacity_issue = True
            print('Unable to execute subtest_ip_sec_connection_operations as an IPSec Connection is not available')
            return

        self.ipsc_ocid = util.find_id_in_response(create_result.output)
        util.validate_response(create_result, expect_etag=True)
        util.wait_until(['network', 'ip-sec-connection', 'get', '--ipsc-id', self.ipsc_ocid], 'AVAILABLE',
                        max_wait_seconds=600)

        list_result = self.invoke(
            ['ip-sec-connection', 'list', '--compartment-id', util.COMPARTMENT_ID])
        util.validate_response(list_result)

        ipsc_name += "_updated"
        update_result = self.invoke(
            ['ip-sec-connection', 'update', '--ipsc-id', self.ipsc_ocid, '--display-name', ipsc_name])
        util.validate_response(update_result, expect_etag=True)

        get_result = self.invoke(
            ['ip-sec-connection', 'get', '--ipsc-id', self.ipsc_ocid])
        util.validate_response(get_result, expect_etag=True)

        config_result = self.invoke(
            ['ip-sec-connection', 'get-config', '--ipsc-id', self.ipsc_ocid])
        util.validate_response(config_result)

        status_result = self.invoke(
            ['ip-sec-connection', 'get-status', '--ipsc-id', self.ipsc_ocid])
        util.validate_response(status_result)
Пример #9
0
def test_list_and_update_mount_targets(mount_target, runner, config_file, config_profile):
    """List mount targets in the AD, then rename one and verify the change."""
    list_params = [
        'mount-target', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain()
    ]
    list_result = invoke(runner, config_file, config_profile, list_params)
    util.validate_response(list_result)

    new_display_name = util.random_name('up_cli_test_mt')
    update_params = [
        'mount-target', 'update',
        '--mount-target-id', mount_target['id'],
        '--display-name', new_display_name
    ]
    update_result = invoke(runner, config_file, config_profile, update_params)
    util.validate_response(update_result)

    # The service should echo back the new display name.
    assert json.loads(update_result.output)['data']['display-name'] == new_display_name
Пример #10
0
def test_load_balancer_operations_with_waiters(runner, config_file,
                                               config_profile, vcn_and_subnets,
                                               key_pair_files):
    """Create an LB with --wait-for-state, run waiter sub-flows, delete it."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_ops_with_waiters.yml'):
        subnet_ocid_1 = vcn_and_subnets[1]
        subnet_ocid_2 = vcn_and_subnets[2]

        lb_name = util.random_name('cli_lb')
        create_params = [
            'load-balancer', 'create', '-c', util.COMPARTMENT_ID,
            '--display-name', lb_name, '--shape-name', '100Mbps',
            '--subnet-ids', '["{}","{}"]'.format(subnet_ocid_1, subnet_ocid_2),
            '--wait-for-state', 'SUCCEEDED'
        ]
        result = invoke(runner, config_file, config_profile, create_params)
        # Waiter output mixes text and JSON, so skip strict JSON validation.
        util.validate_response(result, json_response_expected=False)
        load_balancer = util.get_json_from_mixed_string(result.output)
        lb_data = load_balancer['data']
        assert lb_data['lifecycle-state'] == 'ACTIVE'
        assert 'loadbalancer' in lb_data['id']
        assert lb_data['display-name'] == lb_name
        assert lb_data['shape-name'] == '100Mbps'
        assert len(lb_data['subnet-ids']) == 2
        assert subnet_ocid_1 in lb_data['subnet-ids']
        assert subnet_ocid_2 in lb_data['subnet-ids']

        _do_backend_and_backend_set_waiters(runner, lb_data['id'],
                                            config_file, config_profile)
        _do_certificate_waiters(runner, lb_data['id'],
                                config_file, config_profile, key_pair_files)

        delete_params = [
            'load-balancer', 'delete', '--load-balancer-id',
            lb_data['id'], '--force', '--wait-for-state',
            'SUCCEEDED'
        ]
        result = invoke(runner, config_file, config_profile, delete_params)
        _validate_work_request_result(result, lb_data['id'])
Пример #11
0
    def subtest_cpe_operations(self):
        """Create, list, rename, and fetch a customer-premises equipment object."""
        cpe_name = util.random_name('cli_test_cpe')
        ip_address = "137.254.4.11"

        create_result = self.invoke([
            'cpe', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--display-name', cpe_name,
            '--ip-address', ip_address,
        ])
        self.cpe_ocid = util.find_id_in_response(create_result.output)
        util.validate_response(create_result, expect_etag=True)

        list_result = self.invoke(
            ['cpe', 'list', '--compartment-id', util.COMPARTMENT_ID])
        util.validate_response(list_result)

        cpe_name += "_updated"
        update_result = self.invoke(
            ['cpe', 'update', '--cpe-id', self.cpe_ocid, '--display-name', cpe_name])
        util.validate_response(update_result, expect_etag=True)

        get_result = self.invoke(['cpe', 'get', '--cpe-id', self.cpe_ocid])
        util.validate_response(get_result, expect_etag=True)
Пример #12
0
def test_crud_snapshot(file_system, runner, config_file, config_profile):
    """Create a snapshot on the file system, fetch it, wait, then delete it."""
    list_result = invoke(runner, config_file, config_profile, [
        'snapshot', 'list',
        '--file-system-id', file_system
    ])
    util.validate_response(list_result)
    # A brand-new file system starts with no snapshots, so output is empty.
    assert len(list_result.output) == 0

    create_result = invoke(runner, config_file, config_profile, [
        'snapshot', 'create',
        '--file-system-id', file_system,
        '--name', util.random_name('cli_snapshot')
    ])
    util.validate_response(create_result)

    snapshot_id = json.loads(create_result.output)['data']['id']
    get_result = invoke(runner, config_file, config_profile, [
        'snapshot', 'get',
        '--snapshot-id', snapshot_id
    ])
    util.validate_response(get_result)

    util.wait_until(['fs', 'snapshot', 'get', '--snapshot-id', snapshot_id], 'ACTIVE', max_wait_seconds=300)

    delete_result = invoke(runner, config_file, config_profile, [
        'snapshot', 'delete',
        '--snapshot-id', snapshot_id,
        '--force'
    ])
    util.validate_response(delete_result)
Пример #13
0
def test_crud_export_set(mount_target, runner, config_file, config_profile):
    """List export sets, confirm the mount target's set is present, rename it."""
    list_params = [
        'export-set', 'list', '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain',
        util.availability_domain()
    ]
    list_result = invoke(runner, config_file, config_profile, list_params)
    util.validate_response(list_result)

    # The export set belonging to our mount target must appear in the listing.
    export_sets = json.loads(list_result.output)['data']
    assert any(
        es['id'] == mount_target['export-set-id'] for es in export_sets)

    updated_export_set_name = util.random_name('up_cli_test_es')
    update_params = [
        'export-set', 'update', '--export-set-id',
        mount_target['export-set-id'], '--display-name',
        updated_export_set_name
    ]
    update_result = invoke(runner, config_file, config_profile, update_params)
    util.validate_response(update_result)

    updated = json.loads(update_result.output)['data']
    assert updated['display-name'] == updated_export_set_name

    get_params = [
        'export-set', 'get', '--export-set-id', mount_target['export-set-id']
    ]
    get_result = invoke(runner, config_file, config_profile, get_params)
    util.validate_response(get_result)
Пример #14
0
    def subtest_drg_attachment_operations(self):
        """Attach the DRG to the VCN, then list / rename / fetch the attachment."""
        # The DRG subtest may have bailed out on service limits.
        if hasattr(self, 'drg_capacity_issue'):
            print(
                'Unable to execute subtest_drg_attachment_operations as a DRG is not available'
            )
            return

        attachment_name = util.random_name('cli_test_drg_attachment')

        create_result = self.invoke([
            'drg-attachment', 'create', '--drg-id', self.drg_ocid, '--vcn-id',
            self.vcn_ocid, '--display-name', attachment_name
        ])
        self.drg_attachment_ocid = util.find_id_in_response(
            create_result.output)
        util.validate_response(create_result, expect_etag=True)

        util.wait_until([
            'network', 'drg-attachment', 'get', '--drg-attachment-id',
            self.drg_attachment_ocid
        ], 'ATTACHED')

        list_result = self.invoke([
            'drg-attachment', 'list', '--compartment-id', util.COMPARTMENT_ID
        ])
        util.validate_response(list_result)

        attachment_name += "_updated"
        update_result = self.invoke([
            'drg-attachment', 'update', '--drg-attachment-id',
            self.drg_attachment_ocid, '--display-name', attachment_name
        ])
        util.validate_response(update_result, expect_etag=True)

        get_result = self.invoke([
            'drg-attachment', 'get', '--drg-attachment-id',
            self.drg_attachment_ocid
        ])
        util.validate_response(get_result, expect_etag=True)
Пример #15
0
    def subtest_volume_operations(self):
        """Create two volumes (size given in MBs vs GBs) and verify list sorting."""
        volume_name = util.random_name('cli_test_volume')
        create_params = [
            'volume', 'create', '--availability-domain',
            util.availability_domain(), '--compartment-id',
            util.COMPARTMENT_ID, '--display-name', volume_name
        ]

        # Same 50 GB volume expressed once in MBs and once in GBs.
        self.volume_id = self.volume_operations_internal(
            volume_name, create_params, None, str(50 * 1024))
        self.volume_id_two = self.volume_operations_internal(
            volume_name, create_params, '50', None)

        # Each supported sort key must honour both sort orders.
        for sort_key, json_field, order, extra in (
                ('DISPLAYNAME', 'display-name', 'asc', []),
                ('DISPLAYNAME', 'display-name', 'desc', []),
                ('TIMECREATED', 'time-created', 'asc', ['--all']),
                ('TIMECREATED', 'time-created', 'desc', ['--all'])):
            retrieve_list_and_ensure_sorted(
                ['bv', 'volume', 'list', '-c', util.COMPARTMENT_ID,
                 '--availability-domain', util.availability_domain(),
                 '--sort-by', sort_key, '--sort-order', order] + extra,
                json_field, order)
Пример #16
0
def build_run(project_and_pipeline, wait_stage, build_stage, github_connection,
              runner, config_file, config_profile):
    """Fixture: start a build run against a pinned commit and yield its OCID."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_build_run_fixture.yml'):
        # start build run
        pipeline_id = project_and_pipeline[1]
        run_name = util.random_name('cli_devops_build_test_build_run')
        # Pin the run to a known commit of the integration-test repository.
        commit_info = {
            'repositoryUrl':
            'https://github.com/dlcbld/SdkCliIntegrationTest.git',
            'repositoryBranch': 'main',
            'commitHash': '6062c07b44b7da31aa14a4c4b19dac3255a4833a'
        }
        run_arguments = {
            'items': [{
                'name': 'MAJOR_VERSION',
                'value': '1'
            }]
        }
        create_params = [
            'devops', 'build-run', 'create', '--build-pipeline-id',
            pipeline_id, '--display-name', run_name,
            '--commit-info',
            json.dumps(commit_info), '--build-run-arguments',
            json.dumps(run_arguments)
        ]
        # Brief pause before and after so upstream fixtures settle.
        time.sleep(10)
        create_result = invoke(runner, config_file, config_profile,
                               create_params)
        util.validate_response(create_result)
        build_run_id = json.loads(create_result.output)['data']['id']
        wait_until(
            ['devops', 'build-run', 'get', '--build-run-id', build_run_id],
            'ACCEPTED', 300)
        time.sleep(10)
    yield build_run_id
Пример #17
0
def _do_certificate_waiters(runner, load_balancer_id, config_file,
                            config_profile, key_pair_files):
    """Create and then delete an LB certificate, waiting on each work request."""
    private_key_filename = key_pair_files[1]
    certificate_filename = key_pair_files[2]

    cert_name = util.random_name('cli_lb_certificate')

    # The same PEM file doubles as CA and public certificate for the test.
    create_params = [
        'certificate', 'create', '--certificate-name', cert_name,
        '--load-balancer-id', load_balancer_id, '--ca-certificate-file',
        certificate_filename, '--private-key-file', private_key_filename,
        '--public-certificate-file', certificate_filename, '--passphrase',
        'secret!', '--wait-for-state', 'SUCCEEDED'
    ]
    create_result = invoke(runner, config_file, config_profile, create_params)
    _validate_work_request_result(create_result, load_balancer_id)

    delete_params = [
        'certificate', 'delete', '--load-balancer-id', load_balancer_id,
        '--certificate-name', cert_name, '--force', '--wait-for-state',
        'SUCCEEDED'
    ]
    delete_result = invoke(runner, config_file, config_profile, delete_params)
    _validate_work_request_result(delete_result, load_balancer_id)
Пример #18
0
    def subtest_drg_operations(self):
        """Create a DRG (skipping on service limits), then list/rename/fetch it."""
        drg_name = util.random_name('cli_test_drg')

        create_result = self.invoke([
            'drg', 'create', '--compartment-id', util.COMPARTMENT_ID,
            '--display-name', drg_name
        ])

        # If we have hit a limit, skip the test
        if 'Limit vcn-tenant-drg' in create_result.output or 'Limit tenant-drg' in create_result.output:
            self.drg_capacity_issue = True
            print(
                'Unable to execute subtest_drg_operations as a DRG is not available'
            )
            return

        self.drg_ocid = util.find_id_in_response(create_result.output)
        util.validate_response(create_result, expect_etag=True)

        util.wait_until(['network', 'drg', 'get', '--drg-id', self.drg_ocid],
                        'AVAILABLE',
                        max_wait_seconds=600)

        list_result = self.invoke(
            ['drg', 'list', '--compartment-id', util.COMPARTMENT_ID])
        util.validate_response(list_result)

        drg_name += "_updated"
        update_result = self.invoke([
            'drg', 'update', '--drg-id', self.drg_ocid, '--display-name',
            drg_name
        ])
        util.validate_response(update_result, expect_etag=True)

        get_result = self.invoke(['drg', 'get', '--drg-id', self.drg_ocid])
        util.validate_response(get_result, expect_etag=True)
Пример #19
0
def test_listener_operations(runner, config_file, config_profile,
                             load_balancer, backend_set, certificate):
    """Create, update, and delete a load balancer listener.

    Every listener mutation is asynchronous: the CLI returns an
    `opc-work-request-id`, and the work request must reach SUCCEEDED
    before the next step may run.  The validate/poll sequence, which was
    previously copy-pasted three times, is factored into a local helper.
    """
    def _assert_work_request_succeeds(result):
        # Validate the mutation response, then poll its work request
        # until the LB service reports SUCCEEDED.
        util.validate_response(result)
        response = json.loads(result.output)
        work_request_ocid = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            [
                'lb', 'work-request', 'get', '--work-request-id',
                work_request_ocid
            ],
            'SUCCEEDED',
            max_wait_seconds=LB_PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_listener_operations.yml'):
        # create listener
        listener_name = util.random_name('cli_listener')
        params = [
            'listener', 'create', '--default-backend-set-name', backend_set,
            '--load-balancer-id', load_balancer, '--name', listener_name,
            '--port', '8080', '--protocol', 'HTTP', '--ssl-certificate-name',
            certificate
        ]
        _assert_work_request_succeeds(
            invoke(runner, config_file, config_profile, params))

        # update listener
        params = [
            'listener', 'update', '--listener-name', listener_name,
            '--default-backend-set-name', backend_set, '--load-balancer-id',
            load_balancer, '--port', '8080', '--protocol', 'HTTP',
            '--ssl-certificate-name', certificate, '--force'
        ]
        _assert_work_request_succeeds(
            invoke(runner, config_file, config_profile, params))

        # delete listener
        params = [
            'listener', 'delete', '--load-balancer-id', load_balancer,
            '--listener-name', listener_name, '--force'
        ]
        _assert_work_request_succeeds(
            invoke(runner, config_file, config_profile, params))
Пример #20
0
    def subtest_instance_operations(self):
        """Launch, list, rename, and fetch an instance, then verify that a
        short --wait-for-state timeout on a second launch exits non-zero."""
        instance_name = util.random_name('cli_test_instance')
        fault_domain = 'FAULT-DOMAIN-1'
        image_id = util.oracle_linux_image()
        shape = 'VM.Standard1.1'

        launch_result = self.invoke([
            'compute', 'instance', 'launch', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', instance_name,
            '--fault-domain', fault_domain, '--subnet-id', self.subnet_ocid,
            '--image-id', image_id, '--shape', shape, '--metadata',
            util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.
                                     compute_instance_launch_metadata_example)
        ])
        self.instance_ocid = util.find_id_in_response(launch_result.output)
        util.validate_response(launch_result, expect_etag=True)

        util.wait_until([
            'compute', 'instance', 'get', '--instance-id', self.instance_ocid
        ],
                        'RUNNING',
                        max_wait_seconds=600)

        list_result = self.invoke([
            'compute', 'instance', 'list', '--compartment-id',
            util.COMPARTMENT_ID
        ])
        util.validate_response(list_result)

        # list with compartment shortcut
        list_result = self.invoke(
            ['compute', 'instance', 'list', '-c', util.COMPARTMENT_ID])
        util.validate_response(list_result)

        instance_name += "_updated"
        update_result = self.invoke([
            'compute', 'instance', 'update', '--instance-id',
            self.instance_ocid, '--display-name', instance_name
        ])
        util.validate_response(update_result, expect_etag=True)

        get_result = self.invoke([
            'compute', 'instance', 'get', '--instance-id', self.instance_ocid
        ])
        util.validate_response(get_result, expect_etag=True)

        # A 20-second cap cannot outlast provisioning, so the CLI should
        # give up waiting for RUNNING and exit with a non-zero status.
        timeout_result = self.invoke([
            'compute', 'instance', 'launch', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', instance_name + "_2",
            '--fault-domain', fault_domain, '--subnet-id', self.subnet_ocid,
            '--image-id', image_id, '--shape', shape, '--metadata',
            util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.
                                     compute_instance_launch_metadata_example),
            '--wait-for-state', 'RUNNING', '--max-wait-seconds', '20',
            '--wait-interval-seconds', '5'
        ])
        self.instance_ocid_2 = util.find_id_in_response(
            timeout_result.output[timeout_result.output.index('{'):])
        assert timeout_result.exit_code != 0
Пример #21
0
def vcn_and_subnets(network_client):
    """Pytest fixture: create a VCN plus one subnet in each of two ADs.

    Yields ``[vcn_ocid, subnet_ocid_1, subnet_ocid_2]``, then tears all
    three resources down after the test. Setup and teardown traffic are
    recorded in separate VCR cassettes.
    """
    from tests import util

    # The VCN and both subnets live in the compartment configured for the
    # test run; read the environment variable once instead of three times.
    compartment_id = os.environ['OCI_CLI_COMPARTMENT_ID']

    def _create_subnet(cidr, dns_prefix, availability_domain):
        # Helper: create one subnet in the fixture VCN, wait until it is
        # AVAILABLE, and return its OCID.
        create_subnet_details = oci.core.models.CreateSubnetDetails()
        create_subnet_details.compartment_id = compartment_id
        create_subnet_details.availability_domain = availability_domain
        create_subnet_details.display_name = util.random_name(
            'cli_lb_test_subnet')
        create_subnet_details.vcn_id = vcn_ocid
        create_subnet_details.cidr_block = cidr
        create_subnet_details.dns_label = util.random_name(
            dns_prefix, insert_underscore=False)

        result = network_client.create_subnet(create_subnet_details)
        subnet_ocid = result.data.id
        assert result.status == 200

        oci.wait_until(network_client,
                       network_client.get_subnet(subnet_ocid),
                       'lifecycle_state',
                       'AVAILABLE',
                       max_wait_seconds=300,
                       max_interval_seconds=WAIT_INTERVAL_SECONDS)
        return subnet_ocid

    def _delete_subnet(subnet_ocid):
        # Helper: delete a subnet and wait for TERMINATED. A 404 from the
        # get call means the subnet is already gone; any other service
        # error is logged but does not fail the teardown.
        network_client.delete_subnet(subnet_ocid)
        try:
            oci.wait_until(network_client,
                           network_client.get_subnet(subnet_ocid),
                           'lifecycle_state',
                           'TERMINATED',
                           max_wait_seconds=600,
                           max_interval_seconds=WAIT_INTERVAL_SECONDS)
        except oci.exceptions.ServiceError as error:
            if not hasattr(error, 'status') or error.status != 404:
                util.print_latest_exception(error)

    with test_config_container.create_vcr().use_cassette(
            '_conftest_fixture_vcn_and_subnets.yml'):
        # create VCN
        create_vcn_details = oci.core.models.CreateVcnDetails()
        create_vcn_details.cidr_block = "10.0.0.0/16"
        create_vcn_details.display_name = util.random_name('cli_lb_test_vcn')
        create_vcn_details.compartment_id = compartment_id
        create_vcn_details.dns_label = util.random_name(
            'vcn', insert_underscore=False)

        result = network_client.create_vcn(create_vcn_details)
        vcn_ocid = result.data.id
        assert result.status == 200

        oci.wait_until(network_client,
                       network_client.get_vcn(vcn_ocid),
                       'lifecycle_state',
                       'AVAILABLE',
                       max_wait_seconds=300,
                       max_interval_seconds=WAIT_INTERVAL_SECONDS)

        # One subnet per availability domain so callers can spread
        # resources across ADs.
        subnet_ocid_1 = _create_subnet("10.0.1.0/24", 'subnet',
                                       util.availability_domain())
        subnet_ocid_2 = _create_subnet("10.0.0.0/24", 'subnet2',
                                       util.second_availability_domain())

    yield [vcn_ocid, subnet_ocid_1, subnet_ocid_2]

    # For some reason VCR doesn't like that the post-yield stuff here is all
    # in one cassette. Splitting into different cassettes seems to work.
    with test_config_container.create_vcr().use_cassette(
            '_conftest_fixture_vcn_and_subnets_delete.yml'):
        # Subnets must be terminated before the VCN itself can be deleted.
        _delete_subnet(subnet_ocid_1)
        _delete_subnet(subnet_ocid_2)
        network_client.delete_vcn(vcn_ocid)
# Example #22
def api_gateway_and_deployment(vcn_and_subnet, runner, config_file,
                               config_profile):
    """Pytest fixture: create a Functions application + function, an API
    gateway, and a deployment pointing at http://www.oracle.com.

    Yields ``(api_gateway_id, api_deployment_id, fn_func_id)`` and deletes
    all four resources after the test. All requests are recorded in VCR
    cassettes (one per service, plus cleanup cassettes).
    """
    # Only the subnet OCID is needed here; element 0 (the VCN OCID) is
    # deliberately not read — the original bound it to an unused local.
    subnet_id = vcn_and_subnet[1]

    def _create(params):
        # Helper: run a create command, validate the response and return
        # the new resource's OCID.
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        return json.loads(result.output)['data']['id']

    def _wait(get_params, state, **kwargs):
        # Helper: poll the given get command until the resource reaches
        # the requested lifecycle state (5 minute cap, as before).
        util.wait_until(get_params, state, max_wait_seconds=300, **kwargs)

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'functions_application_and_function_fixture.yml'):
        fn_app_id = _create([
            'fn', 'application', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--subnet-ids',
            json.dumps([subnet_id]), '--display-name',
            util.random_name("fnapp", insert_underscore=False)
        ])
        _wait(['fn', 'application', 'get', '--application-id', fn_app_id],
              'ACTIVE')

        fn_func_id = _create([
            'fn', 'function', 'create', '--application-id', fn_app_id,
            '--image', 'phx.ocir.io/apigw/faas/helloworld-func:0.0.14',
            '--memory-in-mbs', '128', '--display-name',
            util.random_name("fnfunc", insert_underscore=False)
        ])
        _wait(['fn', 'function', 'get', '--function-id', fn_func_id],
              'ACTIVE')

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'apigateway_api_gateway_and_deployment_fixture.yml'):
        api_gateway_id = _create([
            'api-gateway', 'gateway', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--subnet-id', subnet_id, '--display-name',
            util.random_name("apigateway", insert_underscore=False),
            '--endpoint-type', 'PUBLIC'
        ])
        _wait(['api-gateway', 'gateway', 'get', '--gateway-id',
               api_gateway_id], 'ACTIVE')

        api_deployment_id = _create([
            'api-gateway', 'deployment', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--display-name',
            util.random_name('deployment',
                             insert_underscore=False), '--path-prefix',
            util.random_name('/foo', insert_underscore=False), '--gateway-id',
            api_gateway_id, '--specification',
            build_simple_api_specification('http://www.oracle.com')
        ])
        _wait(['api-gateway', 'deployment', 'get', '--deployment-id',
               api_deployment_id], 'ACTIVE')

    yield api_gateway_id, api_deployment_id, fn_func_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'apigateway_api_gateway_and_deployment_fixture_cleanup.yml'):
        # NOTE(review): the original did not validate the gateway/deployment
        # delete responses; preserved as-is — the DELETED wait is the
        # effective success signal here.
        invoke(runner, config_file, config_profile, [
            'api-gateway', 'deployment', 'delete', '--deployment-id',
            api_deployment_id, '--force'
        ])
        _wait(['api-gateway', 'deployment', 'get', '--deployment-id',
               api_deployment_id], 'DELETED')

        invoke(runner, config_file, config_profile, [
            'api-gateway', 'gateway', 'delete', '--gateway-id',
            api_gateway_id, '--force'
        ])
        _wait(['api-gateway', 'gateway', 'get', '--gateway-id',
               api_gateway_id], 'DELETED')

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'functions_application_and_function_fixture_cleanup.yml'):
        result = invoke(runner, config_file, config_profile, [
            'fn', 'function', 'delete', '--function-id', fn_func_id, '--force'
        ])
        util.validate_response(result)
        # succeed_if_not_found: the resource may already 404 once deletion
        # completes.
        _wait(['fn', 'function', 'get', '--function-id', fn_func_id],
              'DELETED', succeed_if_not_found=True)

        result = invoke(runner, config_file, config_profile, [
            'fn', 'application', 'delete', '--application-id', fn_app_id,
            '--force'
        ])
        util.validate_response(result)
        _wait(['fn', 'application', 'get', '--application-id', fn_app_id],
              'DELETED', succeed_if_not_found=True)
# Example #23
    def subtest_subnet_secondary_ip_operations(self):
        """End-to-end CLI test of secondary private IP operations.

        Launches two instances in one subnet, assigns / moves / updates
        secondary private IPs between their VNICs, verifies the list and
        get output at each step, and finally unassigns and deletes the
        secondary IPs. Steps are strictly order-dependent: each assertion
        relies on the state produced by the preceding commands.
        """
        self.set_up_vcn_and_subnet("10.0.0.0/16")
        available_ip_addresses = self.get_ip_addresses_from_cidr("10.0.0.0/16")

        # First we need to launch two instances and get their VNICs. We get two instances
        # so that we can move the secondary private IP around. The instances need to be
        # in the same subnet for the secondary private IP address moves to be valid
        image_id = util.oracle_linux_image()
        shape = 'VM.Standard1.1'

        first_instance_name = util.random_name('cli_test_instance')
        result = self.invoke([
            'compute', 'instance', 'launch', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', first_instance_name,
            '--subnet-id', self.subnet_ocid, '--image-id', image_id, '--shape',
            shape
        ])
        self.first_instance_id = util.find_id_in_response(result.output)

        second_instance_name = util.random_name('cli_test_instance')
        result = self.invoke([
            'compute', 'instance', 'launch', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', second_instance_name,
            '--subnet-id', self.subnet_ocid, '--image-id', image_id, '--shape',
            shape
        ])
        self.second_instance_id = util.find_id_in_response(result.output)

        util.wait_until([
            'compute', 'instance', 'get', '--instance-id',
            self.first_instance_id
        ],
                        'RUNNING',
                        max_wait_seconds=600)
        util.wait_until([
            'compute', 'instance', 'get', '--instance-id',
            self.second_instance_id
        ],
                        'RUNNING',
                        max_wait_seconds=600)

        # Get the first listed VNIC (and its primary private IP) for each
        # instance.
        vnics_on_instance_result = self.invoke([
            'compute', 'instance', 'list-vnics', '--instance-id',
            self.first_instance_id
        ])
        vnics = json.loads(vnics_on_instance_result.output)
        first_vnic_id = vnics['data'][0]['id']
        first_vnic_primary_private_ip = vnics['data'][0]['private-ip']

        # So we don't try and re-use the IP address unintentionally
        available_ip_addresses.remove(first_vnic_primary_private_ip)

        vnics_on_instance_result = self.invoke([
            'compute', 'instance', 'list-vnics', '--instance-id',
            self.second_instance_id
        ])
        vnics = json.loads(vnics_on_instance_result.output)
        second_vnic_id = vnics['data'][0]['id']
        second_vnic_primary_private_ip = vnics['data'][0]['private-ip']
        available_ip_addresses.remove(second_vnic_primary_private_ip)

        # Running the assign command against a non-existent VNIC fails
        fudged_vnic_id = self.fudge_ocid(first_vnic_id)
        result = self.invoke([
            'network', 'vnic', 'assign-private-ip', '--vnic-id', fudged_vnic_id
        ])
        self.assertNotEqual(0, result.exit_code)
        assert 'Either VNIC with ID {} does not exist or you are not authorized to access it.'.format(
            fudged_vnic_id) in result.output

        # Most basic call with VNIC only - in this case we assign the IP automatically
        result = self.invoke([
            'network', 'vnic', 'assign-private-ip', '--vnic-id', first_vnic_id
        ])
        first_secondary_private_ip_data = json.loads(result.output)['data']
        first_secondary_private_ip_id = first_secondary_private_ip_data['id']
        first_secondary_private_ip_address = first_secondary_private_ip_data[
            'ip-address']
        available_ip_addresses.remove(first_secondary_private_ip_address)

        # Assign a new secondary IP with all parameters given
        second_secondary_private_ip_address = available_ip_addresses.pop()
        result = self.invoke([
            'network',
            'vnic',
            'assign-private-ip',
            '--vnic-id',
            first_vnic_id,
            '--ip-address',
            second_secondary_private_ip_address,
            '--display-name',
            'My second secondary',
            '--hostname-label',
            'secondary-1',

            # The --unassign-if-already-assigned should not have an impact as the IP address doesn't exist
            '--unassign-if-already-assigned'
        ])
        second_secondary_private_ip_data = json.loads(result.output)['data']
        second_secondary_private_ip_id = second_secondary_private_ip_data['id']
        self.assertEqual(second_secondary_private_ip_address,
                         second_secondary_private_ip_data['ip-address'])

        # Checkpoint by listing the private IPs. Our created secondaries should be there
        result = self.invoke(
            ['network', 'private-ip', 'list', '--vnic-id', first_vnic_id])
        private_ips = json.loads(result.output)['data']

        # 3 = the VNIC's primary private IP plus the two secondaries
        # assigned above.
        self.assertEqual(3, len(private_ips))
        self.find_private_ip_and_do_assertions(
            private_ips, first_secondary_private_ip_id,
            first_secondary_private_ip_address, None, None)
        self.find_private_ip_and_do_assertions(
            private_ips, second_secondary_private_ip_id,
            second_secondary_private_ip_address, 'My second secondary',
            'secondary-1')

        # Trying to assign the same private IP to the same VNIC is a no-op
        result = self.invoke([
            'network', 'vnic', 'assign-private-ip', '--vnic-id', first_vnic_id,
            '--ip-address', first_secondary_private_ip_address
        ])
        assert 'Taking no action as IP address {} is already assigned to VNIC {}'.format(
            first_secondary_private_ip_address, first_vnic_id) in result.output

        # Trying to move a primary IP fails
        result = self.invoke([
            'network', 'vnic', 'assign-private-ip', '--vnic-id', first_vnic_id,
            '--ip-address', second_vnic_primary_private_ip,
            '--unassign-if-already-assigned'
        ])
        self.assertNotEqual(0, result.exit_code)

        result = self.invoke([
            'network', 'vnic', 'assign-private-ip', '--vnic-id',
            second_vnic_id, '--ip-address', first_vnic_primary_private_ip,
            '--unassign-if-already-assigned'
        ])
        self.assertNotEqual(0, result.exit_code)

        # Trying to move an existing IP address without saying "unassign" fails
        result = self.invoke([
            'network', 'vnic', 'assign-private-ip', '--vnic-id',
            second_vnic_id, '--ip-address', first_secondary_private_ip_address
        ])
        target_message = 'IP address {} is already assigned to a different VNIC: {}. To reassign it, re-run this command with the --unassign-if-already-assigned option'.format(
            first_secondary_private_ip_address, first_vnic_id)
        assert target_message in result.output
        self.assertNotEqual(0, result.exit_code)

        # Move the secondary IP and also update some information
        result = self.invoke([
            'network', 'vnic', 'assign-private-ip', '--vnic-id',
            second_vnic_id, '--ip-address', first_secondary_private_ip_address,
            '--display-name', 'My first secondary', '--hostname-label',
            'moved-first-secondary-1', '--unassign-if-already-assigned'
        ])
        private_ip_data_after_move = json.loads(result.output)['data']
        self.assertEqual(first_secondary_private_ip_id,
                         private_ip_data_after_move['id'])
        self.assertEqual(first_secondary_private_ip_address,
                         private_ip_data_after_move['ip-address'])
        self.assertEqual('My first secondary',
                         private_ip_data_after_move['display-name'])
        self.assertEqual('moved-first-secondary-1',
                         private_ip_data_after_move['hostname-label'])

        # List each VNIC - we expect 2 results per list call (1 x primary private and 1 x secondary private per VNIC) after moving stuff around
        result = self.invoke(
            ['network', 'private-ip', 'list', '--vnic-id', first_vnic_id])
        private_ips = json.loads(result.output)['data']
        self.assertEqual(2, len(private_ips))
        self.ensure_private_ip_record_not_present(
            private_ips, first_secondary_private_ip_id)
        self.find_private_ip_and_do_assertions(
            private_ips, second_secondary_private_ip_id,
            second_secondary_private_ip_address, 'My second secondary',
            'secondary-1')

        result = self.invoke(
            ['network', 'private-ip', 'list', '--vnic-id', second_vnic_id])
        private_ips = json.loads(result.output)['data']
        self.assertEqual(2, len(private_ips))
        self.ensure_private_ip_record_not_present(
            private_ips, second_secondary_private_ip_id)
        self.find_private_ip_and_do_assertions(
            private_ips, first_secondary_private_ip_id,
            first_secondary_private_ip_address, 'My first secondary',
            'moved-first-secondary-1')

        # Listing by subnet should give us 4 records (2 x primary private and 2 x secondary private) as it queries across all VNICs in the subnet
        result = self.invoke(
            ['network', 'private-ip', 'list', '--subnet-id', self.subnet_ocid])
        private_ips = json.loads(result.output)['data']
        self.assertEqual(4, len(private_ips))
        self.find_private_ip_and_do_assertions(
            private_ips, first_secondary_private_ip_id,
            first_secondary_private_ip_address, 'My first secondary',
            'moved-first-secondary-1')
        self.find_private_ip_and_do_assertions(
            private_ips, second_secondary_private_ip_id,
            second_secondary_private_ip_address, 'My second secondary',
            'secondary-1')

        # Update the display name and hostname
        result = self.invoke([
            'network', 'private-ip', 'update', '--private-ip-id',
            second_secondary_private_ip_id, '--display-name',
            'batman display name', '--hostname-label', 'batman-secondary-1'
        ])
        updated_private_ip_info = json.loads(result.output)['data']
        self.assertEqual(second_secondary_private_ip_id,
                         updated_private_ip_info['id'])
        self.assertEqual(second_secondary_private_ip_address,
                         updated_private_ip_info['ip-address'])
        self.assertEqual(first_vnic_id, updated_private_ip_info['vnic-id'])
        self.assertEqual('batman display name',
                         updated_private_ip_info['display-name'])
        self.assertEqual('batman-secondary-1',
                         updated_private_ip_info['hostname-label'])

        # Do a get and confirm the information which we receive
        result = self.invoke([
            'network', 'private-ip', 'get', '--private-ip-id',
            second_secondary_private_ip_id
        ])
        private_ip_info_from_get = json.loads(result.output)['data']
        self.assertEqual(second_secondary_private_ip_id,
                         private_ip_info_from_get['id'])
        self.assertEqual(second_secondary_private_ip_address,
                         private_ip_info_from_get['ip-address'])
        self.assertEqual(first_vnic_id, private_ip_info_from_get['vnic-id'])
        self.assertEqual('batman display name',
                         private_ip_info_from_get['display-name'])
        self.assertEqual('batman-secondary-1',
                         private_ip_info_from_get['hostname-label'])

        # Running the unassign command against a non-existent VNIC fails
        # Listing by VNIC should give us one record (the primary private IP) per call
        result = self.invoke([
            'network', 'vnic', 'unassign-private-ip', '--vnic-id',
            fudged_vnic_id, '--ip-address', second_secondary_private_ip_address
        ])
        self.assertNotEqual(0, result.exit_code)
        # The error message from the service is not being sent correctly to the CLI. The Error code is correct.
        # This needs to be investigated
        # assert 'Either VNIC with ID {} does not exist or you are not authorized to access it.'.format(fudged_vnic_id) in result.output

        # Unassigning an IP address not in the VNIC fails
        result = self.invoke([
            'network', 'vnic', 'unassign-private-ip', '--vnic-id',
            second_vnic_id, '--ip-address', second_secondary_private_ip_address
        ])
        assert 'IP address {} was not found on VNIC {}'.format(
            second_secondary_private_ip_address,
            second_vnic_id) in result.output
        self.assertNotEqual(0, result.exit_code)

        # Unassigning a primary private IP address is not supported
        result = self.invoke([
            'network', 'vnic', 'unassign-private-ip', '--vnic-id',
            second_vnic_id, '--ip-address', second_vnic_primary_private_ip
        ])
        assert 'Taking no action as {} is the primary private IP on VNIC {}'.format(
            second_vnic_primary_private_ip, second_vnic_id) in result.output
        self.assertNotEqual(0, result.exit_code)

        # Unassign a secondary private IP
        result = self.invoke([
            'network', 'vnic', 'unassign-private-ip', '--vnic-id',
            second_vnic_id, '--ip-address', first_secondary_private_ip_address
        ])
        assert 'Unassigned IP address {} from VNIC {}'.format(
            first_secondary_private_ip_address,
            second_vnic_id) in result.output

        # Delete a secondary private IP (by its OCID)
        result = self.invoke([
            'network', 'private-ip', 'delete', '--private-ip-id',
            second_secondary_private_ip_id, '--force'
        ])
        self.assertEqual(0, result.exit_code)

        # Listing by VNIC should give us one record (the primary private IP) per call
        result = self.invoke(
            ['network', 'private-ip', 'list', '--vnic-id', first_vnic_id])
        private_ips = json.loads(result.output)['data']
        self.assertEqual(1, len(private_ips))
        self.assertTrue(private_ips[0]['is-primary'])

        result = self.invoke(
            ['network', 'private-ip', 'list', '--vnic-id', second_vnic_id])
        private_ips = json.loads(result.output)['data']
        self.assertEqual(1, len(private_ips))
        self.assertTrue(private_ips[0]['is-primary'])

        # Listing by subnet should give us two records (the primary private IP for each VNIC)
        result = self.invoke(
            ['network', 'private-ip', 'list', '--subnet-id', self.subnet_ocid])
        private_ips = json.loads(result.output)['data']
        self.assertEqual(2, len(private_ips))
        self.assertTrue(private_ips[0]['is-primary'])
        self.assertTrue(private_ips[1]['is-primary'])
# Example #24
def backend_set(runner, config_file, config_profile, load_balancer):
    """Fixture: create a ROUND_ROBIN backend set on the given load
    balancer, yield its name, and delete it again after the test.
    Traffic is recorded in separate VCR cassettes for setup and teardown.
    """

    def _await_work_request(cli_result):
        # Both create and delete return a work request id; block until the
        # work request reports SUCCEEDED, then validate that response.
        work_request_ocid = json.loads(
            cli_result.output)['opc-work-request-id']
        wait_result = util.wait_until(
            [
                'lb', 'work-request', 'get', '--work-request-id',
                work_request_ocid
            ],
            'SUCCEEDED',
            max_wait_seconds=DEFAULT_WAIT_TIME)
        util.validate_response(wait_result)

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_backend_set.yml'):
        backend_set_name = util.random_name('cli_lb_backend_set')

        create_result = invoke(runner, config_file, config_profile, [
            'backend-set',
            'create',
            '--name',
            backend_set_name,
            '--policy',
            'ROUND_ROBIN',
            '--load-balancer-id',
            load_balancer,
            '--health-checker-protocol',
            'HTTP',
            '--health-checker-return-code',
            '200',
            '--health-checker-url-path',
            '/healthcheck',
            '--health-checker-interval-in-ms',
            '60000',  # 1 minute
            '--session-persistence-cookie-name',
            '*',
            '--session-persistence-disable-fallback',
            'false'
        ])
        util.validate_response(create_result)
        _await_work_request(create_result)

        yield backend_set_name

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_backend_set_delete.yml'):
        delete_result = invoke(runner, config_file, config_profile, [
            'backend-set', 'delete', '--load-balancer-id', load_balancer,
            '--backend-set-name', backend_set_name, '--force'
        ])
        util.validate_response(delete_result)
        _await_work_request(delete_result)
# Example #25
def test_load_balancer_tagging(runner, config_file, config_profile,
                               vcn_and_subnets, key_pair_files):
    """Exercise freeform and defined tags on a load balancer through
    create, get, list and update, then delete the LB in a finally block.

    The repeated tag assertions (which the original copy-pasted four
    times, with some lines literally duplicated) are factored into
    `_assert_tags`.
    """

    def _write_defined_tags(value_prefix):
        # Build the defined-tags input file, mapping every known test tag
        # to '<value_prefix> <tag name>'.
        tag_names_to_values = {
            t.name: '{} {}'.format(value_prefix, t.name)
            for t in tag_data_container.tags
        }
        tag_data_container.write_defined_tags_to_file(
            os.path.join('tests', 'temp', 'defined_tags_lb.json'),
            tag_data_container.tag_namespace, tag_names_to_values)

    def _assert_tags(entity, freeform_expected, defined_value):
        # Check the freeform and defined tags on one LB representation.
        for tag_name, tag_value in freeform_expected.items():
            assert tag_name in entity['freeform-tags']
            assert tag_value == entity['freeform-tags'][tag_name]
        assert "cli_tag_ns_320683" in entity['defined-tags']
        assert "cli_tag_320683" in entity['defined-tags']['cli_tag_ns_320683']
        assert defined_value == entity['defined-tags']['cli_tag_ns_320683'][
            'cli_tag_320683']

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_tagging.yml'):
        subnet_ocid_1 = vcn_and_subnets[1]
        subnet_ocid_2 = vcn_and_subnets[2]

        # Setup the tag inputs
        _write_defined_tags('somevalue')

        # Create the LB with tags
        lb_name = util.random_name('cli_lb')
        params = [
            'load-balancer', 'create', '-c', util.COMPARTMENT_ID,
            '--display-name', lb_name, '--shape-name', '100Mbps',
            '--subnet-ids', '["{}","{}"]'.format(subnet_ocid_1, subnet_ocid_2),
            '--freeform-tags',
            'file://tests/resources/tagging/freeform_tags_2.json',
            '--defined-tags', 'file://tests/temp/defined_tags_lb.json',
            '--wait-for-state', 'SUCCEEDED', '--wait-interval-seconds',
            util.WAIT_INTERVAL_SECONDS
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result, json_response_expected=False)
        load_balancer = util.get_json_from_mixed_string(result.output)
        lb_id = load_balancer['data']['id']

        try:
            # Make sure the tags are in the create response
            _assert_tags(load_balancer['data'], {"tagOne": "value three"},
                         "somevalue cli_tag_320683")

            # Get the LB and make sure the tags are in the results
            params = ['load-balancer', 'get', '--load-balancer-id', lb_id]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)
            load_balancer = util.get_json_from_mixed_string(result.output)
            lb_id = load_balancer['data']['id']
            _assert_tags(load_balancer['data'], {"tagOne": "value three"},
                         "somevalue cli_tag_320683")

            # List the LB and check that the tags are in the result (only
            # when the LB under test is the sole one in the compartment)
            params = ['load-balancer', 'list', '-c', util.COMPARTMENT_ID]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)
            list_result = util.get_json_from_mixed_string(result.output)
            if len(list_result['data']) == 1:
                _assert_tags(list_result['data'][0],
                             {"tagOne": "value three"},
                             "somevalue cli_tag_320683")

            # Update the display name for the lb.
            params = [
                'load-balancer', 'update', '--load-balancer-id', lb_id,
                '--display-name', 'new' + lb_name, '--wait-for-state',
                'SUCCEEDED'
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)

            params = ['load-balancer', 'get', '--load-balancer-id', lb_id]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)
            load_balancer = util.get_json_from_mixed_string(result.output)
            assert "new" + lb_name == load_balancer['data']['display-name']

            # Regenerate the defined-tags input with new values, then
            # update the tags on the lb.
            _write_defined_tags('newvalue')
            params = [
                'load-balancer', 'update', '--load-balancer-id', lb_id,
                '--freeform-tags',
                'file://tests/resources/tagging/freeform_tags_1.json',
                '--defined-tags', 'file://tests/temp/defined_tags_lb.json',
                '--wait-for-state', 'SUCCEEDED', '--force'
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)

            params = ['load-balancer', 'get', '--load-balancer-id', lb_id]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)
            load_balancer = util.get_json_from_mixed_string(result.output)
            _assert_tags(load_balancer['data'],
                         {"tagOne": "value1", "tag_Two": "value two"},
                         "newvalue cli_tag_320683")

        finally:
            # Delete the LB
            params = [
                'load-balancer', 'delete', '--load-balancer-id', lb_id,
                '--force', '--wait-for-state', 'SUCCEEDED',
                '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
            ]
            result = invoke(runner, config_file, config_profile, params)
            _validate_work_request_result(result, lb_id)
# Example #26
def vcn_and_subnet(runner, config_file, config_profile, network_client):
    """Generator fixture: create a VCN and one AD-local subnet, yield their
    OCIDs to the dependent test, then delete both afterwards.

    Setup traffic is recorded/replayed through the
    'apigateway_vcn_and_subnet_fixture.yml' VCR cassette. Teardown runs after
    the ``yield`` — outside any test function and therefore outside that
    cassette — so it opens its own '..._cleanup.yml' cassette.

    Yields:
        tuple(str, str): (vcn_ocid, subnet_ocid)
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'apigateway_vcn_and_subnet_fixture.yml'):
        # create VCN
        vcn_name = util.random_name('cli_db_test_vcn')
        cidr_block = "10.0.0.0/16"
        vcn_dns_label = util.random_name('vcn', insert_underscore=False)

        create_vcn_details = oci.core.models.CreateVcnDetails()
        create_vcn_details.cidr_block = cidr_block
        create_vcn_details.display_name = vcn_name
        create_vcn_details.compartment_id = util.COMPARTMENT_ID
        create_vcn_details.dns_label = vcn_dns_label

        result = network_client.create_vcn(create_vcn_details)
        vcn_ocid = result.data.id
        assert result.status == 200

        # Block until the VCN is AVAILABLE before creating a subnet in it.
        oci.wait_until(network_client,
                       network_client.get_vcn(vcn_ocid),
                       'lifecycle_state',
                       'AVAILABLE',
                       max_wait_seconds=300)

        # create subnet in first AD
        subnet_name = util.random_name('python_cli_test_subnet')
        cidr_block = "10.0.1.0/24"
        subnet_dns_label = util.random_name('subnet',
                                            insert_underscore=False) + '1'

        create_subnet_details = oci.core.models.CreateSubnetDetails()
        create_subnet_details.compartment_id = util.COMPARTMENT_ID
        create_subnet_details.display_name = subnet_name
        create_subnet_details.vcn_id = vcn_ocid
        create_subnet_details.cidr_block = cidr_block
        create_subnet_details.dns_label = subnet_dns_label

        result = network_client.create_subnet(create_subnet_details)
        subnet_ocid = result.data.id
        assert result.status == 200

        oci.wait_until(network_client,
                       network_client.get_subnet(subnet_ocid),
                       'lifecycle_state',
                       'AVAILABLE',
                       max_wait_seconds=300)

    # Hand the created resources to the test; everything below is teardown.
    yield vcn_ocid, subnet_ocid

    # # this code does not run inside the vcr_fixture because it is outside any test function
    # # thus we are explicitly creating a separate cassette for it here
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'apigateway_vcn_and_subnet_fixture_cleanup.yml'):
        # Sometimes we can't delete the subnet straight after the mount target because some VNIC is still
        # hanging around. If we get a conflict, try a few times before bailing out
        attempts = 0
        while attempts < 5:
            try:
                network_client.delete_subnet(subnet_ocid)
                test_config_container.do_wait(
                    network_client,
                    network_client.get_subnet(subnet_ocid),
                    'lifecycle_state',
                    'TERMINATED',
                    max_wait_seconds=600,
                    succeed_on_not_found=True)
                break
            except oci.exceptions.ServiceError as e:
                # 409 Conflict: subnet still in use -- back off and retry.
                attempts += 1
                if e.status == 409 and attempts < 5:
                    time.sleep(5)
                # succeed_on_not_found doesn't work as expected
                elif e.status == 404:
                    break
                else:
                    raise

        # The VCN can only be deleted once its subnet is gone.
        network_client.delete_vcn(vcn_ocid)
# Example #27 (source score: 0)
def _do_backend_and_backend_set_waiters(runner, load_balancer_id, config_file,
                                        config_profile):
    """Exercise backend-set and backend CRUD commands with --wait-for-state,
    validating the work request produced by each invocation."""

    def run_and_check(cli_args):
        # Invoke the CLI and assert the resulting work request succeeded.
        outcome = invoke(runner, config_file, config_profile, cli_args)
        _validate_work_request_result(outcome, load_balancer_id)

    bset_name = util.random_name('cli_lb_backend_set')

    # Create a backend set with an HTTP health checker and sticky sessions.
    run_and_check([
        'backend-set', 'create',
        '--name', bset_name,
        '--policy', 'ROUND_ROBIN',
        '--load-balancer-id', load_balancer_id,
        '--health-checker-protocol', 'HTTP',
        '--health-checker-return-code', '200',
        '--health-checker-url-path', '/healthcheck',
        '--health-checker-interval-in-ms', '60000',  # 1 minute
        '--session-persistence-cookie-name', '*',
        '--session-persistence-disable-fallback', 'false',
        '--wait-for-state', 'SUCCEEDED',
    ])

    backend_ip = '10.0.0.10'
    backend_port = '80'
    run_and_check([
        'backend', 'create',
        '--ip-address', backend_ip,
        '--port', backend_port,
        '--load-balancer-id', load_balancer_id,
        '--backend-set-name', bset_name,
        '--weight', '3',
        '--wait-for-state', 'SUCCEEDED',
    ])

    # A backend's name is its "ip:port" pair.
    backend_name = "{}:{}".format(backend_ip, backend_port)
    run_and_check([
        'backend', 'update',
        '--load-balancer-id', load_balancer_id,
        '--backend-set-name', bset_name,
        '--backend-name', backend_name,
        '--weight', '2',
        '--offline', 'true',
        '--backup', 'false',
        '--drain', 'false',
        '--wait-for-state', 'SUCCEEDED',
    ])

    run_and_check([
        'backend', 'delete',
        '--load-balancer-id', load_balancer_id,
        '--backend-set-name', bset_name,
        '--backend-name', backend_name,
        '--force',
        '--wait-for-state', 'SUCCEEDED',
    ])

    # Listener lifecycle is exercised while the backend set still exists.
    _do_listener_waiters(runner, load_balancer_id, bset_name, config_file,
                         config_profile)

    run_and_check([
        'backend-set', 'delete',
        '--load-balancer-id', load_balancer_id,
        '--backend-set-name', bset_name,
        '--force',
        '--wait-for-state', 'SUCCEEDED',
    ])
# Example #28 (source score: 0)
def test_oce_node_pool(runner, config_file, config_profile, oce_cluster):
    """Node pool lifecycle test: create, get, list, update and delete a node
    pool in the cluster provided by the ``oce_cluster`` fixture.

    Async commands (create/update/delete) return a work request id; each is
    polled to SUCCEEDED to obtain the result / confirm completion. Traffic is
    recorded/replayed through VCR cassettes, one for the main flow and one
    for the delete phase.
    """
    # Set-up of the node pool under test
    node_pool_id = None
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_fixture_node_pool.yml'):
        # Find the node-pool options
        params = [
            'ce', 'node-pool-options', 'get', '--node-pool-option-id', 'all'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Pick the first version in the response to be used for the test cluster
        kub_version = json.loads(
            result.output)['data']['kubernetes-versions'][0]
        node_shape = "VM.Standard1.1"
        node_image = "Oracle-Linux-7.4"
        # Get the return values from the cluster generator
        cluster_id, subnet_id1, subnet_id2, subnet_id3 = oce_cluster
        # JSON array literal expected by --subnet-ids
        node_pool_subnet_ids = '["' + subnet_id1 + '", "' + subnet_id2 + '", "' + subnet_id3 + '"]'

        # Create a node pool
        np_name = util.random_name('cli_test_oce_np')
        params = [
            'ce', 'node-pool', 'create', '--name', np_name, '--cluster-id',
            cluster_id, '--compartment-id', util.COMPARTMENT_ID,
            '--kubernetes-version', kub_version, '--node-image-name',
            node_image, '--node-shape', node_shape, '--subnet-ids',
            node_pool_subnet_ids
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Create node-pool returns work request. Get work request response to obtain the node-pool OCID.
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            [
                'ce', 'work-request', 'get', '--work-request-id',
                work_request_id
            ],
            'SUCCEEDED',
            state_property_name='status',
            max_wait_seconds=PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)
        # The created node pool's OCID is the first resource on the work request.
        node_pool_id = json.loads(get_work_request_result.output
                                  )['data']['resources'][0]['identifier']

        # Get a node pool from node pool id
        params = ['ce', 'node-pool', 'get', '--node-pool-id', node_pool_id]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # List the node pool
        params = [
            'ce', 'node-pool', 'list', '--compartment-id', util.COMPARTMENT_ID
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        assert len(json.loads(result.output)['data']) > 0

        # Update the node pool
        np_name = util.random_name('cli_test_oce_node_pool')
        params = [
            'ce', 'node-pool', 'update', '--node-pool-id', node_pool_id,
            '--name', np_name
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Update node pool returns work request. Get work request response to check the command succeeded
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            [
                'ce', 'work-request', 'get', '--work-request-id',
                work_request_id
            ],
            'SUCCEEDED',
            state_property_name='status',
            max_wait_seconds=PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)

    # Tear down the node pool resource
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_fixture_node_pool_delete.yml'):
        # Delete the node pool
        params = [
            'ce', 'node-pool', 'delete', '--node-pool-id', node_pool_id,
            '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Delete node pool returns work request. Get work request response to check the command succeeded
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            [
                'ce', 'work-request', 'get', '--work-request-id',
                work_request_id
            ],
            'SUCCEEDED',
            state_property_name='status',
            max_wait_seconds=DELETION_TIME_SEC)
        util.validate_response(get_work_request_result)
# Example #29 (source score: 0)
def test_boot_volume_clone_backup(network_resources):
    """Exercise the boot volume lifecycle end to end via the CLI.

    Launches an instance with a custom boot volume size, preserves the boot
    volume on terminate, resizes it, backs it up, clones it (verifying backup
    policy assignment), launches instances from the clone and from a volume
    restored from backup, and checks the create-command error cases along the
    way. All resources are cleaned up in the finally block.
    """
    with test_config_container.create_vcr(cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette('boot_volume_test_boot_volume_clone_backup.yml'):
        boot_volume_id = None
        instance_ocid = None
        backup_boot_volume_id = None
        cloned_boot_volume_id = None
        backup_id = None
        try:
            instance_name = util.random_name('boot_vol_instance')
            image_id = util.oracle_linux_image()
            shape = 'VM.Standard1.1'
            hostname_label = util.random_name('bootvolinst', insert_underscore=False)
            boot_volume_size_in_gbs = '51'

            result = invoke([
                'compute', 'instance', 'launch',
                '--compartment-id', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--display-name', instance_name,
                '--subnet-id', network_resources[1],
                '--image-id', image_id,
                '--shape', shape,
                '--hostname-label', hostname_label,
                '--boot-volume-size-in-gbs', boot_volume_size_in_gbs,
                '--wait-for-state', 'RUNNING',
                '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
            ])
            util.validate_response(result, json_response_expected=False)
            instance_data = util.get_json_from_mixed_string(result.output)['data']
            instance_ocid = instance_data['id']
            assert 'image' == instance_data['source-details']['source-type']
            assert image_id == instance_data['source-details']['image-id']

            result = invoke([
                'compute', 'boot-volume-attachment', 'list',
                '-c', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--instance-id', instance_data['id']
            ])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            assert len(parsed_result['data']) == 1
            boot_volume_id = parsed_result['data'][0]['boot-volume-id']

            result = invoke([
                'bv', 'boot-volume', 'get',
                '--boot-volume-id', boot_volume_id
            ])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            # BUG FIX: the original overwrote boot_volume_size_in_gbs with the
            # value returned by the service and then asserted it equaled
            # int(itself) -- a tautology that could never fail. Compare the
            # size reported by the service against the size requested at
            # launch instead.
            assert int(boot_volume_size_in_gbs) == int(parsed_result['data']['size-in-gbs'])

            result = invoke([
                'compute', 'instance', 'terminate',
                '--instance-id', instance_ocid,
                '--wait-for-state', 'TERMINATED',
                '--preserve-boot-volume', 'true',
                '--force'
            ])
            util.validate_response(result, json_response_expected=False)
            instance_ocid = None

            # Since we preserved the volume it should still be available
            result = invoke(['bv', 'boot-volume', 'get', '--boot-volume-id', boot_volume_id])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            assert util.availability_domain() == parsed_result['data']['availability-domain']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert image_id == parsed_result['data']['image-id']
            size_in_gbs = int(parsed_result['data']['size-in-gbs'])

            new_size_in_gbs = size_in_gbs + 10

            # Resize boot volume to new_size_in_gbs
            result = invoke(['bv', 'boot-volume', 'update', '--boot-volume-id', boot_volume_id,
                             '--size-in-gbs', str(new_size_in_gbs),
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)

            # Since we preserved the volume it should still be available
            result = invoke(['bv', 'boot-volume', 'get', '--boot-volume-id', boot_volume_id])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert new_size_in_gbs == int(parsed_result['data']['size-in-gbs'])

            # Take a backup
            result = invoke(['bv', 'boot-volume-backup', 'create', '--boot-volume-id', boot_volume_id,
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)
            parsed_result = util.get_json_from_mixed_string(result.output)
            assert boot_volume_id == parsed_result['data']['boot-volume-id']
            assert image_id == parsed_result['data']['image-id']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            backup_id = parsed_result['data']['id']

            # Boot Volume Create Error cases

            # Error 1: No option specified
            result = invoke(['bv', 'boot-volume', 'create',
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            assert "An empty boot volume cannot be created. Please specify either --boot-volume-backup-id, --source-boot-volume-id or --source-volume-replica-id" in result.output

            # Error 2: Both options specified
            # (boot_volume_id[0] deliberately passes just the first character
            # of the OCID -- an invalid id is fine here since the CLI rejects
            # the combination before any service call matters)
            result = invoke(['bv', 'boot-volume', 'create',
                             '--source-boot-volume-id', boot_volume_id[0],
                             '--boot-volume-backup-id', boot_volume_id[0],
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            assert "You can only specify one of either --source-boot-volume-id, --boot-volume-backup-id or --source-volume-replica-id option" in result.output

            # Clone the boot volume (Error 1: Invalid Boot Volume ID)
            result = invoke(['bv', 'boot-volume', 'create',
                             '--source-boot-volume-id', boot_volume_id[0],
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_service_error(result, error_message="InvalidParameter")

            backup_policy_ids = get_backup_policy_ids()
            create_new_size_in_gbs = new_size_in_gbs + 10

            # Clone the boot volume with bronze backup policy and larger size
            result = invoke(['bv', 'boot-volume', 'create',
                             '--source-boot-volume-id', boot_volume_id,
                             '--backup-policy-id', backup_policy_ids["bronze"],
                             '--wait-for-state', 'AVAILABLE',
                             '--size-in-gbs', str(create_new_size_in_gbs),
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)
            parsed_result = util.get_json_from_mixed_string(result.output)
            assert util.availability_domain() == parsed_result['data']['availability-domain']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert image_id == parsed_result['data']['image-id']
            assert create_new_size_in_gbs == int(parsed_result['data']['size-in-gbs'])
            cloned_boot_volume_id = parsed_result['data']['id']

            # Verify the backup policy
            result = invoke(['bv', 'volume-backup-policy-assignment',
                             'get-volume-backup-policy-asset-assignment',
                             '--asset-id', cloned_boot_volume_id])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            backup_policy_assignment_id = parsed_result["data"][0]["id"]
            assert parsed_result["data"][0]["policy-id"] == backup_policy_ids["bronze"]

            # Remove backup policy
            result = invoke(['bv', 'volume-backup-policy-assignment',
                             'delete', '--policy-assignment-id', backup_policy_assignment_id, '--force'])
            util.validate_response(result)

            # Change backup policy to silver
            result = invoke(['bv', 'volume-backup-policy-assignment', 'create',
                             '--asset-id', cloned_boot_volume_id,
                             '--policy-id', backup_policy_ids['silver']])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            backup_policy_assignment_id = parsed_result["data"]["id"]
            assert parsed_result["data"]["policy-id"] == backup_policy_ids["silver"]

            # Remove the backup policy
            result = invoke(['bv', 'volume-backup-policy-assignment',
                             'delete', '--policy-assignment-id', backup_policy_assignment_id, '--force'])
            util.validate_response(result)

            # We can now launch an instance using that boot volume
            result = invoke([
                'compute', 'instance', 'launch',
                '--compartment-id', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--display-name', instance_name,
                '--subnet-id', network_resources[1],
                '--shape', shape,
                '--hostname-label', hostname_label,
                '--source-boot-volume-id', cloned_boot_volume_id,
                '--wait-for-state', 'RUNNING',
                '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
            ])
            util.validate_response(result, json_response_expected=False)
            instance_data = util.get_json_from_mixed_string(result.output)['data']
            instance_ocid = instance_data['id']
            assert 'bootVolume' == instance_data['source-details']['source-type']
            assert cloned_boot_volume_id == instance_data['source-details']['boot-volume-id']

            clean_up_instances(instance_ocid)
            cloned_boot_volume_id = None
            instance_ocid = None

            # Delete existing boot volume
            clean_up_boot_volume(boot_volume_id)
            boot_volume_id = None

            # Create boot volume from backup (Error 1: Invalid Backup Volume ID)
            # (backup_id[0] -- first character of the OCID -- is a deliberately
            # invalid id)
            result = invoke(['bv', 'boot-volume', 'create',
                             '--boot-volume-backup-id', backup_id[0],
                             '--availability-domain', util.availability_domain(),
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_service_error(result, error_message="InvalidParameter")

            # Create boot volume from backup (Error 2: Availability domain not specified)
            result = invoke(['bv', 'boot-volume', 'create',
                             '--boot-volume-backup-id', backup_id,
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            assert "An availability domain must be specified when restoring a boot volume from backup" in result.output

            # Create boot volume from backup
            result = invoke(['bv', 'boot-volume', 'create',
                             '--boot-volume-backup-id', backup_id,
                             '--availability-domain', util.availability_domain(),
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)
            parsed_result = util.get_json_from_mixed_string(result.output)
            assert util.availability_domain() == parsed_result['data']['availability-domain']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert image_id == parsed_result['data']['image-id']
            backup_boot_volume_id = parsed_result['data']['id']

            # We can now launch an instance using that boot volume
            result = invoke([
                'compute', 'instance', 'launch',
                '--compartment-id', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--display-name', instance_name,
                '--subnet-id', network_resources[1],
                '--shape', shape,
                '--hostname-label', hostname_label,
                '--source-boot-volume-id', backup_boot_volume_id,
                '--wait-for-state', 'RUNNING',
                '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
            ])
            util.validate_response(result, json_response_expected=False)
            instance_data = util.get_json_from_mixed_string(result.output)['data']
            instance_ocid = instance_data['id']
            assert 'bootVolume' == instance_data['source-details']['source-type']
            assert backup_boot_volume_id == instance_data['source-details']['boot-volume-id']

            clean_up_instances(instance_ocid)
            backup_boot_volume_id = None
            instance_ocid = None

        finally:
            # Each clean_up_* target is set to None as soon as it is cleaned
            # inline above, so this only removes whatever is still live.
            clean_up_instances(instance_ocid)
            clean_up_boot_volume(boot_volume_id)
            clean_up_boot_volume(cloned_boot_volume_id)
            clean_up_boot_volume(backup_boot_volume_id)
            clean_up_boot_volume_backup(backup_id)
# Example #30 (source score: 0)
def oce_cluster(runner, config_file, config_profile):
    # Set-up of cross-connect group
    cluster_id = None
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_fixture_cluster.yml'):
        # Create a VCN for Kubernetes cluster
        vcn_name = util.random_name('cli_test_oce_vcn')
        vcn_cidr_block = "10.0.0.0/16"
        pod_cidr_block = "10.96.0.0/16"
        kub_svcs_cidr_block = "10.240.0.0/16"
        params = [
            'network', 'vcn', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--display-name', vcn_name, '--cidr-block',
            vcn_cidr_block
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        vcn_ocid = util.find_id_in_response(result.output)
        util.wait_until(['network', 'vcn', 'get', '--vcn-id', vcn_ocid],
                        'AVAILABLE',
                        max_wait_seconds=PROVISIONING_TIME_SEC)

        # Create 5 subnets: 1st 3 subnets for Kubernetes worker nodes and last 2 subnets for load balancers
        subnet_cidrs = [
            "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24",
            "10.0.5.0/24"
        ]
        subnet_names = list()
        subnet_ocids = list()

        for idx, subnet_cidr_block in enumerate(subnet_cidrs):
            subnet_names.append(util.random_name('cli_test_compute_subnet'))
            params = [
                'network',
                'subnet',
                'create',
                '--compartment-id',
                util.COMPARTMENT_ID,
                '--availability-domain',
                util.availability_domain(),
                '--display-name',
                subnet_names[idx],
                '--vcn-id',
                vcn_ocid,
                '--cidr-block',
                subnet_cidr_block,
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result)
            subnet_ocids.append(util.find_id_in_response(result.output))
            util.wait_until(
                ['network', 'subnet', 'get', '--subnet-id', subnet_ocids[idx]],
                'AVAILABLE',
                max_wait_seconds=PROVISIONING_TIME_SEC)

        regional_subnet_name = util.random_name('cli_test_compute_subnet')
        subnet_names.append(regional_subnet_name)
        params = [
            'network',
            'subnet',
            'create',
            '--compartment-id',
            util.COMPARTMENT_ID,
            '--display-name',
            regional_subnet_name,
            '--vcn-id',
            vcn_ocid,
            '--cidr-block',
            "10.0.6.0/24",
        ]

        # Create a public regional subnet for the cluster endpoint
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        regional_subnet_ocid = util.find_id_in_response(result.output)
        subnet_ocids.append(regional_subnet_ocid)
        util.wait_until(
            ['network', 'subnet', 'get', '--subnet-id', regional_subnet_ocid],
            'AVAILABLE',
            max_wait_seconds=PROVISIONING_TIME_SEC)

        # Find Supported Kubernetes versions
        params = ['ce', 'cluster-options', 'get', '--cluster-option-id', 'all']
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Pick the first version in the response to be used for the test cluster
        kub_version = json.loads(
            result.output)['data']['kubernetes-versions'][0]
        kub_upgrade_version = json.loads(
            result.output)['data']['kubernetes-versions'][1]

        # Create a cluster
        cluster_lb_subnets = '["' + subnet_ocids[3] + '", "' + subnet_ocids[
            4] + '"]'
        cluster_name = util.random_name('cli_oce_cluster_name')
        params = [
            'ce', 'cluster', 'create', '--compartment-id', util.COMPARTMENT_ID,
            '--name', cluster_name, '--vcn-id', vcn_ocid,
            '--kubernetes-version', kub_version, '--dashboard-enabled', 'true',
            '--tiller-enabled', 'true', '--pods-cidr', pod_cidr_block,
            '--services-cidr', kub_svcs_cidr_block, '--service-lb-subnet-ids',
            cluster_lb_subnets, '--endpoint-subnet-id', regional_subnet_ocid
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Create cluster returns work request. Get work request response to obtain cluster OCID.
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            [
                'ce', 'work-request', 'get', '--work-request-id',
                work_request_id
            ],
            'SUCCEEDED',
            state_property_name='status',
            max_wait_seconds=CLUSTER_CREATE_PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)
        cluster_id = json.loads(get_work_request_result.output
                                )['data']['resources'][0]['identifier']

        # Get a cluster using cluster ID
        # Fetch the cluster by ID; get_params is reused below in teardown to
        # poll for the DELETED state.
        get_params = ['ce', 'cluster', 'get', '--cluster-id', cluster_id]
        result = invoke(runner, config_file, config_profile, get_params)
        util.validate_response(result)

        # Check the kubeconfig file generation
        params = [
            'ce', 'cluster', 'create-kubeconfig', '--cluster-id', cluster_id,
            '--file', 'kubeconfig'
        ]
        # NOTE(review): the result of this invoke is not validated — only the
        # written file is checked below. Confirm whether validate_response was
        # intentionally omitted here.
        invoke(runner, config_file, config_profile, params)
        # Validate the kubernetes config is in valid YAML format
        # (safe_load raises yaml.YAMLError on malformed content, failing the test)
        with open('kubeconfig', 'r') as config:
            config_data = config.read()
            yaml.safe_load(config_data)
        # Clean up the generated file so it does not leak into other tests
        if os.path.exists('kubeconfig'):
            os.remove('kubeconfig')

        # Get the list of clusters in the compartment
        params = [
            'ce', 'cluster', 'list', '--compartment-id', util.COMPARTMENT_ID
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # At minimum the cluster created by this fixture should be present
        assert len(json.loads(result.output)['data']) > 0

        # Update the cluster using cluster ID: rename it and upgrade the
        # Kubernetes version (kub_upgrade_version is computed before this chunk)
        cluster_name = util.random_name('cli_test_oce_cluster')
        params = [
            'ce', 'cluster', 'update', '--cluster-id', cluster_id, '--name',
            cluster_name, '--kubernetes-version', kub_upgrade_version
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Update cluster returns work request. Get work request response to check the command succeeded
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            [
                'ce', 'work-request', 'get', '--work-request-id',
                work_request_id
            ],
            'SUCCEEDED',
            state_property_name='status',
            max_wait_seconds=CLUSTER_UPDATE_TIME_SEC)
        util.validate_response(get_work_request_result)

        # Get the list of work request logs for the update above
        params = [
            'ce', 'work-request-log-entry', 'list', '--work-request-id',
            work_request_id, '--compartment-id', util.COMPARTMENT_ID
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Update a cluster endpoint: enable the public IP and clear NSG list
        params = [
            'ce', 'cluster', 'update-endpoint-config', '--cluster-id',
            cluster_id, '--is-public-ip-enabled', 'true', '--nsg-ids', '[]'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Update endpoint config returns a work request. Get work request response to check the command succeeded
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            [
                'ce', 'work-request', 'get', '--work-request-id',
                work_request_id
            ],
            'SUCCEEDED',
            state_property_name='status',
            max_wait_seconds=CLUSTER_UPDATE_TIME_SEC)
        util.validate_response(get_work_request_result)

        # Hand the provisioned resources to the test; teardown resumes after
        # the test completes.
        yield cluster_id, subnet_ocids[0], subnet_ocids[1], subnet_ocids[2]

    # Tear down sequence
    # Recorded under a separate cassette so delete interactions replay
    # independently of the setup cassette.
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_fixture_cluster_delete.yml'):
        # Delete the cluster
        params = [
            'ce', 'cluster', 'delete', '--cluster-id', cluster_id, '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Get the cluster and check that it moves to DELETED state
        # NOTE(review): this immediate get appears redundant — wait_until below
        # performs the same get in a polling loop; confirm it can be dropped.
        invoke(runner, config_file, config_profile, get_params)
        util.wait_until(get_params,
                        'DELETED',
                        max_wait_seconds=DELETION_TIME_SEC)

        # Delete the subnets created during setup (before this chunk),
        # waiting synchronously for each to terminate
        for subnet_id in subnet_ocids:
            params = [
                'network', 'subnet', 'delete', '--subnet-id', subnet_id,
                '--wait-for-state', 'TERMINATED', '--force'
            ]
            result = invoke(runner, config_file, config_profile, params)
            # Delete with --wait-for-state may emit no JSON body on success
            util.validate_response(result, json_response_expected=False)

        # Delete the VCN (must happen after its subnets are gone)
        params = [
            'network', 'vcn', 'delete', '--vcn-id', vcn_ocid,
            '--wait-for-state', 'TERMINATED', '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result, json_response_expected=False)

    # Remove the kubeconfig directory if earlier steps created it;
    # expandvars/expanduser resolve env vars and '~' in USER_KUBECONFIG_DIR,
    # though rmtree is passed the unexpanded path — presumably the constant
    # contains no '~' or env vars in practice; verify.
    if os.path.isdir(
            os.path.expandvars(os.path.expanduser(USER_KUBECONFIG_DIR))):
        shutil.rmtree(USER_KUBECONFIG_DIR)