Example #1
0
def file_system(filestorage_client, runner, config_file, config_profile):
    """Create a file system, yield its OCID, then delete it on teardown."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'filestorage_file_system_fixture.yml'):
        create_args = [
            'file-system', 'create', '--compartment-id', util.COMPARTMENT_ID,
            '--availability-domain',
            util.availability_domain()
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)
        fs_id = json.loads(create_result.output)['data']['id']
        # Block until the new file system is usable.
        util.wait_until(
            ['fs', 'file-system', 'get', '--file-system-id', fs_id],
            'ACTIVE',
            max_wait_seconds=300)

    yield fs_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'filestorage_file_system_fixture_cleanup.yml'):
        delete_args = [
            'file-system', 'delete', '--file-system-id', fs_id, '--force'
        ]
        invoke(runner, config_file, config_profile, delete_args)
        # Wait for the deletion to be reflected before finishing teardown.
        util.wait_until(
            ['fs', 'file-system', 'get', '--file-system-id', fs_id],
            'DELETED',
            max_wait_seconds=300)
Example #2
0
def gitlab_connection(project_and_pipeline, runner, config_file,
                      config_profile):
    """Create a GitLab DevOps connection, yield its OCID, then delete it."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_gitlab_connection_fixture.yml'):
        # Create the connection against the fixture project.
        target_project_id = project_and_pipeline[0]
        display_name = util.random_name('cli_devops_build_test_connection')
        token_secret_id = 'ocid1.vaultsecret.oc1.iad.amaaaaaa34lgq7aarejtkg6o4m5zt5wuscdruhk5hwqkwyzeyfhkxz5zdyia'
        create_args = [
            'devops', 'connection', 'create-gitlab-connection',
            '--display-name', display_name,
            '--project-id', target_project_id,
            '--personal-access-token', token_secret_id
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)
        connection_id = json.loads(create_result.output)['data']['id']
        wait_until(
            ['devops', 'connection', 'get', '--connection-id', connection_id],
            'ACTIVE', 300)
    yield connection_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_gitlab_connection_fixture_cleanup.yml'):
        # Tear the connection down.
        delete_args = [
            'devops', 'connection', 'delete', '--connection-id', connection_id,
            '--force'
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, delete_args))
Example #3
0
def wait_stage(project_and_pipeline, runner, config_file, config_profile):
    """Create a 10-second absolute wait stage on the fixture build pipeline,
    yield its OCID, and delete it during teardown."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_wait_stage_fixture.yml'):
        pipeline_id = project_and_pipeline[1]
        stage_name = util.random_name('cli_devops_build_test_wait_stage')
        criteria = {'waitType': 'ABSOLUTE_WAIT', 'waitDuration': 'PT10S'}
        predecessors = {'items': [{'id': pipeline_id}]}
        create_args = [
            'devops', 'build-pipeline-stage', 'create-wait-stage',
            '--build-pipeline-id', pipeline_id,
            '--display-name', stage_name,
            '--wait-criteria', json.dumps(criteria),
            '--stage-predecessor-collection', json.dumps(predecessors)
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)
        wait_stage_id = json.loads(create_result.output)['data']['id']
        wait_until([
            'devops', 'build-pipeline-stage', 'get', '--stage-id',
            wait_stage_id
        ], 'ACTIVE', 300)
    yield wait_stage_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_wait_stage_fixture_cleanup.yml'):
        # Remove the wait stage.
        delete_args = [
            'devops', 'build-pipeline-stage', 'delete', '--stage-id',
            wait_stage_id, '--force'
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, delete_args))
def certificate(runner, config_file, config_profile, load_balancer,
                key_pair_files):
    """Create an LB certificate, yield its name, then delete it.

    Both create and delete are asynchronous, so each is followed by a wait
    on the returned work request. Note the yield happens while the create
    cassette is still active.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_certificate.yml'):
        key_file = key_pair_files[1]
        cert_file = key_pair_files[2]
        cert_name = util.random_name('cli_lb_certificate')

        create_args = [
            'certificate', 'create', '--certificate-name', cert_name,
            '--load-balancer-id', load_balancer,
            '--ca-certificate-file', cert_file,
            '--private-key-file', key_file,
            '--public-certificate-file', cert_file,
            '--passphrase', LB_PRIVATE_KEY_PASSPHRASE
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)

        # The create call returns a work request; wait for it to succeed.
        work_request_id = json.loads(create_result.output)['opc-work-request-id']
        wait_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED',
            max_wait_seconds=LB_PROVISIONING_TIME_SEC)
        util.validate_response(wait_result)

        yield cert_name

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_certificate_delete.yml'):
        # Delete the certificate and wait on its work request as well.
        delete_args = [
            'certificate', 'delete', '--load-balancer-id', load_balancer,
            '--certificate-name', cert_name, '--force'
        ]
        delete_result = invoke(runner, config_file, config_profile, delete_args)
        util.validate_response(delete_result)

        work_request_id = json.loads(delete_result.output)['opc-work-request-id']
        wait_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED',
            max_wait_seconds=LB_PROVISIONING_TIME_SEC)
        util.validate_response(wait_result)
Example #5
0
def cross_connect_group(runner, config_file, config_profile):
    """Exercise cross-connect-group create/get/list/update, yield the OCID.

    All CRUD calls run under one cassette; the conditional delete runs
    under a separate cleanup cassette.
    """
    ccg_id = None
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_fastconnect_fixture_crossconnectgroup.yml'):
        # Create.
        group_name = util.random_name('cli_test_network_ccg')
        create_args = [
            'network', 'cross-connect-group', 'create', '--display-name',
            group_name, '--compartment-id', util.COMPARTMENT_ID
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)
        ccg_id = json.loads(create_result.output)['data']['id']

        # Get.
        get_args = [
            'network', 'cross-connect-group', 'get',
            '--cross-connect-group-id', ccg_id
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, get_args))

        # List — the new group must show up.
        list_args = [
            'network', 'cross-connect-group', 'list', '--compartment-id',
            util.COMPARTMENT_ID
        ]
        list_result = invoke(runner, config_file, config_profile, list_args)
        util.validate_response(list_result)
        assert len(json.loads(list_result.output)['data']) > 0

        # Update the display name.
        group_name = util.random_name('cli_test_network_crossconnect_grp')
        update_args = [
            'network', 'cross-connect-group', 'update',
            '--cross-connect-group-id', ccg_id, '--display-name', group_name
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, update_args))

        yield ccg_id

    # Teardown: delete only if the create succeeded.
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_fastconnect_fixture_crossconnectgroup_delete.yml'):
        if ccg_id:
            delete_args = [
                'network', 'cross-connect-group', 'delete',
                '--cross-connect-group-id', ccg_id, '--wait-for-state',
                'TERMINATED', '--force'
            ]
            delete_result = invoke(runner, config_file, config_profile,
                                   delete_args)
            util.validate_response(delete_result, json_response_expected=False)
def backend(runner, config_file, config_profile, load_balancer, backend_set):
    """Create an LB backend, yield its name ("<ip>:<port>"), then delete it."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_backend.yml'):
        ip_address = '10.0.0.10'
        port = '80'
        create_args = [
            'backend', 'create', '--ip-address', ip_address, '--port', port,
            '--load-balancer-id', load_balancer, '--backend-set-name',
            backend_set, '--weight', '3'
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)

        # Backend creation is async; wait for the returned work request.
        work_request_id = json.loads(create_result.output)['opc-work-request-id']
        wait_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED',
            max_wait_seconds=DEFAULT_WAIT_TIME)
        util.validate_response(wait_result)

        # The backend name defaults to "ipaddress:port".
        backend_name = "{}:{}".format(ip_address, port)
        yield backend_name

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_backend_delete.yml'):
        delete_args = [
            'backend', 'delete', '--load-balancer-id', load_balancer,
            '--backend-set-name', backend_set, '--backend-name', backend_name,
            '--force'
        ]
        delete_result = invoke(runner, config_file, config_profile, delete_args)
        util.validate_response(delete_result)

        # Deletion is async as well; wait for its work request.
        work_request_id = json.loads(delete_result.output)['opc-work-request-id']
        wait_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED',
            max_wait_seconds=DEFAULT_WAIT_TIME)
        util.validate_response(wait_result)
Example #7
0
def project_and_pipeline(runner, config_file, config_profile):
    """Create a DevOps project plus a build pipeline inside it.

    Yields a (project_id, build_pipeline_id) tuple; teardown deletes the
    pipeline first, then the project.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_project_and_pipeline_fixture.yml'):
        # Project creation.
        topic_id = 'ocid1.onstopic.oc1.iad.aaaaaaaatklfw3733kbwc2dzus633rb553dt52fdewujfea5tunntmqykmoq'
        notification_config = {'topicId': topic_id}
        project_name = util.random_name('cli_devops_build_test_project')
        project_args = [
            'devops', 'project', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--name', project_name,
            '--notification-config',
            json.dumps(notification_config)
        ]
        project_result = invoke(runner, config_file, config_profile,
                                project_args)
        util.validate_response(project_result)
        project_id = json.loads(project_result.output)['data']['id']
        wait_until(['devops', 'project', 'get', '--project-id', project_id],
                   'ACTIVE', 300)

        # Build-pipeline creation inside the new project.
        pipeline_name = util.random_name(
            'cli_devops_build_test_build_pipeline')
        pipeline_args = [
            'devops', 'build-pipeline', 'create', '--display-name',
            pipeline_name, '--project-id', project_id
        ]
        pipeline_result = invoke(runner, config_file, config_profile,
                                 pipeline_args)
        util.validate_response(pipeline_result)
        build_pipeline_id = json.loads(pipeline_result.output)['data']['id']
        wait_until([
            'devops', 'build-pipeline', 'get', '--build-pipeline-id',
            build_pipeline_id
        ], 'ACTIVE', 300)
    yield project_id, build_pipeline_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_project_and_pipeline_fixture_cleanup.yml'):
        # The pipeline must go before its parent project.
        util.validate_response(
            invoke(runner, config_file, config_profile, [
                'devops', 'build-pipeline', 'delete', '--build-pipeline-id',
                build_pipeline_id, '--force'
            ]))
        util.validate_response(
            invoke(runner, config_file, config_profile, [
                'devops', 'project', 'delete', '--project-id', project_id,
                '--force'
            ]))
Example #8
0
def build_stage(project_and_pipeline, github_connection, runner, config_file,
                config_profile):
    """Create a build stage on the fixture pipeline, yield its OCID,
    then delete it during teardown."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_build_stage_fixture.yml'):
        pipeline_id = project_and_pipeline[1]
        connection_id = github_connection
        stage_name = util.random_name(
            'cli_devops_build_test_build_stage')
        source_name = 'SdkCliIntegrationTest'
        # Single GitHub build source pointing at the integration-test repo.
        source_collection = {
            'items': [{
                'connectionId': connection_id,
                'connectionType': 'GITHUB',
                'repositoryUrl':
                'https://github.com/dlcbld/SdkCliIntegrationTest.git',
                'branch': 'main',
                'name': source_name
            }]
        }
        predecessors = {'items': [{'id': pipeline_id}]}
        create_args = [
            'devops', 'build-pipeline-stage', 'create-build-stage',
            '--build-pipeline-id', pipeline_id, '--display-name',
            stage_name, '--build-spec-file', 'build_spec.yml',
            '--primary-build-source', source_name, '--image',
            'OL7_X86_64_STANDARD_10', '--build-source-collection',
            json.dumps(source_collection),
            '--stage-predecessor-collection',
            json.dumps(predecessors)
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)
        build_stage_id = json.loads(create_result.output)['data']['id']
        wait_until([
            'devops', 'build-pipeline-stage', 'get', '--stage-id',
            build_stage_id
        ], 'ACTIVE', 300)
    yield build_stage_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_build_stage_fixture_cleanup.yml'):
        # Remove the build stage.
        delete_args = [
            'devops', 'build-pipeline-stage', 'delete', '--stage-id',
            build_stage_id, '--force'
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, delete_args))
Example #9
0
def mount_target(filestorage_client, vcn_and_subnet, runner, config_file,
                 config_profile):
    """Create a mount target in the fixture subnet and yield its model.

    Yields the full mount-target 'data' payload (a dict, not just the OCID);
    teardown deletes the mount target and waits for DELETED.

    Fix: removed the unused locals ``vcn_id`` and ``mount_target_name`` —
    the generated display name was never passed to the create call, so the
    mount target was always created with a service-assigned name anyway.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'filestorage_mount_target_fixture.yml'):
        subnet_id = vcn_and_subnet[1]

        params = [
            'mount-target', 'create', '--availability-domain',
            util.availability_domain(), '-c', util.COMPARTMENT_ID,
            '--subnet-id', subnet_id
        ]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        mount_target = json.loads(result.output)['data']
        mount_target_id = mount_target['id']

        # Wait via the SDK client (not the CLI) for the target to come up.
        test_config_container.do_wait(
            filestorage_client,
            filestorage_client.get_mount_target(mount_target_id),
            'lifecycle_state', 'ACTIVE')

        # Exercise CLI get mount target.
        params = ['mount-target', 'get', '--mount-target-id', mount_target_id]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

    yield mount_target

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'filestorage_mount_target_fixture_cleanup.yml'):
        params = [
            'mount-target', 'delete', '--mount-target-id', mount_target_id,
            '--force'
        ]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        util.wait_until([
            'fs', 'mount-target', 'get', '--mount-target-id', mount_target_id
        ],
                        'DELETED',
                        max_wait_seconds=300)
def load_balancer(runner, config_file, config_profile, vcn_and_subnets):
    """Create a 100Mbps load balancer across the two fixture subnets.

    Yields the new load balancer's OCID (while the create cassette is still
    active); teardown deletes it and waits for TERMINATED.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_lb.yml'):
        first_subnet = vcn_and_subnets[1]
        second_subnet = vcn_and_subnets[2]

        create_args = [
            'load-balancer', 'create', '-c', util.COMPARTMENT_ID,
            '--display-name',
            util.random_name('cli_lb'), '--shape-name', '100Mbps',
            '--subnet-ids', '["{}","{}"]'.format(first_subnet, second_subnet)
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)

        # Creation is async: wait for the work request, then read the LB
        # OCID out of the completed work request payload.
        work_request_id = json.loads(create_result.output)['opc-work-request-id']
        wait_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED',
            max_wait_seconds=LB_PROVISIONING_TIME_SEC)
        util.validate_response(wait_result)
        lb_ocid = json.loads(wait_result.output)['data']['load-balancer-id']

        yield lb_ocid

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_fixture_lb_delete.yml'):
        delete_args = [
            'load-balancer', 'delete', '--load-balancer-id', lb_ocid, '--force'
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, delete_args))

        util.wait_until(
            ['lb', 'load-balancer', 'get', '--load-balancer-id', lb_ocid],
            'TERMINATED',
            max_wait_seconds=LB_PROVISIONING_TIME_SEC,
            succeed_if_not_found=True)
Example #11
0
def cli_testing_service_client():
    """Yield a testing-service client with an open session, or None if the
    client package is unavailable.

    Fix: the original wrapped the entire fixture body (including both
    yields) in the ``try``; an ``ImportError`` raised after the first yield
    would have triggered a second ``yield None`` in the handler. The try
    block now guards only the import it is meant to catch.
    """
    try:
        from tests.cli_testing_service_client import CLITestingServiceClient
    except ImportError:
        # Testing-service client not present in this environment.
        yield None
        return

    client = CLITestingServiceClient()

    with test_config_container.create_vcr().use_cassette('generated/create_test_service_session.yml'):
        client.create_session()

    yield client

    with test_config_container.create_vcr().use_cassette('generated/close_test_service_session.yml'):
        client.end_session()
Example #12
0
def github_trigger(project_and_pipeline, runner, config_file, config_profile):
    """Create a GitHub trigger that fires the fixture build pipeline,
    yield the trigger OCID, then delete it."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_github_trigger_fixture.yml'):
        target_project_id = project_and_pipeline[0]
        target_pipeline_id = project_and_pipeline[1]
        trigger_name = util.random_name('cli_devops_build_test_trigger')
        trigger_source = 'GITHUB'
        # One action: run the pipeline on the listed GitHub events, filtered
        # to headRef 'feature' / baseRef 'master'.
        actions = [{
            'type': 'TRIGGER_BUILD_PIPELINE',
            'filter': {
                'triggerSource':
                trigger_source,
                'events': [
                    'PUSH', 'PULL_REQUEST_UPDATED', 'PULL_REQUEST_REOPENED',
                    'PULL_REQUEST_MERGED'
                ],
                'include': {
                    'headRef': 'feature',
                    'baseRef': 'master'
                }
            },
            'buildPipelineId': target_pipeline_id
        }]

        create_args = [
            'devops', 'trigger', 'create-github-trigger', '--display-name',
            trigger_name, '--project-id', target_project_id, '--actions',
            json.dumps(actions)
        ]
        create_result = invoke(runner, config_file, config_profile, create_args)
        util.validate_response(create_result)
        trigger_id = json.loads(create_result.output)['data']['id']
        wait_until(['devops', 'trigger', 'get', '--trigger-id', trigger_id],
                   'ACTIVE', 300)
    yield trigger_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_github_trigger_fixture_cleanup.yml'):
        # Delete the trigger.
        delete_args = [
            'devops', 'trigger', 'delete', '--trigger-id', trigger_id,
            '--force'
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, delete_args))
 def test_vnic_skip_source_dest(self):
     """Exercise the VNIC skip-source-dest-check operations.

     Sets up the network resources, runs the source/dest subtests, and
     always cleans up afterwards — all under one VCR cassette.
     """
     with test_config_container.create_vcr(cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette('vnic_skip_source_dest.yml'):
         try:
             self.set_up_resources()
             self.subtest_do_source_dest_operations()
         finally:
             # Tear down even if the subtest fails.
             self.clean_up_resources()
Example #14
0
def test_create_kubeconfig_6(runner, config_file, config_profile, oce_cluster,
                             request):
    """create-kubeconfig with --overwrite writes a fresh, valid kubeconfig
    at a user-supplied path that does not yet exist."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_create_kubeconfig_test6.yml'):
        kubeconfig_path = os.path.expandvars(
            os.path.expanduser(USER_KUBECONFIG_LOCATION + "_" +
                               request.function.__name__))
        # Precondition: no file may exist at the target location.
        if os.path.isfile(kubeconfig_path):
            os.remove(kubeconfig_path)

        cluster_id, _, _, _ = oce_cluster
        cmd = [
            'ce', 'cluster', 'create-kubeconfig', '--cluster-id', cluster_id,
            '--file', kubeconfig_path, '--overwrite'
        ]
        result = invoke(runner, config_file, config_profile, cmd)
        util.validate_response(result, json_response_expected=False)

        # The command must have created the file...
        assert (os.path.isfile(kubeconfig_path))
        # ...and the file must parse as YAML (safe_load raises otherwise).
        with open(kubeconfig_path, 'r') as f:
            kubeconfig = yaml.safe_load(f)
        # Exactly one cluster/context/user is expected in the fresh config.
        assert (len(kubeconfig['clusters']) == 1)
        assert (len(kubeconfig['contexts']) == 1)
        assert (len(kubeconfig['users']) == 1)
Example #15
0
    def test_all_operations(self):
        """Successfully calls every operation with basic options. The exceptions are 'vnic get' and 'vnic update', which are tested
        in test_compute.py since they require an instance.

        We also have exceptions for private-ip get/update/delete/list and attaching and detaching private IPs from VNICs, as
        these are handled in test_secondary_private_ip.py"""
        with test_config_container.create_vcr(
                cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                    'virtual_network.yml'):
            try:
                self.subtest_vcn_operations()
                self.subtest_security_list_operations()
                self.subtest_security_list_stateless_rules()
                self.subtest_subnet_operations()
                self.subtest_internet_gateway_operations()
                self.subtest_cpe_operations()
                self.subtest_dhcp_option_operations()
                self.subtest_drg_operations()
                self.subtest_drg_attachment_operations()
                self.subtest_ip_sec_connection_operations()
                self.subtest_route_table_operations()

                # NOTE(review): 'drg_capacity_issue' is presumably set by one
                # of the DRG subtests above when capacity runs out — confirm.
                if hasattr(self, 'drg_capacity_issue'):
                    pytest.skip('Skipped DRG tests due to capacity issues')
            finally:
                # Settle before cleanup; this sleep is VCR-mode aware.
                util.vcr_mode_aware_sleep(20)
                self.subtest_delete()
Example #16
0
def zone(dns_client, runner, config_file, config_profile):
    """Create a PRIMARY DNS zone with a randomized name; yield (id, name).

    Zone names are global, so a random suffix reduces the chance of a
    collision with another tenancy or another run of this test. Only the
    cleanup half runs under a VCR cassette.
    """
    zone_name = 'clitest-dnszone-' + str(random.randint(0, 1000000)) + '.test'
    create_args = [
        'zone', 'create', '--name', zone_name, '--compartment-id',
        util.COMPARTMENT_ID, '--zone-type', 'PRIMARY'
    ]
    create_result = invoke(runner, config_file, config_profile, create_args)
    util.validate_response(create_result)

    oci.wait_until(
        dns_client,
        dns_client.get_zone(zone_name),
        evaluate_response=lambda r: r.data.lifecycle_state == 'ACTIVE',
        max_wait_seconds=360)
    # During cassette playback the recorded name won't match the freshly
    # randomized one, so re-read the name from the create response.
    zone_name = json.loads(create_result.output)['data']['name']
    zone_id = dns_client.get_zone(zone_name).data.id

    print("created zone_id=" + str(zone_id) + ", zone_name=" + str(zone_name))
    yield zone_id, zone_name

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'dns_test_cleanup.yml'):
        delete_args = ['zone', 'delete', '--zone-name-or-id', zone_id, '--force']
        delete_result = invoke(runner, config_file, config_profile, delete_args)
        util.validate_response(delete_result)
        print("deleted zone_id=" + str(zone_id) + ", zone_name=" +
              str(zone_name))
Example #17
0
def test_create_kubeconfig_4(runner, config_file, config_profile, oce_cluster,
                             request):
    """create-kubeconfig fills in an existing but EMPTY file at a
    user-supplied path."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_create_kubeconfig_test4.yml'):
        kubeconfig_path = os.path.expandvars(
            os.path.expanduser(USER_KUBECONFIG_LOCATION + "_" +
                               request.function.__name__))
        # Start from a clean slate: drop any leftover file...
        if os.path.isfile(kubeconfig_path):
            os.remove(kubeconfig_path)
        # ...make sure the parent directory exists...
        if os.path.dirname(kubeconfig_path) and not os.path.exists(
                os.path.dirname(kubeconfig_path)):
            os.makedirs(os.path.dirname(kubeconfig_path))
        # ...then create an empty file for the command to populate.
        open(kubeconfig_path, 'w').close()

        cluster_id, _, _, _ = oce_cluster
        cmd = [
            'ce', 'cluster', 'create-kubeconfig', '--cluster-id', cluster_id,
            '--file', kubeconfig_path
        ]
        result = invoke(runner, config_file, config_profile, cmd)
        util.validate_response(result, json_response_expected=False)

        # The file must now parse as YAML (safe_load raises if it doesn't).
        with open(kubeconfig_path, 'r') as f:
            kubeconfig = yaml.safe_load(f)
        # Exactly one cluster/context/user should be present.
        assert (len(kubeconfig['clusters']) == 1)
        assert (len(kubeconfig['contexts']) == 1)
        assert (len(kubeconfig['users']) == 1)
    def test_import_from_object(self):
        """Import an image from object storage both by bucket/object name
        and by object URI, checking the OS metadata round-trips."""
        with test_config_container.create_vcr(
                cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                    'test_image_import_export_os.yml'):
            bucket_name = 'CliImageImportExport_vcr'
            image_name = 'exported-image-20200715-1133'
            operating_system = 'Windows'
            operating_system_version = 'Server 2019 Standard'

            # Import by namespace / bucket / object name.
            by_name_result = util.invoke_command([
                'compute', 'image', 'import', 'from-object', '-c',
                util.COMPARTMENT_ID, '-bn', bucket_name, '--name', image_name,
                '-ns', util.NAMESPACE, '--operating-system', operating_system,
                '--operating-system-version', operating_system_version
            ])
            by_name_data = json.loads(by_name_result.output)['data']
            assert operating_system in by_name_data['operating-system']
            assert operating_system_version in by_name_data[
                'operating-system-version']

            # Import the same object again, this time via its full URI.
            by_uri_result = util.invoke_command([
                'compute', 'image', 'import', 'from-object-uri', '-c',
                util.COMPARTMENT_ID, '--uri',
                'https://objectstorage.us-phoenix-1.oraclecloud.com/n/dex-us-phx-cli-1/b/CliImageImportExport_vcr/o/exported-image-20200715-1133',
                '--operating-system', operating_system,
                '--operating-system-version', operating_system_version
            ])
            by_uri_data = json.loads(by_uri_result.output)['data']
            assert operating_system in by_uri_data['operating-system']
            assert operating_system_version in by_uri_data[
                'operating-system-version']
Example #19
0
def test_update_retire_reactivate_namespace_and_tag(identity_client, tag_namespace_and_tags):
    """Tag namespace/tag lifecycle: create (or reuse), apply tags, retire, reactivate, read back."""
    with test_config_container.create_vcr(cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette('tag_management.yml'):
        if os.environ.get('OCI_CLI_TAG_MGMT_USE_EXISTING_TAG_AND_NAMESPACE'):
            # Run against the pre-provisioned namespace/tag instead of creating fresh ones.
            tag_namespace_id = tag_data_container.tag_namespace.id
            tag_name = tag_data_container.tags[0].name
            print('Reusing existing tag namespace {} and tag {}'.format(tag_namespace_id, tag_name))

            tag_data_container.ensure_namespace_and_tags_active(invoke)
        else:
            # Unique, lower-cased names so repeated runs do not collide.
            suffix = str(random.randint(1, int(time.time())))
            namespace_name = ('cliTagNamespace_{}'.format(suffix)).lower()
            tag_name = ('cliTag_{}'.format(suffix)).lower()

            creation = invoke(['iam', 'tag-namespace', 'create', '-c', util.COMPARTMENT_ID, '--name', namespace_name, '--description', 'initial description'])
            util.validate_response(creation)
            namespace_data = json.loads(creation.output)['data']
            tag_namespace_id = namespace_data['id']
            assert namespace_name == namespace_data['name']
            assert 'initial description' == namespace_data['description']
            assert not namespace_data['is-retired']

            creation = invoke(['iam', 'tag', 'create', '--tag-namespace-id', tag_namespace_id, '--name', tag_name, '--description', 'tag description'])
            util.validate_response(creation)
            tag_data = json.loads(creation.output)['data']
            assert tag_name == tag_data['name']
            assert 'tag description' == tag_data['description']
            assert not tag_data['is-retired']

        apply_tags_to_tag_namespace(tag_namespace_id)
        apply_tags_to_tag(tag_namespace_id, tag_name)
        update_retire_reactivate_operations(tag_namespace_id, tag_name)
        get_and_list_operations(identity_client, tag_namespace_id, tag_name)
def test_list_lb_policy(runner, config_file, config_profile):
    """List the available load balancer policies in the test compartment."""
    recorder = test_config_container.create_vcr(
        cassette_library_dir=CASSETTE_LIBRARY_DIR)
    with recorder.use_cassette('test_load_balancer_lb_policy.yml'):
        result = invoke(runner, config_file, config_profile,
                        ['policy', 'list', '-c', util.COMPARTMENT_ID])
        util.validate_response(result)
Exemple #21
0
def vcr_fixture(request):
    """Yield inside a DNS cassette named after the requesting test function."""
    recorder = test_config_container.create_vcr(cassette_library_dir=CASSETTE_LIBRARY_DIR)
    # This will help us match the random zone name from the cassette.
    # For test_update_zone, we are only expecting POST for the fixture and PUT.
    print(request.function.__name__)
    if request.function.__name__ == "test_update_zone":
        recorder.match_on = ['method']
    cassette_name = 'dns_{name}.yml'.format(name=request.function.__name__)
    with recorder.use_cassette(cassette_name):
        yield
Exemple #22
0
def vcr_fixture(request):
    """Wrap a key-management test in a cassette recorded under for_generated/."""
    # Use the default matching logic (link below) except that
    # 'session_agnostic_query_matcher' replaces the plain 'query' matcher
    # (it ignores sessionId in the URL).
    # https://vcrpy.readthedocs.io/en/latest/configuration.html#request-matching
    recorder = test_config_container.create_vcr(
        cassette_library_dir="services/key_management/tests/cassettes/for_generated")
    with recorder.use_cassette(
            'key_management_{name}.yml'.format(name=request.function.__name__)):
        yield
 def test_vlan_secondary_ip_operations(self):
     """Run the VLAN secondary-IP subtest under a cassette, always cleaning up resources."""
     with test_config_container.create_vcr(cassette_library_dir=CASSETTE_LIBRARY_DIR)\
             .use_cassette('vlan_secondary_ip_operations.yml'):
         # We delegate to an internal method and have a try-catch so that we have
         # an opportunity to clean up resources after the meat of the test is over
         try:
             self.subtest_vlan_secondary_ip_operations()
         finally:
             self.clean_up_resources()
Exemple #24
0
def deliver_artifact_stage(project_and_pipeline, build_stage, runner,
                           config_file, config_profile):
    """Fixture: create a deliver-artifact build stage, yield its OCID, then delete it.

    Creation and deletion are recorded on separate cassettes so the yield
    happens outside any cassette context.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_deliver_artifact_stage_fixture.yml'):
        # create deliver artifact stage
        pipeline_id = project_and_pipeline[1]
        stage_name = util.random_name(
            'cli_devops_build_test_deliver_artifact_stage')
        artifact_collection = {
            'items': [{
                'artifactId': 'artifactId',
                'artifactName': 'artifactName'
            }]
        }
        # The new stage runs after the provided build stage.
        predecessors = {'items': [{'id': build_stage}]}
        create_params = [
            'devops', 'build-pipeline-stage', 'create-deliver-artifact-stage',
            '--build-pipeline-id', pipeline_id, '--display-name',
            stage_name, '--deliver-artifact-collection',
            json.dumps(artifact_collection),
            '--stage-predecessor-collection',
            json.dumps(predecessors)
        ]
        create_result = invoke(runner, config_file, config_profile, create_params)
        util.validate_response(create_result)
        deliver_artifact_stage_id = json.loads(create_result.output)['data']['id']
        wait_until([
            'devops', 'build-pipeline-stage', 'get', '--stage-id',
            deliver_artifact_stage_id
        ], 'ACTIVE', 300)
    yield deliver_artifact_stage_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'devops_build_deliver_artifact_stage_fixture_cleanup.yml'):
        # delete deliver artifact stage
        delete_result = invoke(runner, config_file, config_profile, [
            'devops', 'build-pipeline-stage', 'delete', '--stage-id',
            deliver_artifact_stage_id, '--force'
        ])
        util.validate_response(delete_result)
Exemple #25
0
def test_create_kubeconfig_2(runner, config_file, config_profile, oce_cluster,
                             request):
    """Verify create-kubeconfig merges into an existing kubeconfig file.

    TEST 2A: seed the default kubeconfig location with a sample config, run
    create-kubeconfig, and assert exactly one cluster/user/context was added.
    TEST 2B: run the same command again and assert the counts are unchanged
    (merging the same cluster twice must not duplicate entries).
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_create_kubeconfig_test2.yml'):

        # TEST 2A
        # There should be a pre-populated default kubeconfig file for this test.
        config_file_path = os.path.expandvars(
            os.path.expanduser(DEFAULT_KUBECONFIG_LOCATION))
        if is_multi_threaded_test:
            # Give each test its own file so parallel tests don't clobber each other.
            config_file_path += "_" + request.function.__name__
        # Remove any previous ./kube/config file if it exists
        if os.path.isfile(config_file_path):
            os.remove(config_file_path)
        # Create the directory path for the file based on default path ~/.kube/config
        if os.path.dirname(config_file_path) and not os.path.exists(
                os.path.dirname(config_file_path)):
            os.makedirs(os.path.dirname(config_file_path))
        # Write a sample kubeconfig to kubeconfig file at default location
        with open((config_file_path), 'w') as f:
            f.write(sample_kubeconfig)
        sample_kubeconfig_yaml = yaml.safe_load(sample_kubeconfig)
        cluster_id, _, _, _ = oce_cluster
        params = [
            'ce', 'cluster', 'create-kubeconfig', '--cluster-id', cluster_id
        ]
        if is_multi_threaded_test:
            params.append('--file')
            params.append(config_file_path)
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result, json_response_expected=False)

        # Check if it is a valid yaml. yaml load will raise an exception in case of invalid yaml
        with open(config_file_path, 'r') as f:
            kubeconfig = yaml.safe_load(f)
        # Check there is ONE more cluster, user and context added in the merged kubeconnfig file.
        assert (len(kubeconfig['clusters']) == len(
            sample_kubeconfig_yaml['clusters']) + 1)
        assert (len(kubeconfig['contexts']) == len(
            sample_kubeconfig_yaml['contexts']) + 1)
        assert (len(
            kubeconfig['users']) == len(sample_kubeconfig_yaml['users']) + 1)

        # TEST 2B
        # For this test, execute the command again
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result, json_response_expected=False)

        # Check if it is a valid yaml. yaml load will raise an exception in case of invalid yaml
        with open(config_file_path, 'r') as f:
            kubeconfig2 = yaml.safe_load(f)
        # Check the number of clusters, users and contexts remained the same after the merge
        assert (len(kubeconfig['clusters']) == len(kubeconfig2['clusters']))
        assert (len(kubeconfig['contexts']) == len(kubeconfig2['contexts']))
        assert (len(kubeconfig['users']) == len(kubeconfig2['users']))
 def test_image_import_export(self, config):
     """Drive the image import/export subtests under one cassette, cleaning up afterwards."""
     the_vcr = test_config_container.create_vcr(cassette_library_dir=CASSETTE_LIBRARY_DIR)
     with the_vcr.use_cassette('compute_test_image_import_export.yml'):
         try:
             self.set_up_resources()
             self.subtest_image_import_export_via_tuple()
             self.subtest_image_import_export_via_uri(config)
             # self.subtest_image_import_export_via_preauthenticated_url(config)
         finally:
             # Always tear down, even if a subtest fails.
             self.clean_up_resources()
def test_backend_health_operations(runner, config_file, config_profile,
                                   load_balancer, backend_set, backend):
    """Fetch health for a single backend of a load balancer backend set."""
    recorder = test_config_container.create_vcr(
        cassette_library_dir=CASSETTE_LIBRARY_DIR)
    with recorder.use_cassette(
            'test_load_balancer_backend_health_operations.yml'):
        get_params = [
            'backend-health', 'get', '--load-balancer-id', load_balancer,
            '--backend-set-name', backend_set, '--backend-name', backend
        ]
        util.validate_response(
            invoke(runner, config_file, config_profile, get_params))
def test_listener_with_connection_timeout_operations(runner, config_file,
                                                     config_profile,
                                                     load_balancer,
                                                     backend_set):
    """Create, update and delete a listener, verifying the idle-timeout after each change."""
    recorder = test_config_container.create_vcr(
        cassette_library_dir=CASSETTE_LIBRARY_DIR)
    with recorder.use_cassette(
            'test_load_balancer_listener_with_connection_timeout_operations.yml'):
        listener_name = util.random_name('cli_listener_ct')

        def assert_idle_timeout(expected):
            # Re-read the load balancer and check the listener's configured timeout.
            get_result = invoke(
                runner, config_file, config_profile,
                ['load-balancer', 'get', '--load-balancer-id', load_balancer])
            lb_data = json.loads(get_result.output)['data']
            assert lb_data['listeners'][listener_name][
                'connection-configuration']['idle-timeout'] == expected

        create_params = [
            'listener', 'create', '--default-backend-set-name', backend_set,
            '--load-balancer-id', load_balancer, '--name', listener_name,
            '--port', '8080', '--protocol', 'HTTP',
            '--connection-configuration-idle-timeout', '100',
            '--wait-for-state', 'SUCCEEDED'
        ]
        _validate_work_request_result(
            invoke(runner, config_file, config_profile, create_params),
            load_balancer)
        assert_idle_timeout(100)

        update_params = [
            'listener', 'update', '--listener-name', listener_name,
            '--default-backend-set-name', backend_set, '--load-balancer-id',
            load_balancer, '--port', '8080', '--protocol', 'HTTP',
            '--connection-configuration-idle-timeout', '75', '--force',
            '--wait-for-state', 'SUCCEEDED'
        ]
        _validate_work_request_result(
            invoke(runner, config_file, config_profile, update_params),
            load_balancer)
        assert_idle_timeout(75)

        delete_params = [
            'listener', 'delete', '--load-balancer-id', load_balancer,
            '--listener-name', listener_name, '--force', '--wait-for-state',
            'SUCCEEDED'
        ]
        _validate_work_request_result(
            invoke(runner, config_file, config_profile, delete_params),
            load_balancer)
Exemple #29
0
def network_resources():
    """Fixture: create a VCN and subnet for boot-volume tests, yield their OCIDs, then tear down."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'boot_volume_network_resources_fixture.yml'):
        cidr_block = "10.0.0.0/16"
        vcn_name = util.random_name('cli_test_boot_vol')
        vcn_dns_label = util.random_name('vcn', insert_underscore=False)

        create_vcn_result = invoke([
            'network', 'vcn', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--display-name', vcn_name, '--cidr-block',
            cidr_block, '--dns-label', vcn_dns_label, '--wait-for-state',
            'AVAILABLE', '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(create_vcn_result, json_response_expected=False)
        vcn_ocid = util.get_json_from_mixed_string(
            create_vcn_result.output)['data']['id']

        subnet_name = util.random_name('cli_test_boot_vol')
        subnet_dns_label = util.random_name('subnet', insert_underscore=False)

        # The subnet reuses the whole VCN CIDR for simplicity.
        create_subnet_result = invoke([
            'network', 'subnet', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', subnet_name,
            '--vcn-id', vcn_ocid, '--cidr-block', cidr_block, '--dns-label',
            subnet_dns_label, '--wait-for-state', 'AVAILABLE',
            '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(create_subnet_result,
                               expect_etag=True,
                               json_response_expected=False)
        subnet_ocid = util.get_json_from_mixed_string(
            create_subnet_result.output)['data']['id']

        yield (vcn_ocid, subnet_ocid)

        # Teardown: subnet first, then the VCN that contains it.
        delete_subnet_result = invoke([
            'network', 'subnet', 'delete', '--subnet-id', subnet_ocid,
            '--force', '--wait-for-state', 'TERMINATED',
            '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(delete_subnet_result, json_response_expected=False)

        delete_vcn_result = util.invoke_command([
            'network', 'vcn', 'delete', '--vcn-id', vcn_ocid, '--force',
            '--wait-for-state', 'TERMINATED', '--wait-interval-seconds',
            util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(delete_vcn_result, json_response_expected=False)
def test_load_balancer_health_operations(runner, config_file, config_profile,
                                         load_balancer):
    """Get health for one load balancer, then list health summaries in the compartment."""
    recorder = test_config_container.create_vcr(
        cassette_library_dir=CASSETTE_LIBRARY_DIR)
    with recorder.use_cassette('test_load_balancer_lb_health_operations.yml'):
        commands = (
            ['load-balancer-health', 'get', '--load-balancer-id',
             load_balancer],
            ['load-balancer-health', 'list', '-c', util.COMPARTMENT_ID],
        )
        for command in commands:
            util.validate_response(
                invoke(runner, config_file, config_profile, command))