Example #1
    def subtest_instance_action_operations(self):
        result = self.invoke(['compute', 'instance', 'action', '--instance-id', self.instance_ocid, '--action', 'RESET'])
        util.validate_response(result)
        util.vcr_mode_aware_sleep(10)
        util.wait_until(['compute', 'instance', 'get', '--instance-id', self.instance_ocid], 'RUNNING',
                        max_wait_seconds=300)
        util.vcr_mode_aware_sleep(5)
Example #2
    def subtest_instance_console_connections_tagging(self):
        tag_names_to_values = {}
        for t in tag_data_container.tags:
            tag_names_to_values[t.name] = 'somevalue {}'.format(t.name)
        tag_data_container.write_defined_tags_to_file(
            os.path.join('tests', 'temp', 'defined_tags_compute.json'),
            tag_data_container.tag_namespace,
            tag_names_to_values
        )

        result = self.invoke([
            'compute', 'instance-console-connection', 'create',
            '--instance-id', self.instance_ocid,
            '--ssh-public-key-file', util.SSH_AUTHORIZED_KEYS_FILE,
            '--freeform-tags', 'file://tests/resources/tagging/freeform_tags_1.json',
            '--defined-tags', 'file://tests/temp/defined_tags_compute.json'
        ])
        util.validate_response(result)
        instance_console_connection_details = json.loads(result.output)
        expected_freeform = {'tagOne': 'value1', 'tag_Two': 'value two'}
        expected_defined = {tag_data_container.tag_namespace.name: tag_names_to_values}
        self.assertEqual(expected_freeform, instance_console_connection_details['data']['freeform-tags'])
        self.assertEqual(expected_defined, instance_console_connection_details['data']['defined-tags'])

        self.invoke(['compute', 'instance-console-connection', 'delete', '--instance-console-connection-id', instance_console_connection_details['data']['id'], '--force'])
        result = self.invoke(['compute', 'instance-console-connection', 'get', '--instance-console-connection-id', instance_console_connection_details['data']['id']])
        parsed_result = json.loads(result.output)
        if 'DELET' not in parsed_result['data']['lifecycle-state']:
            print("parsed_result=" + str(parsed_result) + ", lifecycle-state=" + str(parsed_result['data']['lifecycle-state']))
            util.vcr_mode_aware_sleep(60)
            result = self.invoke(['compute', 'instance-console-connection', 'get', '--instance-console-connection-id', instance_console_connection_details['data']['id']])
            parsed_result = json.loads(result.output)
        self.assertTrue(parsed_result['data']['lifecycle-state'] == 'DELETED' or parsed_result['data']['lifecycle-state'] == 'DELETING')
Example #3
def zone(dns_client, runner, config_file, config_profile):
    # Since zone names are global, reusing this name in another tenancy, or in another run of
    # this test, would make the test fail. Varying the zone name reduces the chance of a
    # name collision.
    zone_name = 'clitest-dnszone-' + str(random.randint(0, 1000000)) + '.test'
    params = [
        'zone', 'create', '--name', zone_name, '--compartment-id',
        util.COMPARTMENT_ID, '--zone-type', 'PRIMARY'
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)

    # The zone name in a cassette response will not match the randomly generated zone name,
    # so read the actual name back from the create response before polling.
    zone_name = json.loads(result.output)['data']['name']

    oci.wait_until(
        dns_client,
        dns_client.get_zone(zone_name),
        evaluate_response=lambda r: r.data.lifecycle_state == 'ACTIVE',
        max_wait_seconds=360)
    zone_id = dns_client.get_zone(zone_name).data.id

    print("created zone_id=" + str(zone_id) + ", zone_name=" + str(zone_name))
    yield zone_id, zone_name

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'dns_test_cleanup.yml'):
        params = ['zone', 'delete', '--zone-name-or-id', zone_id, '--force']

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        print("deleted zone_id=" + str(zone_id) + ", zone_name=" +
              str(zone_name))
Example #4
    def subtest_delete(self):
        error_count = 0

        if len(self.instance_ocids) > 0:
            for ocid in self.instance_ocids:
                try:
                    print("checking TERMINATED for " + ocid)
                    util.wait_until(['compute', 'instance', 'get', '--instance-id', ocid], 'TERMINATED', max_wait_seconds=1200, succeed_if_not_found=True)
                except Exception as error:
                    util.print_latest_exception(error)
                    error_count = error_count + 1

        if hasattr(self, 'subnet_ocid'):
            try:
                print("Deleting subnet")
                result = util.invoke_command(['network', 'subnet', 'delete', '--subnet-id', self.subnet_ocid, '--force'])
                util.validate_response(result)
                util.wait_until(['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid], 'TERMINATED',
                                max_wait_seconds=600, succeed_if_not_found=True)
            except Exception as error:
                util.print_latest_exception(error)
                error_count = error_count + 1

        if hasattr(self, 'vcn_ocid'):
            try:
                print("Deleting vcn")
                result = util.invoke_command(['network', 'vcn', 'delete', '--vcn-id', self.vcn_ocid, '--force'])
                util.validate_response(result)
                util.wait_until(['network', 'vcn', 'get', '--vcn-id', self.vcn_ocid], 'TERMINATED',
                                max_wait_seconds=600, succeed_if_not_found=True)
            except Exception as error:
                util.print_latest_exception(error)
                error_count = error_count + 1

        self.assertEqual(0, error_count)
Example #5
def test_create_kubeconfig_6(runner, config_file, config_profile, oce_cluster,
                             request):
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_create_kubeconfig_test6.yml'):
        config_file_path = os.path.expandvars(
            os.path.expanduser(USER_KUBECONFIG_LOCATION + "_" +
                               request.function.__name__))
        # There should be no file at user provided location for this test.
        if os.path.isfile(config_file_path):
            os.remove(config_file_path)

        cluster_id, _, _, _ = oce_cluster
        params = [
            'ce', 'cluster', 'create-kubeconfig', '--cluster-id', cluster_id,
            '--file', config_file_path, '--overwrite'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result, json_response_expected=False)

        # Check that a file got created at the user-provided kubeconfig location
        assert (os.path.isfile(config_file_path))
        # Check that it is valid YAML; yaml.safe_load will raise an exception for invalid YAML
        with open(config_file_path, 'r') as f:
            kubeconfig = yaml.safe_load(f)
        # Check there is only ONE cluster, user and context in the downloaded kubeconfig for this test.
        assert (len(kubeconfig['clusters']) == 1)
        assert (len(kubeconfig['contexts']) == 1)
        assert (len(kubeconfig['users']) == 1)
Example #6
def file_system(filestorage_client, runner, config_file, config_profile):
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'filestorage_file_system_fixture.yml'):
        params = [
            'file-system', 'create', '--compartment-id', util.COMPARTMENT_ID,
            '--availability-domain',
            util.availability_domain()
        ]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        file_system_id = json.loads(result.output)['data']['id']

        util.wait_until(
            ['fs', 'file-system', 'get', '--file-system-id', file_system_id],
            'ACTIVE',
            max_wait_seconds=300)

    yield file_system_id

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'filestorage_file_system_fixture_cleanup.yml'):
        params = [
            'file-system', 'delete', '--file-system-id', file_system_id,
            '--force'
        ]

        invoke(runner, config_file, config_profile, params)
        util.wait_until(
            ['fs', 'file-system', 'get', '--file-system-id', file_system_id],
            'DELETED',
            max_wait_seconds=300)
Example #7
def exa_db_system_cleanup(runner, config_file, config_profile, db_system_id_1):
    if SKIP_CLEAN_UP_RESOURCES:
        print("Skipping clean up of DB systems and dependent resources.")
        return

    success_terminating_db_systems = True

    try:
        # terminate db system 1
        params = [
            'system', 'terminate', '--db-system-id', db_system_id_1, '--force'
        ]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # validate that it goes into terminating state
        params = ['system', 'get', '--db-system-id', db_system_id_1]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        state = json.loads(result.output)['data']['lifecycle-state']
        assert "TERMINAT" in state
        util.wait_until(
            ['db', 'system', 'get', '--db-system-id', db_system_id_1],
            'TERMINATED',
            max_wait_seconds=DB_SYSTEM_PROVISIONING_TIME_SEC,
            succeed_if_not_found=True)
    except Exception as error:
        util.print_latest_exception(error)
        success_terminating_db_systems = False

    assert success_terminating_db_systems
Example #8
    def subtest_launch_instance_merges_user_data_file_param_with_metadata(
            self):
        instance_name = util.random_name('cli_test_instance_options')
        image_id = util.oracle_linux_image()
        shape = 'VM.Standard1.2'
        hostname_label = util.random_name('bminstance',
                                          insert_underscore=False)

        launch_instance_result = util.invoke_command([
            'compute', 'instance', 'launch', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', instance_name,
            '--subnet-id', self.subnet_ocid, '--image-id', image_id, '--shape',
            shape, '--hostname-label', hostname_label + "4",
            '--user-data-file', USER_DATA_FILE, '--metadata',
            util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.
                                     compute_instance_launch_metadata_example)
        ])

        util.validate_response(launch_instance_result, expect_etag=True)
        temp_instance_ocid = util.find_id_in_response(
            launch_instance_result.output)
        self.instance_ocids.append(temp_instance_ocid)

        response = json.loads(launch_instance_result.output)
        instance_metadata = response['data']['metadata']
        assert instance_metadata['user_data']
        assert instance_metadata['ssh_authorized_keys']

        self.delete_instance(temp_instance_ocid)
Example #9
    def subtest_setup(self):
        # Create a VCN
        vcn_name = util.random_name('cli_test_compute_vcn')
        cidr_block = "10.0.0.0/16"
        vcn_dns_label = util.random_name('vcn', insert_underscore=False)

        result = util.invoke_command([
            'network', 'vcn', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--display-name', vcn_name, '--cidr-block',
            cidr_block, '--dns-label', vcn_dns_label
        ])
        self.vcn_ocid = util.find_id_in_response(result.output)
        util.validate_response(result, expect_etag=True)
        util.wait_until(['network', 'vcn', 'get', '--vcn-id', self.vcn_ocid],
                        'AVAILABLE',
                        max_wait_seconds=300)

        # Create a subnet
        subnet_name = util.random_name('cli_test_compute_subnet')
        subnet_dns_label = util.random_name('subnet', insert_underscore=False)

        result = util.invoke_command([
            'network', 'subnet', 'create', '--compartment-id',
            util.COMPARTMENT_ID, '--availability-domain',
            util.availability_domain(), '--display-name', subnet_name,
            '--vcn-id', self.vcn_ocid, '--cidr-block', cidr_block,
            '--dns-label', subnet_dns_label
        ])
        self.subnet_ocid = util.find_id_in_response(result.output)
        util.validate_response(result, expect_etag=True)
        util.wait_until(
            ['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid],
            'AVAILABLE',
            max_wait_seconds=300)
Example #10
    def subtest_volume_group_backup_operations(self):
        # create a volume group backup, perform get & list, update it & restore from it
        backup_name = util.random_name('cli_test_volume_group_backup')
        result = self.invoke([
            'volume-group-backup', 'create', '--volume-group-id',
            self.volume_group, '--display-name', backup_name
        ])
        util.validate_response(result)
        self.volume_group_backup_id = util.find_id_in_response(result.output)

        util.wait_until([
            'bv', 'volume-group-backup', 'get', '--volume-group-backup-id',
            self.volume_group_backup_id
        ],
                        'AVAILABLE',
                        max_wait_seconds=600)

        result = self.invoke([
            'volume-group-backup', 'get', '--volume-group-backup-id',
            self.volume_group_backup_id
        ])
        util.validate_response(result)
        parsed_result = json.loads(result.output)
        assert parsed_result['data']['size-in-mbs'] is not None
        assert parsed_result['data']['unique-size-in-mbs'] is not None

        result = self.invoke([
            'volume-group-backup', 'list', '--compartment-id',
            util.COMPARTMENT_ID
        ])
        util.validate_response(result)

        result = self.invoke([
            'volume-group-backup', 'list', '--compartment-id',
            util.COMPARTMENT_ID, '--volume-group-id', self.volume_group
        ])
        util.validate_response(result)
        self.assertEqual(1, len(json.loads(result.output)['data']))

        backup_name = backup_name + "_UPDATED"
        result = self.invoke([
            'volume-group-backup', 'update', '--volume-group-backup-id',
            self.volume_group_backup_id, '--display-name', backup_name
        ])
        util.validate_response(result)

        volume_group_name = util.random_name('cli_test_volume_group_restore')
        source_details = {
            'type': 'volumeGroupBackupId',
            'volumeGroupBackupId': self.volume_group_backup_id
        }
        params = [
            'volume-group', 'create', '--availability-domain',
            util.availability_domain(), '--compartment-id',
            util.COMPARTMENT_ID, '--display-name', volume_group_name,
            '--source-details',
            json.dumps(source_details)
        ]
        self.volume_group_restored, self.restored_volumes = self.volume_group_operations_internal(
            volume_group_name, params)
Example #11
    def subtest_config_get(self):
        util.set_admin_pass_phrase()
        result = util.invoke_command_as_admin(['audit', 'config', 'get', '--compartment-id', util.TENANT_ID])
        util.unset_admin_pass_phrase()
        util.validate_response(result)
        response = json.loads(result.output)
        assert response["data"]["retention-period-days"] is not None
Example #12
def test_create_kubeconfig_4(runner, config_file, config_profile, oce_cluster,
                             request):
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_oce_create_kubeconfig_test4.yml'):
        # There should be an EMPTY kubeconfig file at user provided location for this test.
        config_file_path = os.path.expandvars(
            os.path.expanduser(USER_KUBECONFIG_LOCATION + "_" +
                               request.function.__name__))
        # Remove any previous file from user provided location USER_KUBECONFIG_LOCATION
        if os.path.isfile(config_file_path):
            os.remove(config_file_path)
        # Create the directory path for the file based on user provided file location USER_KUBECONFIG_LOCATION
        if os.path.dirname(config_file_path) and not os.path.exists(
                os.path.dirname(config_file_path)):
            os.makedirs(os.path.dirname(config_file_path))
        # Create an empty file at the user provided location
        open(config_file_path, 'w').close()

        cluster_id, _, _, _ = oce_cluster
        params = [
            'ce', 'cluster', 'create-kubeconfig', '--cluster-id', cluster_id,
            '--file', config_file_path
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result, json_response_expected=False)

        # Check that it is valid YAML; yaml.safe_load will raise an exception for invalid YAML
        with open(config_file_path, 'r') as f:
            kubeconfig = yaml.safe_load(f)
        # Check there is only ONE cluster, user and context in the downloaded kubeconfig for this test.
        assert (len(kubeconfig['clusters']) == 1)
        assert (len(kubeconfig['contexts']) == 1)
        assert (len(kubeconfig['users']) == 1)
Example #13
    def subtest_clone_operations(self):
        volume_name = util.random_name('cli_test_clone_vol')
        params = [
            'volume', 'create', '--source-volume-id', self.volume_id,
            '--display-name', volume_name, '--size-in-gbs', '60'
        ]

        result = self.invoke(params)
        util.validate_response(result)

        parsed_result = json.loads(result.output)
        source_details = {'id': self.volume_id, 'type': 'volume'}
        assert source_details == parsed_result['data']['source-details']
        assert util.availability_domain() == parsed_result['data']['availability-domain']
        # We initially created a 50 GB volume; the clone increases the size to 60 GB.
        assert 60 == int(parsed_result['data']['size-in-gbs'])

        volume_id = util.find_id_in_response(result.output)
        util.wait_until(['bv', 'volume', 'get', '--volume-id', volume_id],
                        'AVAILABLE',
                        max_wait_seconds=180)
        util.wait_until(['bv', 'volume', 'get', '--volume-id', volume_id],
                        True,
                        max_wait_seconds=360,
                        state_property_name="is-hydrated")

        result = self.invoke(
            ['volume', 'delete', '--volume-id', volume_id, '--force'])
        util.validate_response(result)
Example #14
def _validate_work_request_result(result, load_balancer_id):
    util.validate_response(result, json_response_expected=False)
    assert 'Action completed. Waiting until the work request has entered state:' in result.output

    work_request = util.get_json_from_mixed_string(result.output)
    assert work_request['data']['load-balancer-id'] == load_balancer_id
    assert work_request['data']['lifecycle-state'] == 'SUCCEEDED'
Example #15
def test_erratum(managed_instance_id_fixture, runner, config_file, config_profile):
    managed_instance_id = managed_instance_id_fixture
    # list the available updates
    params = [
        'os-management', 'managed-instance', 'list-available-updates',
        '--managed-instance-id', managed_instance_id,
        '--limit', '5'
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
    availableUpdates = json.loads(result.output)['data']
    erratumId = None
    for availableUpdate in availableUpdates:
        errata = availableUpdate['errata']
        if len(errata) > 0:
            for erratum in errata:
                erratumId = erratum['id']

    if erratumId is not None:
        params = [
            'os-management', 'erratum', 'get',
            '--erratum-id', erratumId
        ]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
Example #16
    def test_010_compute_pic_listing_list(self):
        # A policy is required so we don't have to run as ADMIN:
        # Allow group PythonCLITestGroup to manage app-catalog-listing in tenancy
        result = self.invoke(['compute', 'pic', 'listing', 'list'])
        util.validate_response(result)
        json_result = json.loads(result.output)
        TestComputePic.listing_id = json_result['data'][0]['listing-id']
Example #17
def test_managed_instance_software_source(managed_instance_id_fixture, runner,
                                          config_file, config_profile):
    managed_instance_id = managed_instance_id_fixture

    # get the managed instance
    params = [
        'os-management', 'managed-instance', 'get', '--managed-instance-id',
        managed_instance_id
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
    managed_instance = json.loads(result.output)['data']
    child_software_sources = managed_instance['child-software-sources']
    parent_software_source_id = managed_instance['parent-software-source']['id']

    # test detach of child software source
    child_software_source_id = child_software_sources[0]['id']
    params = [
        'os-management', 'managed-instance', 'detach-child',
        '--managed-instance-id', managed_instance_id, '--software-source-id',
        child_software_source_id
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)

    # test detach of parent software source
    params = [
        'os-management', 'managed-instance', 'detach-parent',
        '--managed-instance-id', managed_instance_id, '--software-source-id',
        parent_software_source_id
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)

    # test attach of parent software source
    params = [
        'os-management', 'managed-instance', 'attach-parent',
        '--managed-instance-id', managed_instance_id, '--software-source-id',
        parent_software_source_id
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)

    # test attach of child software sources
    for child_software_source in child_software_sources:
        child_software_source_id = child_software_source['id']
        params = [
            'os-management', 'managed-instance', 'attach-child',
            '--managed-instance-id', managed_instance_id,
            '--software-source-id', child_software_source_id
        ]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
Example #18
def get_and_list_operations(identity_client, tag_namespace_id, tag_name):
    result = invoke([
        'iam', 'tag-namespace', 'get', '--tag-namespace-id', tag_namespace_id
    ])
    util.validate_response(result)
    parsed_result = json.loads(result.output)
    assert 'updated description' == parsed_result['data']['description']
    assert parsed_result['data']['is-retired']

    result = oci_cli.cli_util.list_call_get_all_results(
        identity_client.list_tag_namespaces,
        compartment_id=util.COMPARTMENT_ID)
    filtered_results = list(
        filter(lambda d: d.id == tag_namespace_id, result.data))
    assert len(filtered_results) == 1
    assert 'updated description' == filtered_results[0].description
    assert filtered_results[0].is_retired

    result = invoke(
        ['iam', 'tag-namespace', 'list', '-c', util.COMPARTMENT_ID, '--all'])
    parsed_result = json.loads(result.output)
    found_namespace = False
    for pr in parsed_result['data']:
        if pr['id'] == tag_namespace_id:
            assert 'updated description' == pr['description']
            assert pr['is-retired']
            found_namespace = True
            break
    assert found_namespace

    result = invoke([
        'iam', 'tag', 'get', '--tag-namespace-id', tag_namespace_id,
        '--tag-name', tag_name
    ])
    util.validate_response(result)
    parsed_result = json.loads(result.output)
    assert 'updated tag desc' == parsed_result['data']['description']
    assert parsed_result['data']['is-retired']

    result = oci_cli.cli_util.list_call_get_all_results(
        identity_client.list_tags, tag_namespace_id=tag_namespace_id)
    filtered_results = list(filter(lambda d: d.name == tag_name, result.data))
    assert len(filtered_results) == 1
    assert 'updated tag desc' == filtered_results[0].description
    assert filtered_results[0].is_retired

    result = invoke([
        'iam', 'tag', 'list', '--tag-namespace-id', tag_namespace_id, '--all'
    ])
    parsed_result = json.loads(result.output)
    found_tag = False
    for pr in parsed_result['data']:
        # Since the namespace is retired, all tags under it should be retired.
        assert pr['is-retired']
        if pr['name'] == tag_name:
            assert 'updated tag desc' == pr['description']
            found_tag = True
            break
    assert found_tag
Example #19
def clean_up_boot_volume_backup(backup_boot_volume_id):
    if backup_boot_volume_id:
        result = invoke([
            'bv', 'boot-volume-backup', 'delete', '--boot-volume-backup-id',
            backup_boot_volume_id, '--force', '--wait-for-state', 'TERMINATED',
            '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(result, json_response_expected=False)
Example #20
def clean_up_instances(instance_ocid):
    if instance_ocid:
        result = invoke([
            'compute', 'instance', 'terminate', '--instance-id', instance_ocid,
            '--wait-for-state', 'TERMINATED', '--wait-interval-seconds',
            util.WAIT_INTERVAL_SECONDS, '--force'
        ])
        util.validate_response(result, json_response_expected=False)
Example #21
def get_backup_policy_ids():
    result = invoke('bv volume-backup-policy list --profile ADMIN'.split())
    util.validate_response(result)
    parsed_result = json.loads(result.output)
    backup_policy_ids = {}
    for policy in parsed_result["data"]:
        backup_policy_ids[policy["display-name"]] = policy["id"]
    return backup_policy_ids
Example #22
def test_list_lb_policy(runner, config_file, config_profile):
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_load_balancer_lb_policy.yml'):
        params = ['policy', 'list', '-c', util.COMPARTMENT_ID]

        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
Example #23
def test_list_dns_zones(runner, config_file, config_profile):
    params = [
        'zone', 'list',
        '--compartment-id', util.COMPARTMENT_ID
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
Example #24
def test_gitlab_trigger_get(project_and_pipeline, gitlab_trigger, runner,
                            config_file, config_profile):
    trigger_id = gitlab_trigger
    params = ['devops', 'trigger', 'get', '--trigger-id', trigger_id]
    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
    assert trigger_id == json.loads(result.output)['data']['id'], \
        "Get API should return correct trigger id"
Example #25
def test_gitlab_connection_get(project_and_pipeline, gitlab_connection, runner,
                               config_file, config_profile):
    connection_id = gitlab_connection
    params = ['devops', 'connection', 'get', '--connection-id', connection_id]
    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
    assert connection_id == json.loads(result.output)['data']['id'], \
        "Get API should return correct connection id"
Example #26
    def subtest_config_get(self):
        result = self.invoke([
            'audit', 'config', 'get', '--compartment-id', util.COMPARTMENT_ID
        ])

        util.validate_response(result)
        response = json.loads(result.output)
        assert response["data"]["retention-period-days"] is not None
Example #27
def test_gitlab_trigger_list(project_and_pipeline, gitlab_trigger, runner,
                             config_file, config_profile):
    params = [
        'devops', 'trigger', 'list', '--compartment-id', util.COMPARTMENT_ID
    ]
    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
    triggers = json.loads(result.output)['data']['items']
    assert len(triggers) > 0, "List API should return at least one trigger"
Example #28
def test_build_run_list(project_and_pipeline, build_run, runner, config_file,
                        config_profile):
    params = [
        'devops', 'build-run', 'list', '--compartment-id', util.COMPARTMENT_ID
    ]
    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
    build_runs = json.loads(result.output)['data']['items']
    assert len(build_runs) > 0, "List API should return at least one build run"
Example #29
def cross_connect_group(runner, config_file, config_profile):
    # Set-up of cross-connect group
    ccg_id = None
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_fastconnect_fixture_crossconnectgroup.yml'):
        # Create cross connect group
        ccg_name = util.random_name('cli_test_network_ccg')
        params = [
            'network', 'cross-connect-group', 'create', '--display-name',
            ccg_name, '--compartment-id', util.COMPARTMENT_ID
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        ccg_id = json.loads(result.output)['data']['id']

        # Get cross connect group
        params = [
            'network', 'cross-connect-group', 'get',
            '--cross-connect-group-id', ccg_id
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # List cross connect group
        params = [
            'network', 'cross-connect-group', 'list', '--compartment-id',
            util.COMPARTMENT_ID
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        assert len(json.loads(result.output)['data']) > 0

        # Update cross connect group
        ccg_name = util.random_name('cli_test_network_crossconnect_grp')
        params = [
            'network', 'cross-connect-group', 'update',
            '--cross-connect-group-id', ccg_id, '--display-name', ccg_name
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        yield ccg_id

    # Teardown of cross-connect group
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
                'test_fastconnect_fixture_crossconnectgroup_delete.yml'):
        if ccg_id:
            # Delete cross connect group
            params = [
                'network', 'cross-connect-group', 'delete',
                '--cross-connect-group-id', ccg_id, '--wait-for-state',
                'TERMINATED', '--force'
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)
Example #30
def test_list_file_systems(file_system, runner, config_file, config_profile):
    params = [
        'file-system', 'list', '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain',
        util.availability_domain()
    ]

    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)