def test_volume_create_validations(self):
    """Verify that 'volume create' rejects each invalid option combination
    with the expected error message."""

    def expect_create_error(create_params, expected_message):
        # Run the CLI command and check the validation message appears.
        invoke_result = self.invoke(create_params)
        assert expected_message in invoke_result.output

    # Both a backup and a source volume may not be given at once.
    expect_create_error(
        ['volume', 'create',
         '--source-volume-id', 'unit-test',
         '--volume-backup-id', 'unit-test',
         '--availability-domain', 'unit-test',
         '--compartment-id', 'unit-test'],
        'You cannot specify both the --volume-backup-id and --source-volume-id options')

    # An empty volume needs an availability domain.
    expect_create_error(
        ['volume', 'create', '-c', util.COMPARTMENT_ID, '--size-in-gbs', '50'],
        'An availability domain must be specified when creating an empty volume or restoring a volume from a backup')

    # Restoring from a backup also needs an availability domain.
    expect_create_error(
        ['volume', 'create', '-c', util.COMPARTMENT_ID, '--volume-backup-id', 'unit-test'],
        'An availability domain must be specified when creating an empty volume or restoring a volume from a backup')

    # An empty volume needs a compartment.
    expect_create_error(
        ['volume', 'create', '--availability-domain', util.availability_domain()],
        'A compartment ID must be specified when creating an empty volume')

    # Size may be given in MBs or GBs, never both.
    expect_create_error(
        ['volume', 'create', '-c', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--size-in-gbs', '50', '--size-in-mbs', '51200'],
        'You cannot specify both --size-in-mbs and --size-in-gbs')
def subtest_setup(self):
    """Provision the resources later subtests depend on: a VCN, a subnet in
    that VCN, and a block volume.

    Stores the created OCIDs on self (vcn_ocid, subnet_ocid, volume_ocid)
    and blocks until each resource reaches the AVAILABLE state.
    """
    # Create a VCN
    vcn_name = util.random_name('cli_test_compute_vcn')
    cidr_block = "10.0.0.0/16"
    result = self.invoke([
        'network', 'vcn', 'create', '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', vcn_name, '--dns-label', 'clivcn',
        '--cidr-block', cidr_block
    ])
    self.vcn_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until(['network', 'vcn', 'get', '--vcn-id', self.vcn_ocid],
                    'AVAILABLE', max_wait_seconds=300)

    # Create a subnet (uses the whole VCN CIDR)
    subnet_name = util.random_name('cli_test_compute_subnet')
    result = self.invoke([
        'network', 'subnet', 'create', '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', subnet_name, '--dns-label', 'clisubnet',
        '--vcn-id', self.vcn_ocid, '--cidr-block', cidr_block,
    ])
    self.subnet_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until(
        ['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid],
        'AVAILABLE', max_wait_seconds=300)

    # Create a volume
    volume_name = util.random_name('cli_test_compute_volume')
    result = self.invoke([
        'bv', 'volume', 'create',
        '--availability-domain', util.availability_domain(),
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', volume_name
    ])
    util.validate_response(result)
    self.volume_ocid = util.find_id_in_response(result.output)
    util.wait_until(
        ['bv', 'volume', 'get', '--volume-id', self.volume_ocid],
        'AVAILABLE', max_wait_seconds=180)
def subtest_instance_operations(self):
    """Launch, list, update and get a compute instance, then launch a second
    instance with a deliberately short --wait-for-state timeout and assert
    the command exits non-zero.
    """
    instance_name = util.random_name('cli_test_instance')
    fault_domain = 'FAULT-DOMAIN-1'
    image_id = util.oracle_linux_image()
    shape = 'VM.Standard1.1'
    result = self.invoke(
        ['compute', 'instance', 'launch',
         '--compartment-id', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--display-name', instance_name,
         '--fault-domain', fault_domain,
         '--subnet-id', self.subnet_ocid,
         '--image-id', image_id,
         '--shape', shape,
         '--metadata', util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.compute_instance_launch_metadata_example)])
    self.instance_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until(['compute', 'instance', 'get', '--instance-id', self.instance_ocid], 'RUNNING', max_wait_seconds=600)

    result = self.invoke(['compute', 'instance', 'list', '--compartment-id', util.COMPARTMENT_ID])
    util.validate_response(result)

    # list with compartment shortcut
    result = self.invoke(['compute', 'instance', 'list', '-c', util.COMPARTMENT_ID])
    util.validate_response(result)

    instance_name = instance_name + "_updated"
    result = self.invoke(['compute', 'instance', 'update', '--instance-id', self.instance_ocid, '--display-name', instance_name])
    util.validate_response(result, expect_etag=True)

    result = self.invoke(['compute', 'instance', 'get', '--instance-id', self.instance_ocid])
    util.validate_response(result, expect_etag=True)

    # Second launch: waiting only 20s for RUNNING is expected to time out,
    # producing a non-zero exit code.  The JSON payload embedded in the
    # output (from the first '{') still carries the instance OCID, which is
    # recorded on self so the instance can be cleaned up later.
    result = self.invoke(
        ['compute', 'instance', 'launch',
         '--compartment-id', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--display-name', instance_name + "_2",
         '--fault-domain', fault_domain,
         '--subnet-id', self.subnet_ocid,
         '--image-id', image_id,
         '--shape', shape,
         '--metadata', util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.compute_instance_launch_metadata_example),
         '--wait-for-state', 'RUNNING',
         '--max-wait-seconds', '20',
         '--wait-interval-seconds', '5'])
    self.instance_ocid_2 = util.find_id_in_response(result.output[result.output.index('{'):])
    assert result.exit_code != 0
def file_system(filestorage_client, runner, config_file, config_profile):
    """pytest fixture: create a file system, yield its OCID, then delete it.

    The create and cleanup phases each record/replay HTTP traffic through
    their own VCR cassette.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_file_system_fixture.yml'):
        params = [
            'file-system', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--availability-domain', util.availability_domain()
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        file_system_id = json.loads(result.output)['data']['id']
        util.wait_until(
            ['fs', 'file-system', 'get', '--file-system-id', file_system_id],
            'ACTIVE', max_wait_seconds=300)
    # Hand the OCID to the test; everything after this runs as teardown.
    yield file_system_id
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_file_system_fixture_cleanup.yml'):
        params = [
            'file-system', 'delete',
            '--file-system-id', file_system_id,
            '--force'
        ]
        invoke(runner, config_file, config_profile, params)
        util.wait_until(
            ['fs', 'file-system', 'get', '--file-system-id', file_system_id],
            'DELETED', max_wait_seconds=300)
def test_tutorial(virtual_network, compute, block_storage, config):
    """Run the 'Launching Your First Instance' tutorial end to end.

    Creates a VCN, subnet and internet gateway, launches an instance,
    creates and attaches a volume, then tears everything down in the
    finally block regardless of where a failure occurred.
    """
    test_id = tests.util.random_number_string()
    availability_domain = util.availability_domain()
    compartment = config["tenancy"]
    ssh_file = os.environ['OCI_PYSDK_PUBLIC_SSH_KEY_FILE']
    with open(ssh_file) as f:
        public_key = f.read().strip()
    # Track every created resource so teardown only touches what exists.
    # BUG FIX: 'gateway' must be initialized here too — the finally block
    # reads it, and a failure before create_internet_gateway() would
    # otherwise raise NameError during cleanup and mask the real error.
    vcn = None
    subnet = None
    gateway = None
    instance = None
    volume = None
    attachment = None
    with test_config_container.create_vcr().use_cassette(
            'launch_instance_tutorial.yml'):
        try:
            vcn = create_cloud_network(virtual_network, compartment, test_id)
            subnet = create_subnet(virtual_network, compartment, test_id,
                                   availability_domain, vcn)
            gateway = create_internet_gateway(virtual_network, compartment,
                                              test_id, vcn)
            update_route_table(virtual_network, test_id, vcn, gateway)

            # There's a bug where the instance will immediately terminate if we
            # don't add some extra wait time before launching. (COM-79)
            time.sleep(15)

            instance = launch_instance(compute, compartment, test_id,
                                       availability_domain, subnet, public_key)
            log_public_ip_address(compute, virtual_network, compartment,
                                  instance)
            volume = create_volume(block_storage, compartment, test_id,
                                   availability_domain)
            attachment = attach_volume(compute, compartment, instance, volume)
        finally:
            # Tear down in reverse dependency order.
            if volume:
                if attachment:
                    detach_volume(compute, attachment)
                delete_volume(block_storage, volume)
            if instance:
                terminate_instance(compute, instance)
            if subnet:
                delete_subnet(virtual_network, subnet)
            if gateway:
                # Clear the route table so it does not have a reference to the internet gateway
                virtual_network.update_route_table(
                    vcn.default_route_table_id,
                    oci.core.models.UpdateRouteTableDetails(route_rules=[]))
                delete_internet_gateway(virtual_network, gateway.id)
            if vcn:
                delete_cloud_network(virtual_network, vcn)
def subtest_setup(self):
    """Provision the VCN and subnet this test class operates on, storing
    their OCIDs on self.vcn_ocid and self.subnet_ocid."""
    # Virtual cloud network
    vcn_display_name = util.random_name('cli_test_compute_vcn')
    cidr = "10.0.0.0/16"
    vcn_dns = util.random_name('vcn', insert_underscore=False)
    vcn_result = util.invoke_command([
        'network', 'vcn', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', vcn_display_name,
        '--cidr-block', cidr,
        '--dns-label', vcn_dns
    ])
    self.vcn_ocid = util.find_id_in_response(vcn_result.output)
    util.validate_response(vcn_result, expect_etag=True)
    util.wait_until(['network', 'vcn', 'get', '--vcn-id', self.vcn_ocid],
                    'AVAILABLE', max_wait_seconds=300)

    # Subnet spanning the whole VCN CIDR
    subnet_display_name = util.random_name('cli_test_compute_subnet')
    subnet_dns = util.random_name('subnet', insert_underscore=False)
    subnet_result = util.invoke_command([
        'network', 'subnet', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', subnet_display_name,
        '--vcn-id', self.vcn_ocid,
        '--cidr-block', cidr,
        '--dns-label', subnet_dns
    ])
    self.subnet_ocid = util.find_id_in_response(subnet_result.output)
    util.validate_response(subnet_result, expect_etag=True)
    util.wait_until(
        ['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid],
        'AVAILABLE', max_wait_seconds=300)
def subtest_volume_group_backup_operations(self):
    """Create a volume group backup; exercise get, list and update; then
    restore a new volume group from the backup.

    Records the backup OCID on self.volume_group_backup_id and the restored
    group and its volumes on self.volume_group_restored /
    self.restored_volumes.
    """
    # create a volume group backup, perform get & list, update it & restore from it
    backup_name = util.random_name('cli_test_volume_group_backup')
    result = self.invoke([
        'volume-group-backup', 'create',
        '--volume-group-id', self.volume_group,
        '--display-name', backup_name
    ])
    util.validate_response(result)
    self.volume_group_backup_id = util.find_id_in_response(result.output)
    util.wait_until([
        'bv', 'volume-group-backup', 'get',
        '--volume-group-backup-id', self.volume_group_backup_id
    ], 'AVAILABLE', max_wait_seconds=600)

    result = self.invoke([
        'volume-group-backup', 'get',
        '--volume-group-backup-id', self.volume_group_backup_id
    ])
    util.validate_response(result)
    parsed_result = json.loads(result.output)
    # A completed backup reports both its total and unique sizes.
    assert parsed_result['data']['size-in-mbs'] is not None
    assert parsed_result['data']['unique-size-in-mbs'] is not None

    result = self.invoke([
        'volume-group-backup', 'list',
        '--compartment-id', util.COMPARTMENT_ID
    ])
    util.validate_response(result)

    # Filtering by volume group should return exactly the backup just made.
    result = self.invoke([
        'volume-group-backup', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--volume-group-id', self.volume_group
    ])
    util.validate_response(result)
    # FIX: use assertEqual — assertEquals is a deprecated unittest alias
    # that was removed in Python 3.12.
    self.assertEqual(1, len(json.loads(result.output)['data']))

    backup_name = backup_name + "_UPDATED"
    result = self.invoke([
        'volume-group-backup', 'update',
        '--volume-group-backup-id', self.volume_group_backup_id,
        '--display-name', backup_name
    ])
    util.validate_response(result)

    # Restore: create a new volume group sourced from the backup.
    volume_group_name = util.random_name('cli_test_volume_group_restore')
    source_details = {
        'type': 'volumeGroupBackupId',
        'volumeGroupBackupId': self.volume_group_backup_id
    }
    params = [
        'volume-group', 'create',
        '--availability-domain', util.availability_domain(),
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', volume_group_name,
        '--source-details', json.dumps(source_details)
    ]
    self.volume_group_restored, self.restored_volumes = self.volume_group_operations_internal(
        volume_group_name, params)
def subtest_clone_operations(self):
    """Clone the existing test volume with a larger size, verify the clone's
    source details, availability domain, size and hydration, then delete it.
    """
    volume_name = util.random_name('cli_test_clone_vol')
    params = [
        'volume', 'create',
        '--source-volume-id', self.volume_id,
        '--display-name', volume_name,
        '--size-in-gbs', '60'
    ]
    result = self.invoke(params)
    util.validate_response(result)
    parsed_result = json.loads(result.output)
    source_details = {'id': self.volume_id, 'type': 'volume'}
    assert source_details == parsed_result['data']['source-details']
    # A clone is created in the same availability domain as its source.
    assert util.availability_domain(
    ) == parsed_result['data']['availability-domain']
    assert 60 == int(
        parsed_result['data']['size-in-gbs']
    )  # We initially created a 50GB volume, now increasing to 60
    volume_id = util.find_id_in_response(result.output)
    util.wait_until(['bv', 'volume', 'get', '--volume-id', volume_id],
                    'AVAILABLE', max_wait_seconds=180)
    # Wait for the clone to finish copying data from its source volume.
    util.wait_until(['bv', 'volume', 'get', '--volume-id', volume_id],
                    True, max_wait_seconds=360,
                    state_property_name="is-hydrated")
    result = self.invoke(
        ['volume', 'delete', '--volume-id', volume_id, '--force'])
    util.validate_response(result)
def subtest_windows_instance_operations(self):
    """Launch a Windows VM, fetch its initial credentials, then terminate it."""
    instance_name = util.random_name('cli_test_instance')
    image_id = util.windows_vm_image()
    shape = 'VM.Standard1.1'
    result = self.invoke(
        ['compute', 'instance', 'launch',
         '--compartment-id', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--display-name', instance_name,
         '--subnet-id', self.subnet_ocid,
         '--image-id', image_id,
         '--shape', shape])
    self.windows_instance_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until(['compute', 'instance', 'get', '--instance-id', self.windows_instance_ocid], 'RUNNING', max_wait_seconds=600)

    result = self.invoke(['compute', 'instance', 'get', '--instance-id', self.windows_instance_ocid])
    util.validate_response(result, expect_etag=True)

    # Windows images provide one-time initial credentials for the 'opc' user.
    result = self.invoke(
        ['compute', 'instance', 'get-windows-initial-creds', '--instance-id', self.windows_instance_ocid])
    util.validate_response(result)
    credentials = json.loads(result.output)['data']
    assert credentials['username'] == 'opc'
    assert 'password' in credentials

    result = self.invoke(
        ['compute', 'instance', 'terminate', '--instance-id', self.windows_instance_ocid, '--force'])
    util.validate_response(result)
def subtest_launch_instance_merges_user_data_file_param_with_metadata(
        self):
    """Launch with both --user-data-file and --metadata, then verify the
    launched instance's metadata contains the user_data from the file
    merged alongside the ssh_authorized_keys from the metadata argument.
    """
    instance_name = util.random_name('cli_test_instance_options')
    image_id = util.oracle_linux_image()
    shape = 'VM.Standard1.2'
    hostname_label = util.random_name('bminstance', insert_underscore=False)
    launch_instance_result = util.invoke_command([
        'compute', 'instance', 'launch',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', instance_name,
        '--subnet-id', self.subnet_ocid,
        '--image-id', image_id,
        '--shape', shape,
        '--hostname-label', hostname_label + "4",
        '--user-data-file', USER_DATA_FILE,
        '--metadata',
        util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.
                                 compute_instance_launch_metadata_example)
    ])
    util.validate_response(launch_instance_result, expect_etag=True)
    temp_instance_ocid = util.find_id_in_response(
        launch_instance_result.output)
    # Record the OCID first so the instance is tracked for cleanup even if
    # the assertions below fail.
    self.instance_ocids.append(temp_instance_ocid)

    response = json.loads(launch_instance_result.output)
    instance_metadata = response['data']['metadata']
    # Both sources must survive the merge.
    assert instance_metadata['user_data']
    assert instance_metadata['ssh_authorized_keys']

    self.delete_instance(temp_instance_ocid)
def test_list_file_systems(file_system, runner, config_file, config_profile):
    """List file systems in the test compartment/AD and validate the response."""
    list_params = [
        'file-system', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain()
    ]
    list_result = invoke(runner, config_file, config_profile, list_params)
    util.validate_response(list_result)
def subtest_list_vnics(self):
    """Exercise 'compute instance list-vnics' by compartment, availability
    domain and instance, including pagination (--limit/--page/--all)."""
    result = self.invoke([
        'compute', 'instance', 'list-vnics',
        '--compartment-id', util.COMPARTMENT_ID
    ])
    util.validate_response(result)
    json_data = json.loads(result.output)
    assert (len(json_data['data']) > 0)

    # Check that the command works with the --availability-domain option
    result = self.invoke([
        'compute', 'instance', 'list-vnics',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain()
    ])
    util.validate_response(result)
    json_data = json.loads(result.output)
    assert (len(json_data['data']) > 0)

    # Filtering by instance: the test instance has exactly one VNIC.
    result = self.invoke([
        'compute', 'instance', 'list-vnics',
        '--instance-id', self.instance_ocid
    ])
    util.validate_response(result)
    json_data = json.loads(result.output)
    assert (len(json_data['data']) == 1)

    # Check that setting limit to 1 will give us a next page token, and calling with that page will give us 0 items.
    result = self.invoke([
        'compute', 'instance', 'list-vnics',
        '--instance-id', self.instance_ocid,
        '--limit', '1'
    ])
    util.validate_response(result)
    json_data = json.loads(result.output)
    assert ('data' not in json_data or len(json_data['data']) == 1)
    assert ('opc-next-page' in json_data)
    next_page = json_data['opc-next-page']
    # NOTE(review): the result of this follow-up page request is neither
    # validated nor asserted on — confirm whether that is intentional.
    result = self.invoke([
        'compute', 'instance', 'list-vnics',
        '--instance-id', self.instance_ocid,
        '--page', next_page
    ])

    # Grab all the things
    result = self.invoke([
        'compute', 'instance', 'list-vnics',
        '--instance-id', self.instance_ocid,
        '--all'
    ])
    util.validate_response(result)
    json_data = json.loads(result.output)
    assert ('data' not in json_data or len(json_data['data']) == 1)
    assert ('opc-next-page' not in json_data)
def mount_target(filestorage_client, vcn_and_subnet, runner, config_file, config_profile):
    """pytest fixture: create a mount target in the fixture subnet, yield the
    parsed mount target record, then delete it.

    The create and cleanup phases each record/replay HTTP traffic through
    their own VCR cassette.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_mount_target_fixture.yml'):
        # NOTE(review): vcn_id and mount_target_name are assigned but never
        # used below — confirm whether they were meant to be passed to the
        # create command.
        vcn_id = vcn_and_subnet[0]
        subnet_id = vcn_and_subnet[1]
        mount_target_name = util.random_name('cli_test_mt')
        params = [
            'mount-target', 'create',
            '--availability-domain', util.availability_domain(),
            '-c', util.COMPARTMENT_ID,
            '--subnet-id', subnet_id
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        mount_target = json.loads(result.output)['data']
        mount_target_id = mount_target['id']
        # Wait via the SDK client until the mount target becomes ACTIVE.
        test_config_container.do_wait(
            filestorage_client,
            filestorage_client.get_mount_target(mount_target_id),
            'lifecycle_state', 'ACTIVE')

        # exercise CLI get mount target
        params = ['mount-target', 'get', '--mount-target-id', mount_target_id]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
    # Hand the record to the test; everything after this runs as teardown.
    yield mount_target
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_mount_target_fixture_cleanup.yml'):
        params = [
            'mount-target', 'delete',
            '--mount-target-id', mount_target_id,
            '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        util.wait_until([
            'fs', 'mount-target', 'get',
            '--mount-target-id', mount_target_id
        ], 'DELETED', max_wait_seconds=300)
def subtest_volume_operations(self):
    """Create two volumes (one sized in MBs, one in GBs) and verify that
    'volume list' honors every sort-by/sort-order combination."""
    volume_name = util.random_name('cli_test_volume')
    create_params = [
        'volume', 'create',
        '--availability-domain', util.availability_domain(),
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', volume_name
    ]
    # First volume: 50 GB expressed in MBs; second: 50 expressed in GBs.
    self.volume_id = self.volume_operations_internal(
        volume_name, create_params, None, str(50 * 1024))
    self.volume_id_two = self.volume_operations_internal(
        volume_name, create_params, '50', None)

    # (sort-by flag, response field to check, order, whether to page --all)
    sort_cases = [
        ('DISPLAYNAME', 'display-name', 'asc', False),
        ('DISPLAYNAME', 'display-name', 'desc', False),
        ('TIMECREATED', 'time-created', 'asc', True),
        ('TIMECREATED', 'time-created', 'desc', True),
    ]
    for sort_by, response_field, order, fetch_all in sort_cases:
        list_params = [
            'bv', 'volume', 'list', '-c', util.COMPARTMENT_ID,
            '--availability-domain', util.availability_domain(),
            '--sort-by', sort_by, '--sort-order', order
        ]
        if fetch_all:
            list_params.append('--all')
        retrieve_list_and_ensure_sorted(list_params, response_field, order)
def network_resources():
    """pytest fixture: create a VCN and a subnet (recorded via VCR), yield
    their OCIDs as (vcn_ocid, subnet_ocid), then delete both afterwards."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'boot_volume_network_resources_fixture.yml'):
        vcn_name = util.random_name('cli_test_boot_vol')
        cidr_block = "10.0.0.0/16"
        vcn_dns_label = util.random_name('vcn', insert_underscore=False)
        result = invoke([
            'network', 'vcn', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--display-name', vcn_name,
            '--cidr-block', cidr_block,
            '--dns-label', vcn_dns_label,
            '--wait-for-state', 'AVAILABLE',
            '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        # --wait-for-state output interleaves progress text with JSON, so
        # the payload is parsed out of the mixed string.
        util.validate_response(result, json_response_expected=False)
        vcn_ocid = util.get_json_from_mixed_string(result.output)['data']['id']

        subnet_name = util.random_name('cli_test_boot_vol')
        subnet_dns_label = util.random_name('subnet', insert_underscore=False)
        result = invoke([
            'network', 'subnet', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--availability-domain', util.availability_domain(),
            '--display-name', subnet_name,
            '--vcn-id', vcn_ocid,
            '--cidr-block', cidr_block,
            '--dns-label', subnet_dns_label,
            '--wait-for-state', 'AVAILABLE',
            '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(result, expect_etag=True, json_response_expected=False)
        subnet_ocid = util.get_json_from_mixed_string(
            result.output)['data']['id']

        # Hand the OCIDs to the test; the code below runs as teardown.
        yield (vcn_ocid, subnet_ocid)

        # Cleanup: subnet first, then the VCN that contains it.
        result = invoke([
            'network', 'subnet', 'delete',
            '--subnet-id', subnet_ocid, '--force',
            '--wait-for-state', 'TERMINATED',
            '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(result, json_response_expected=False)
        result = util.invoke_command([
            'network', 'vcn', 'delete',
            '--vcn-id', vcn_ocid, '--force',
            '--wait-for-state', 'TERMINATED',
            '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
        ])
        util.validate_response(result, json_response_expected=False)
def test_tutorial(virtual_network, compute, block_storage, config):
    """Run the 'Launching Your First Instance' tutorial: create network
    resources, launch an instance, attach a volume, then tear everything
    down in the finally block."""
    test_id = tests.util.random_number_string()
    print('Running Launching Your First Instance tutorial')
    print('Objects will have ID ' + test_id)
    availability_domain = util.availability_domain()
    compartment = config["tenancy"]
    ssh_file = os.environ['OCI_PYSDK_PUBLIC_SSH_KEY_FILE']
    with open(ssh_file) as f:
        public_key = f.read().strip()
    # Track created resources so teardown only touches what actually exists.
    vcn = None
    subnet = None
    instance = None
    volume = None
    attachment = None
    with test_config_container.create_vcr().use_cassette('launch_instance_tutorial.yml'):
        try:
            vcn = create_cloud_network(virtual_network, compartment, test_id)
            subnet = create_subnet(virtual_network, compartment, test_id, availability_domain, vcn)
            gateway = create_internet_gateway(virtual_network, compartment, test_id, vcn)
            update_route_table(virtual_network, test_id, vcn, gateway)
            # There's a bug where the instance will immediately terminate if we
            # don't add some extra wait time before launching. (COM-79)
            time.sleep(15)
            instance = launch_instance(
                compute, compartment, test_id, availability_domain, subnet, public_key)
            log_public_ip_address(compute, virtual_network, compartment, instance)
            volume = create_volume(block_storage, compartment, test_id, availability_domain)
            attachment = attach_volume(compute, compartment, instance, volume)
        except Exception as e:
            print('Exception during creation phase: ' + str(e))
            raise
        finally:
            # Teardown in reverse dependency order.
            # NOTE(review): unlike the sibling version of this test, the
            # internet gateway is never deleted here and the route table
            # still references it — confirm VCN deletion succeeds in that
            # state.
            if volume:
                if attachment:
                    detach_volume(compute, attachment)
                delete_volume(block_storage, volume)
            if instance:
                terminate_instance(compute, instance)
            if subnet:
                delete_subnet(virtual_network, subnet)
            if vcn:
                delete_cloud_network(virtual_network, vcn)
def subtest_volume_group_operations(self):
    """Create three volumes, then aggregate them into a new volume group."""
    # create volumes to add to a volume group
    for _ in range(3):
        name = util.random_name('cli_test_volume')
        volume_params = [
            'volume', 'create',
            '--availability-domain', util.availability_domain(),
            '--compartment-id', util.COMPARTMENT_ID,
            '--display-name', name
        ]
        created = self.volume_operations_internal(name, volume_params, '50', None)
        self.volumes.append(created)

    group_name = util.random_name('cli_test_volume_group')
    group_source = {'type': 'volumeIds', 'volumeIds': self.volumes}
    group_params = [
        'volume-group', 'create',
        '--availability-domain', util.availability_domain(),
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', group_name,
        '--source-details', json.dumps(group_source)
    ]
    self.volume_group, self.volume_ids = self.volume_group_operations_internal(
        group_name, group_params)
def subtest_subnet_operations(self):
    """Create, list, filter, update and get a subnet in the test VCN,
    verifying the dns-label survives the update."""
    subnet_name = util.random_name('cli_test_subnet')
    cidr_block = "10.0.0.0/16"
    # Build the --security-list-ids JSON from the CLI's documented example,
    # substituting in the security list created earlier (self.sl_ocid).
    security_list_ids = util.remove_outer_quotes(
        oci_cli_virtual_network.virtualnetwork_cli_extended.
        network_create_subnet_security_list_ids_example.format(
            sl_id=self.sl_ocid))
    subnet_dns_label = util.random_name('subnet', insert_underscore=False)
    result = self.invoke([
        'subnet', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', subnet_name,
        '--vcn-id', self.vcn_ocid,
        '--cidr-block', cidr_block,
        '--security-list-ids', security_list_ids,
        '--dns-label', subnet_dns_label
    ])
    self.subnet_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until(
        ['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid],
        'AVAILABLE', max_wait_seconds=300)

    result = self.invoke([
        'subnet', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--vcn-id', self.vcn_ocid
    ])
    util.validate_response(result)

    self.run_list_filter_verification('subnet', subnet_name)

    subnet_name = subnet_name + "_updated"
    result = self.invoke([
        'subnet', 'update',
        '--subnet-id', self.subnet_ocid,
        '--display-name', subnet_name
    ])
    util.validate_response(result, expect_etag=True)

    result = self.invoke(
        ['subnet', 'get', '--subnet-id', self.subnet_ocid])
    util.validate_response(result, expect_etag=True)
    subnet_response = json.loads(result.output)
    # The dns-label set at creation time must be preserved across update.
    assert subnet_response['data']['dns-label'] == subnet_dns_label
def subtest_volume_group_clone_operations(self):
    """Clone the existing volume group and record the clone and its volumes
    on self.volume_group_clone / self.volume_clones."""
    # clone a volume group
    clone_name = util.random_name('cli_test_volume_group_clone')
    clone_source = {
        'type': 'volumeGroupId',
        'volumeGroupId': self.volume_group
    }
    clone_params = [
        'volume-group', 'create',
        '--availability-domain', util.availability_domain(),
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', clone_name,
        '--source-details', json.dumps(clone_source)
    ]
    self.volume_group_clone, self.volume_clones = self.volume_group_operations_internal(
        clone_name, clone_params)
def subtest_launch_instance_ssh_authorized_keys_in_param_and_in_metadata_throws_error(self):
    """Launching with --ssh-authorized-keys-file while the metadata also
    carries ssh keys must be rejected by the CLI."""
    display_name = util.random_name('cli_test_instance_options')
    image = util.oracle_linux_image()
    instance_shape = 'VM.Standard1.2'
    host_label = util.random_name('bminstance', insert_underscore=False)
    metadata_arg = util.remove_outer_quotes(
        oci_cli_compute.compute_cli_extended.compute_instance_launch_metadata_example)
    launch_result = util.invoke_command([
        'compute', 'instance', 'launch',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', display_name,
        '--subnet-id', self.subnet_ocid,
        '--image-id', image,
        '--shape', instance_shape,
        '--hostname-label', host_label + "2",
        '--ssh-authorized-keys-file', util.SSH_AUTHORIZED_KEYS_FILE,
        '--metadata', metadata_arg
    ])
    # The conflicting options should produce a non-zero exit code.
    assert launch_result.exit_code != 0
def subtest_launch_instance_user_data_in_param_and_in_metadata_throws_error(
        self):
    """Launching with --user-data-file while the metadata already contains
    a user_data entry must be rejected by the CLI."""
    display_name = util.random_name('cli_test_instance_options')
    image = util.oracle_linux_image()
    instance_shape = 'VM.Standard1.2'
    host_label = util.random_name('bminstance', insert_underscore=False)
    # Metadata that already carries user_data, conflicting with the file.
    metadata = """{"user_data": "IyEvYmluL2Jhc2gKCm1rZGlyIC90bXAvbXlkaXIKdG91Y2ggL3RtcC9teWRpci9teXR4dC50eHQ="}"""
    launch_result = util.invoke_command([
        'compute', 'instance', 'launch',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', display_name,
        '--subnet-id', self.subnet_ocid,
        '--image-id', image,
        '--shape', instance_shape,
        '--hostname-label', host_label,
        '--user-data-file', USER_DATA_FILE,
        '--metadata', metadata
    ])
    # The conflicting options should produce a non-zero exit code.
    assert launch_result.exit_code != 0
def test_list_and_update_mount_targets(mount_target, runner, config_file, config_profile):
    """List mount targets, then rename one and verify the new display name."""
    list_params = [
        'mount-target', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain()
    ]
    list_result = invoke(runner, config_file, config_profile, list_params)
    util.validate_response(list_result)

    updated_name = util.random_name('up_cli_test_mt')
    update_params = [
        'mount-target', 'update',
        '--mount-target-id', mount_target['id'],
        '--display-name', updated_name
    ]
    update_result = invoke(runner, config_file, config_profile, update_params)
    util.validate_response(update_result)
    # The update response must echo the new display name back.
    assert json.loads(update_result.output)['data']['display-name'] == updated_name
def test_crud_export_set(mount_target, runner, config_file, config_profile):
    """List, update and get export sets for the fixture mount target."""
    result = invoke(runner, config_file, config_profile, [
        'export-set', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain()
    ])
    util.validate_response(result)

    # The mount target's export set must appear in the listing.
    target_export_set_id = mount_target['export-set-id']
    export_sets = json.loads(result.output)['data']
    assert any(es['id'] == target_export_set_id for es in export_sets)

    new_name = util.random_name('up_cli_test_es')
    result = invoke(runner, config_file, config_profile, [
        'export-set', 'update',
        '--export-set-id', target_export_set_id,
        '--display-name', new_name
    ])
    util.validate_response(result)
    assert json.loads(result.output)['data']['display-name'] == new_name

    result = invoke(runner, config_file, config_profile, [
        'export-set', 'get',
        '--export-set-id', target_export_set_id
    ])
    util.validate_response(result)
def set_up_vcn_and_vlan(self, cidr_block):
    """Create a VCN and a VLAN inside it, both spanning *cidr_block*.

    Stores the OCIDs on self.vcn_ocid / self.vlan_ocid and waits for each
    resource to reach AVAILABLE.
    """
    # Create a VCN
    vcn_name = util.random_name('cli_test_compute_vcn')
    result = self.invoke([
        'network', 'vcn', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', vcn_name,
        '--dns-label', 'clivcn',
        '--cidr-block', cidr_block
    ])
    util.validate_response(result, expect_etag=True)
    self.vcn_ocid = util.find_id_in_response(result.output)
    util.wait_until(['network', 'vcn', 'get', '--vcn-id', self.vcn_ocid],
                    'AVAILABLE', max_wait_seconds=300)

    # Create a vlan
    vlan_name = util.random_name('cli_test_compute_vlan')
    result = self.invoke([
        'network', 'vlan', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', vlan_name,
        '--vcn-id', self.vcn_ocid,
        '--cidr-block', cidr_block,
    ])
    util.validate_response(result, expect_etag=True)
    self.vlan_ocid = util.find_id_in_response(result.output)
    util.wait_until(
        ['network', 'vlan', 'get', '--vlan-id', self.vlan_ocid],
        'AVAILABLE', max_wait_seconds=300)
def test_boot_volume_clone_backup(network_resources):
    """End-to-end boot volume lifecycle test.

    Launches an instance with a custom boot volume size, preserves the boot
    volume on terminate, resizes it, backs it up, exercises the boot-volume
    create validation errors, clones it (verifying backup-policy assignment),
    restores from backup, and launches instances from both the clone and the
    restored volume. ``network_resources[1]`` is used as the subnet OCID.
    """
    with test_config_container.create_vcr(cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette('boot_volume_test_boot_volume_clone_backup.yml'):
        # Track created resources so the finally block can clean them up; each
        # is reset to None once it has been cleaned up in-line.
        boot_volume_id = None
        instance_ocid = None
        backup_boot_volume_id = None
        cloned_boot_volume_id = None
        backup_id = None
        try:
            instance_name = util.random_name('boot_vol_instance')
            image_id = util.oracle_linux_image()
            shape = 'VM.Standard1.1'
            hostname_label = util.random_name('bootvolinst', insert_underscore=False)
            requested_boot_volume_size_in_gbs = '51'

            # Launch an instance with a custom boot volume size
            result = invoke([
                'compute', 'instance', 'launch',
                '--compartment-id', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--display-name', instance_name,
                '--subnet-id', network_resources[1],
                '--image-id', image_id,
                '--shape', shape,
                '--hostname-label', hostname_label,
                '--boot-volume-size-in-gbs', requested_boot_volume_size_in_gbs,
                '--wait-for-state', 'RUNNING',
                '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
            ])
            util.validate_response(result, json_response_expected=False)
            instance_data = util.get_json_from_mixed_string(result.output)['data']
            instance_ocid = instance_data['id']
            assert 'image' == instance_data['source-details']['source-type']
            assert image_id == instance_data['source-details']['image-id']

            # Find the single boot volume attached to the instance
            result = invoke([
                'compute', 'boot-volume-attachment', 'list',
                '-c', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--instance-id', instance_data['id']
            ])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            assert len(parsed_result['data']) == 1
            boot_volume_id = parsed_result['data'][0]['boot-volume-id']

            result = invoke(['bv', 'boot-volume', 'get', '--boot-volume-id', boot_volume_id])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            # BUG FIX: the original overwrote the requested size variable with
            # the actual size and then asserted the actual size equals itself,
            # which could never fail. Compare actual vs. requested instead.
            actual_boot_volume_size_in_gbs = parsed_result['data']['size-in-gbs']
            assert int(requested_boot_volume_size_in_gbs) == int(actual_boot_volume_size_in_gbs)

            # Terminate the instance but keep its boot volume
            result = invoke([
                'compute', 'instance', 'terminate',
                '--instance-id', instance_ocid,
                '--wait-for-state', 'TERMINATED',
                '--preserve-boot-volume', 'true',
                '--force'
            ])
            util.validate_response(result, json_response_expected=False)
            instance_ocid = None

            # Since we preserved the volume it should still be available
            result = invoke(['bv', 'boot-volume', 'get', '--boot-volume-id', boot_volume_id])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            assert util.availability_domain() == parsed_result['data']['availability-domain']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert image_id == parsed_result['data']['image-id']
            size_in_gbs = int(parsed_result['data']['size-in-gbs'])
            new_size_in_gbs = size_in_gbs + 10

            # Resize boot volume to new_size_in_gbs
            result = invoke(['bv', 'boot-volume', 'update',
                             '--boot-volume-id', boot_volume_id,
                             '--size-in-gbs', str(new_size_in_gbs),
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)

            # The resized volume should still be available with the new size
            result = invoke(['bv', 'boot-volume', 'get', '--boot-volume-id', boot_volume_id])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert new_size_in_gbs == int(parsed_result['data']['size-in-gbs'])

            # Take a backup
            result = invoke(['bv', 'boot-volume-backup', 'create',
                             '--boot-volume-id', boot_volume_id,
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)
            parsed_result = util.get_json_from_mixed_string(result.output)
            assert boot_volume_id == parsed_result['data']['boot-volume-id']
            assert image_id == parsed_result['data']['image-id']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            backup_id = parsed_result['data']['id']

            # Boot Volume Create Error cases
            # Error 1: No source option specified
            result = invoke(['bv', 'boot-volume', 'create',
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            assert "An empty boot volume cannot be created. Please specify either --boot-volume-backup-id, --source-boot-volume-id or --source-volume-replica-id" in result.output

            # Error 2: Both source options specified. boot_volume_id[0] is the
            # first character of the OCID - a deliberately bogus value.
            result = invoke(['bv', 'boot-volume', 'create',
                             '--source-boot-volume-id', boot_volume_id[0],
                             '--boot-volume-backup-id', boot_volume_id[0],
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            assert "You can only specify one of either --source-boot-volume-id, --boot-volume-backup-id or --source-volume-replica-id option" in result.output

            # Clone the boot volume (Error 1: Invalid Boot Volume ID)
            result = invoke(['bv', 'boot-volume', 'create',
                             '--source-boot-volume-id', boot_volume_id[0],
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_service_error(result, error_message="InvalidParameter")

            backup_policy_ids = get_backup_policy_ids()
            create_new_size_in_gbs = new_size_in_gbs + 10

            # Clone the boot volume with bronze backup policy and larger size
            result = invoke(['bv', 'boot-volume', 'create',
                             '--source-boot-volume-id', boot_volume_id,
                             '--backup-policy-id', backup_policy_ids["bronze"],
                             '--wait-for-state', 'AVAILABLE',
                             '--size-in-gbs', str(create_new_size_in_gbs),
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)
            parsed_result = util.get_json_from_mixed_string(result.output)
            assert util.availability_domain() == parsed_result['data']['availability-domain']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert image_id == parsed_result['data']['image-id']
            assert create_new_size_in_gbs == int(parsed_result['data']['size-in-gbs'])
            cloned_boot_volume_id = parsed_result['data']['id']

            # Verify the backup policy was assigned to the clone
            result = invoke(['bv', 'volume-backup-policy-assignment',
                             'get-volume-backup-policy-asset-assignment',
                             '--asset-id', cloned_boot_volume_id])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            backup_policy_assignment_id = parsed_result["data"][0]["id"]
            assert parsed_result["data"][0]["policy-id"] == backup_policy_ids["bronze"]

            # Remove backup policy
            result = invoke(['bv', 'volume-backup-policy-assignment', 'delete',
                             '--policy-assignment-id', backup_policy_assignment_id,
                             '--force'])
            util.validate_response(result)

            # Change backup policy to silver
            result = invoke(['bv', 'volume-backup-policy-assignment', 'create',
                             '--asset-id', cloned_boot_volume_id,
                             '--policy-id', backup_policy_ids['silver']])
            util.validate_response(result)
            parsed_result = json.loads(result.output)
            backup_policy_assignment_id = parsed_result["data"]["id"]
            assert parsed_result["data"]["policy-id"] == backup_policy_ids["silver"]

            # Remove the backup policy
            result = invoke(['bv', 'volume-backup-policy-assignment', 'delete',
                             '--policy-assignment-id', backup_policy_assignment_id,
                             '--force'])
            util.validate_response(result)

            # We can now launch an instance using the cloned boot volume
            result = invoke([
                'compute', 'instance', 'launch',
                '--compartment-id', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--display-name', instance_name,
                '--subnet-id', network_resources[1],
                '--shape', shape,
                '--hostname-label', hostname_label,
                '--source-boot-volume-id', cloned_boot_volume_id,
                '--wait-for-state', 'RUNNING',
                '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
            ])
            util.validate_response(result, json_response_expected=False)
            instance_data = util.get_json_from_mixed_string(result.output)['data']
            instance_ocid = instance_data['id']
            assert 'bootVolume' == instance_data['source-details']['source-type']
            assert cloned_boot_volume_id == instance_data['source-details']['boot-volume-id']
            clean_up_instances(instance_ocid)
            cloned_boot_volume_id = None
            instance_ocid = None

            # Delete existing boot volume
            clean_up_boot_volume(boot_volume_id)
            boot_volume_id = None

            # Create boot volume from backup (Error 1: Invalid Backup Volume ID
            # - backup_id[0] is just the first character of the backup OCID)
            result = invoke(['bv', 'boot-volume', 'create',
                             '--boot-volume-backup-id', backup_id[0],
                             '--availability-domain', util.availability_domain(),
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_service_error(result, error_message="InvalidParameter")

            # Create boot volume from backup (Error 2: Availability domain not specified)
            result = invoke(['bv', 'boot-volume', 'create',
                             '--boot-volume-backup-id', backup_id,
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            assert "An availability domain must be specified when restoring a boot volume from backup" in result.output

            # Create boot volume from backup
            result = invoke(['bv', 'boot-volume', 'create',
                             '--boot-volume-backup-id', backup_id,
                             '--availability-domain', util.availability_domain(),
                             '--wait-for-state', 'AVAILABLE',
                             '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS])
            util.validate_response(result, json_response_expected=False)
            parsed_result = util.get_json_from_mixed_string(result.output)
            assert util.availability_domain() == parsed_result['data']['availability-domain']
            assert 'AVAILABLE' == parsed_result['data']['lifecycle-state']
            assert image_id == parsed_result['data']['image-id']
            backup_boot_volume_id = parsed_result['data']['id']

            # We can now launch an instance using the restored boot volume
            result = invoke([
                'compute', 'instance', 'launch',
                '--compartment-id', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--display-name', instance_name,
                '--subnet-id', network_resources[1],
                '--shape', shape,
                '--hostname-label', hostname_label,
                '--source-boot-volume-id', backup_boot_volume_id,
                '--wait-for-state', 'RUNNING',
                '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS
            ])
            util.validate_response(result, json_response_expected=False)
            instance_data = util.get_json_from_mixed_string(result.output)['data']
            instance_ocid = instance_data['id']
            assert 'bootVolume' == instance_data['source-details']['source-type']
            assert backup_boot_volume_id == instance_data['source-details']['boot-volume-id']
            clean_up_instances(instance_ocid)
            backup_boot_volume_id = None
            instance_ocid = None
        finally:
            # Best-effort cleanup of anything not already cleaned up in-line
            clean_up_instances(instance_ocid)
            clean_up_boot_volume(boot_volume_id)
            clean_up_boot_volume(cloned_boot_volume_id)
            clean_up_boot_volume(backup_boot_volume_id)
            clean_up_boot_volume_backup(backup_id)
def vcn_and_subnets(network_client):
    """Pytest yield-fixture: creates a VCN plus one subnet in each of two
    availability domains via the OCI Python SDK, yields
    ``[vcn_ocid, subnet_ocid_1, subnet_ocid_2]``, and deletes everything
    afterwards (404s during teardown are tolerated)."""
    from tests import util
    with test_config_container.create_vcr().use_cassette(
            '_conftest_fixture_vcn_and_subnets.yml'):
        # create VCN
        vcn_name = util.random_name('cli_lb_test_vcn')
        cidr_block = "10.0.0.0/16"
        vcn_dns_label = util.random_name('vcn', insert_underscore=False)
        create_vcn_details = oci.core.models.CreateVcnDetails()
        create_vcn_details.cidr_block = cidr_block
        create_vcn_details.display_name = vcn_name
        create_vcn_details.compartment_id = os.environ['OCI_CLI_COMPARTMENT_ID']
        create_vcn_details.dns_label = vcn_dns_label
        result = network_client.create_vcn(create_vcn_details)
        vcn_ocid = result.data.id
        assert result.status == 200
        oci.wait_until(network_client, network_client.get_vcn(vcn_ocid),
                       'lifecycle_state', 'AVAILABLE', max_wait_seconds=300,
                       max_interval_seconds=WAIT_INTERVAL_SECONDS)

        # create subnet in first AD
        subnet_name = util.random_name('cli_lb_test_subnet')
        cidr_block = "10.0.1.0/24"
        subnet_dns_label = util.random_name('subnet', insert_underscore=False)
        create_subnet_details = oci.core.models.CreateSubnetDetails()
        create_subnet_details.compartment_id = os.environ['OCI_CLI_COMPARTMENT_ID']
        create_subnet_details.availability_domain = util.availability_domain()
        create_subnet_details.display_name = subnet_name
        create_subnet_details.vcn_id = vcn_ocid
        create_subnet_details.cidr_block = cidr_block
        create_subnet_details.dns_label = subnet_dns_label
        result = network_client.create_subnet(create_subnet_details)
        subnet_ocid_1 = result.data.id
        assert result.status == 200
        oci.wait_until(network_client, network_client.get_subnet(subnet_ocid_1),
                       'lifecycle_state', 'AVAILABLE', max_wait_seconds=300,
                       max_interval_seconds=WAIT_INTERVAL_SECONDS)

        # create subnet in second AD
        subnet_name = util.random_name('cli_lb_test_subnet')
        cidr_block = "10.0.0.0/24"
        subnet_dns_label = util.random_name('subnet2', insert_underscore=False)
        create_subnet_details = oci.core.models.CreateSubnetDetails()
        create_subnet_details.compartment_id = os.environ['OCI_CLI_COMPARTMENT_ID']
        create_subnet_details.availability_domain = util.second_availability_domain()
        create_subnet_details.display_name = subnet_name
        create_subnet_details.vcn_id = vcn_ocid
        create_subnet_details.cidr_block = cidr_block
        create_subnet_details.dns_label = subnet_dns_label
        result = network_client.create_subnet(create_subnet_details)
        subnet_ocid_2 = result.data.id
        assert result.status == 200
        oci.wait_until(network_client, network_client.get_subnet(subnet_ocid_2),
                       'lifecycle_state', 'AVAILABLE', max_wait_seconds=300,
                       max_interval_seconds=WAIT_INTERVAL_SECONDS)

    yield [vcn_ocid, subnet_ocid_1, subnet_ocid_2]

    # For some reason VCR doesn't like that the post-yield stuff here is all in
    # one cassette. Splitting into different cassettes seems to work
    with test_config_container.create_vcr().use_cassette(
            '_conftest_fixture_vcn_and_subnets_delete.yml'):
        # delete VCN and subnets; a 404 just means the resource is already gone
        network_client.delete_subnet(subnet_ocid_1)
        try:
            oci.wait_until(network_client, network_client.get_subnet(subnet_ocid_1),
                           'lifecycle_state', 'TERMINATED', max_wait_seconds=600,
                           max_interval_seconds=WAIT_INTERVAL_SECONDS)
        except oci.exceptions.ServiceError as error:
            if not hasattr(error, 'status') or error.status != 404:
                util.print_latest_exception(error)
        network_client.delete_subnet(subnet_ocid_2)
        try:
            oci.wait_until(network_client, network_client.get_subnet(subnet_ocid_2),
                           'lifecycle_state', 'TERMINATED', max_wait_seconds=600,
                           max_interval_seconds=WAIT_INTERVAL_SECONDS)
        except oci.exceptions.ServiceError as error:
            if not hasattr(error, 'status') or error.status != 404:
                util.print_latest_exception(error)
        network_client.delete_vcn(vcn_ocid)
def subtest_subnet_secondary_ip_operations(self):
    """Exercise secondary private IP assign / move / unassign / delete flows
    across two instances (two VNICs) in the same subnet, including the
    expected error and no-op cases."""
    self.set_up_vcn_and_subnet("10.0.0.0/16")
    available_ip_addresses = self.get_ip_addresses_from_cidr("10.0.0.0/16")

    # First we need to launch two instances and get their VNICs. We get two instances
    # so that we can move the secondary private IP around. The instances need to be
    # in the same subnet for the secondary private IP address moves to be valid
    image_id = util.oracle_linux_image()
    shape = 'VM.Standard1.1'
    first_instance_name = util.random_name('cli_test_instance')
    result = self.invoke([
        'compute', 'instance', 'launch',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', first_instance_name,
        '--subnet-id', self.subnet_ocid,
        '--image-id', image_id,
        '--shape', shape
    ])
    self.first_instance_id = util.find_id_in_response(result.output)
    second_instance_name = util.random_name('cli_test_instance')
    result = self.invoke([
        'compute', 'instance', 'launch',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', second_instance_name,
        '--subnet-id', self.subnet_ocid,
        '--image-id', image_id,
        '--shape', shape
    ])
    self.second_instance_id = util.find_id_in_response(result.output)
    util.wait_until(['compute', 'instance', 'get', '--instance-id',
                     self.first_instance_id], 'RUNNING', max_wait_seconds=600)
    util.wait_until(['compute', 'instance', 'get', '--instance-id',
                     self.second_instance_id], 'RUNNING', max_wait_seconds=600)

    vnics_on_instance_result = self.invoke([
        'compute', 'instance', 'list-vnics', '--instance-id', self.first_instance_id
    ])
    vnics = json.loads(vnics_on_instance_result.output)
    first_vnic_id = vnics['data'][0]['id']
    first_vnic_primary_private_ip = vnics['data'][0]['private-ip']
    # So we don't try and re-use the IP address unintentionally
    available_ip_addresses.remove(first_vnic_primary_private_ip)
    vnics_on_instance_result = self.invoke([
        'compute', 'instance', 'list-vnics', '--instance-id', self.second_instance_id
    ])
    vnics = json.loads(vnics_on_instance_result.output)
    second_vnic_id = vnics['data'][0]['id']
    second_vnic_primary_private_ip = vnics['data'][0]['private-ip']
    available_ip_addresses.remove(second_vnic_primary_private_ip)

    # Running the assign command against a non-existent VNIC fails
    fudged_vnic_id = self.fudge_ocid(first_vnic_id)
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip', '--vnic-id', fudged_vnic_id
    ])
    self.assertNotEqual(0, result.exit_code)
    assert 'Either VNIC with ID {} does not exist or you are not authorized to access it.'.format(
        fudged_vnic_id) in result.output

    # Most basic call with VNIC only - in this case we assign the IP automatically
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip', '--vnic-id', first_vnic_id
    ])
    first_secondary_private_ip_data = json.loads(result.output)['data']
    first_secondary_private_ip_id = first_secondary_private_ip_data['id']
    first_secondary_private_ip_address = first_secondary_private_ip_data['ip-address']
    available_ip_addresses.remove(first_secondary_private_ip_address)

    # Assign a new secondary IP with all parameters given
    second_secondary_private_ip_address = available_ip_addresses.pop()
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip',
        '--vnic-id', first_vnic_id,
        '--ip-address', second_secondary_private_ip_address,
        '--display-name', 'My second secondary',
        '--hostname-label', 'secondary-1',
        # The --unassign-if-already-assigned should not have an impact as the IP address doesn't exist
        '--unassign-if-already-assigned'
    ])
    second_secondary_private_ip_data = json.loads(result.output)['data']
    second_secondary_private_ip_id = second_secondary_private_ip_data['id']
    self.assertEqual(second_secondary_private_ip_address,
                     second_secondary_private_ip_data['ip-address'])

    # Checkpoint by listing the private IPs. Our created secondaries should be there
    result = self.invoke(
        ['network', 'private-ip', 'list', '--vnic-id', first_vnic_id])
    private_ips = json.loads(result.output)['data']
    self.assertEqual(3, len(private_ips))
    self.find_private_ip_and_do_assertions(
        private_ips, first_secondary_private_ip_id,
        first_secondary_private_ip_address, None, None)
    self.find_private_ip_and_do_assertions(
        private_ips, second_secondary_private_ip_id,
        second_secondary_private_ip_address, 'My second secondary', 'secondary-1')

    # Trying to assign the same private IP to the same VNIC is a no-op
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip',
        '--vnic-id', first_vnic_id,
        '--ip-address', first_secondary_private_ip_address
    ])
    assert 'Taking no action as IP address {} is already assigned to VNIC {}'.format(
        first_secondary_private_ip_address, first_vnic_id) in result.output

    # Trying to move a primary IP fails (in either direction)
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip',
        '--vnic-id', first_vnic_id,
        '--ip-address', second_vnic_primary_private_ip,
        '--unassign-if-already-assigned'
    ])
    self.assertNotEqual(0, result.exit_code)
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip',
        '--vnic-id', second_vnic_id,
        '--ip-address', first_vnic_primary_private_ip,
        '--unassign-if-already-assigned'
    ])
    self.assertNotEqual(0, result.exit_code)

    # Trying to move an existing IP address without saying "unassign" fails
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip',
        '--vnic-id', second_vnic_id,
        '--ip-address', first_secondary_private_ip_address
    ])
    target_message = 'IP address {} is already assigned to a different VNIC: {}. To reassign it, re-run this command with the --unassign-if-already-assigned option'.format(
        first_secondary_private_ip_address, first_vnic_id)
    assert target_message in result.output
    self.assertNotEqual(0, result.exit_code)

    # Move the secondary IP and also update some information
    result = self.invoke([
        'network', 'vnic', 'assign-private-ip',
        '--vnic-id', second_vnic_id,
        '--ip-address', first_secondary_private_ip_address,
        '--display-name', 'My first secondary',
        '--hostname-label', 'moved-first-secondary-1',
        '--unassign-if-already-assigned'
    ])
    private_ip_data_after_move = json.loads(result.output)['data']
    self.assertEqual(first_secondary_private_ip_id,
                     private_ip_data_after_move['id'])
    self.assertEqual(first_secondary_private_ip_address,
                     private_ip_data_after_move['ip-address'])
    self.assertEqual('My first secondary',
                     private_ip_data_after_move['display-name'])
    self.assertEqual('moved-first-secondary-1',
                     private_ip_data_after_move['hostname-label'])

    # List each VNIC - we expect 2 results per list call (1 x primary private and 1 x secondary private per VNIC) after moving stuff around
    result = self.invoke(
        ['network', 'private-ip', 'list', '--vnic-id', first_vnic_id])
    private_ips = json.loads(result.output)['data']
    self.assertEqual(2, len(private_ips))
    self.ensure_private_ip_record_not_present(
        private_ips, first_secondary_private_ip_id)
    self.find_private_ip_and_do_assertions(
        private_ips, second_secondary_private_ip_id,
        second_secondary_private_ip_address, 'My second secondary', 'secondary-1')
    result = self.invoke(
        ['network', 'private-ip', 'list', '--vnic-id', second_vnic_id])
    private_ips = json.loads(result.output)['data']
    self.assertEqual(2, len(private_ips))
    self.ensure_private_ip_record_not_present(
        private_ips, second_secondary_private_ip_id)
    self.find_private_ip_and_do_assertions(
        private_ips, first_secondary_private_ip_id,
        first_secondary_private_ip_address, 'My first secondary',
        'moved-first-secondary-1')

    # Listing by subnet should give us 4 records (2 x primary private and 2 x secondary private) as it queries across all VNICs in the subnet
    result = self.invoke(
        ['network', 'private-ip', 'list', '--subnet-id', self.subnet_ocid])
    private_ips = json.loads(result.output)['data']
    self.assertEqual(4, len(private_ips))
    self.find_private_ip_and_do_assertions(
        private_ips, first_secondary_private_ip_id,
        first_secondary_private_ip_address, 'My first secondary',
        'moved-first-secondary-1')
    self.find_private_ip_and_do_assertions(
        private_ips, second_secondary_private_ip_id,
        second_secondary_private_ip_address, 'My second secondary', 'secondary-1')

    # Update the display name and hostname
    result = self.invoke([
        'network', 'private-ip', 'update',
        '--private-ip-id', second_secondary_private_ip_id,
        '--display-name', 'batman display name',
        '--hostname-label', 'batman-secondary-1'
    ])
    updated_private_ip_info = json.loads(result.output)['data']
    self.assertEqual(second_secondary_private_ip_id, updated_private_ip_info['id'])
    self.assertEqual(second_secondary_private_ip_address,
                     updated_private_ip_info['ip-address'])
    self.assertEqual(first_vnic_id, updated_private_ip_info['vnic-id'])
    self.assertEqual('batman display name',
                     updated_private_ip_info['display-name'])
    self.assertEqual('batman-secondary-1',
                     updated_private_ip_info['hostname-label'])

    # Do a get and confirm the information which we receive
    result = self.invoke([
        'network', 'private-ip', 'get',
        '--private-ip-id', second_secondary_private_ip_id
    ])
    private_ip_info_from_get = json.loads(result.output)['data']
    self.assertEqual(second_secondary_private_ip_id,
                     private_ip_info_from_get['id'])
    self.assertEqual(second_secondary_private_ip_address,
                     private_ip_info_from_get['ip-address'])
    self.assertEqual(first_vnic_id, private_ip_info_from_get['vnic-id'])
    self.assertEqual('batman display name',
                     private_ip_info_from_get['display-name'])
    self.assertEqual('batman-secondary-1',
                     private_ip_info_from_get['hostname-label'])

    # Running the unassign command against a non-existent VNIC fails
    result = self.invoke([
        'network', 'vnic', 'unassign-private-ip',
        '--vnic-id', fudged_vnic_id,
        '--ip-address', second_secondary_private_ip_address
    ])
    self.assertNotEqual(0, result.exit_code)
    # The error message from the service is not being sent correctly to the CLI. The Error code is correct.
    # This needs to be investigated
    # assert 'Either VNIC with ID {} does not exist or you are not authorized to access it.'.format(fudged_vnic_id) in result.output

    # Unassigning an IP address not in the VNIC fails
    result = self.invoke([
        'network', 'vnic', 'unassign-private-ip',
        '--vnic-id', second_vnic_id,
        '--ip-address', second_secondary_private_ip_address
    ])
    assert 'IP address {} was not found on VNIC {}'.format(
        second_secondary_private_ip_address, second_vnic_id) in result.output
    self.assertNotEqual(0, result.exit_code)

    # Unassigning a primary private IP address is not supported
    result = self.invoke([
        'network', 'vnic', 'unassign-private-ip',
        '--vnic-id', second_vnic_id,
        '--ip-address', second_vnic_primary_private_ip
    ])
    assert 'Taking no action as {} is the primary private IP on VNIC {}'.format(
        second_vnic_primary_private_ip, second_vnic_id) in result.output
    self.assertNotEqual(0, result.exit_code)

    # Unassign a secondary private IP
    result = self.invoke([
        'network', 'vnic', 'unassign-private-ip',
        '--vnic-id', second_vnic_id,
        '--ip-address', first_secondary_private_ip_address
    ])
    assert 'Unassigned IP address {} from VNIC {}'.format(
        first_secondary_private_ip_address, second_vnic_id) in result.output

    # Delete a secondary private IP (by its OCID)
    result = self.invoke([
        'network', 'private-ip', 'delete',
        '--private-ip-id', second_secondary_private_ip_id, '--force'
    ])
    self.assertEqual(0, result.exit_code)

    # Listing by VNIC should give us one record (the primary private IP) per call
    result = self.invoke(
        ['network', 'private-ip', 'list', '--vnic-id', first_vnic_id])
    private_ips = json.loads(result.output)['data']
    self.assertEqual(1, len(private_ips))
    self.assertTrue(private_ips[0]['is-primary'])
    result = self.invoke(
        ['network', 'private-ip', 'list', '--vnic-id', second_vnic_id])
    private_ips = json.loads(result.output)['data']
    self.assertEqual(1, len(private_ips))
    self.assertTrue(private_ips[0]['is-primary'])

    # Listing by subnet should give us two records (the primary private IP for each VNIC)
    result = self.invoke(
        ['network', 'private-ip', 'list', '--subnet-id', self.subnet_ocid])
    private_ips = json.loads(result.output)['data']
    self.assertEqual(2, len(private_ips))
    self.assertTrue(private_ips[0]['is-primary'])
    self.assertTrue(private_ips[1]['is-primary'])
def oce_cluster(runner, config_file, config_profile):
    """Pytest yield-fixture: provisions a VCN, six subnets (five AD-specific,
    one regional for the cluster endpoint) and an OKE cluster, exercises
    cluster get/list/update/kubeconfig/endpoint-config, yields
    ``(cluster_id, subnet_0, subnet_1, subnet_2)``, then tears everything down.
    """
    cluster_id = None
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_oce_fixture_cluster.yml'):
        # Create a VCN for Kubernetes cluster
        vcn_name = util.random_name('cli_test_oce_vcn')
        vcn_cidr_block = "10.0.0.0/16"
        pod_cidr_block = "10.96.0.0/16"
        kub_svcs_cidr_block = "10.240.0.0/16"
        params = [
            'network', 'vcn', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--display-name', vcn_name,
            '--cidr-block', vcn_cidr_block
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        vcn_ocid = util.find_id_in_response(result.output)
        util.wait_until(['network', 'vcn', 'get', '--vcn-id', vcn_ocid],
                        'AVAILABLE', max_wait_seconds=PROVISIONING_TIME_SEC)

        # Create 5 subnets: 1st 3 subnets for Kubernetes worker nodes and last 2 subnets for load balancers
        subnet_cidrs = [
            "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24",
            "10.0.5.0/24"
        ]
        subnet_names = list()
        subnet_ocids = list()
        for idx, subnet_cidr_block in enumerate(subnet_cidrs):
            subnet_names.append(util.random_name('cli_test_compute_subnet'))
            params = [
                'network', 'subnet', 'create',
                '--compartment-id', util.COMPARTMENT_ID,
                '--availability-domain', util.availability_domain(),
                '--display-name', subnet_names[idx],
                '--vcn-id', vcn_ocid,
                '--cidr-block', subnet_cidr_block,
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result)
            subnet_ocids.append(util.find_id_in_response(result.output))
            util.wait_until(
                ['network', 'subnet', 'get', '--subnet-id', subnet_ocids[idx]],
                'AVAILABLE', max_wait_seconds=PROVISIONING_TIME_SEC)

        # Create a public regional subnet for the cluster endpoint
        regional_subnet_name = util.random_name('cli_test_compute_subnet')
        subnet_names.append(regional_subnet_name)
        params = [
            'network', 'subnet', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--display-name', regional_subnet_name,
            '--vcn-id', vcn_ocid,
            '--cidr-block', "10.0.6.0/24",
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        regional_subnet_ocid = util.find_id_in_response(result.output)
        subnet_ocids.append(regional_subnet_ocid)
        util.wait_until(
            ['network', 'subnet', 'get', '--subnet-id', regional_subnet_ocid],
            'AVAILABLE', max_wait_seconds=PROVISIONING_TIME_SEC)

        # Find Supported Kubernetes versions
        params = ['ce', 'cluster-options', 'get', '--cluster-option-id', 'all']
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Pick the first version in the response to be used for the test cluster
        # (the second one is used later as the upgrade target)
        kub_version = json.loads(result.output)['data']['kubernetes-versions'][0]
        kub_upgrade_version = json.loads(result.output)['data']['kubernetes-versions'][1]

        # Create a cluster; the LB subnet list is passed as a JSON array string
        cluster_lb_subnets = '["' + subnet_ocids[3] + '", "' + subnet_ocids[4] + '"]'
        cluster_name = util.random_name('cli_oce_cluster_name')
        params = [
            'ce', 'cluster', 'create',
            '--compartment-id', util.COMPARTMENT_ID,
            '--name', cluster_name,
            '--vcn-id', vcn_ocid,
            '--kubernetes-version', kub_version,
            '--dashboard-enabled', 'true',
            '--tiller-enabled', 'true',
            '--pods-cidr', pod_cidr_block,
            '--services-cidr', kub_svcs_cidr_block,
            '--service-lb-subnet-ids', cluster_lb_subnets,
            '--endpoint-subnet-id', regional_subnet_ocid
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Create cluster returns work request. Get work request response to obtain cluster OCID.
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            ['ce', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED', state_property_name='status',
            max_wait_seconds=CLUSTER_CREATE_PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)
        cluster_id = json.loads(get_work_request_result.output)['data']['resources'][0]['identifier']

        # Get a cluster using cluster ID (get_params is reused by the teardown below)
        get_params = ['ce', 'cluster', 'get', '--cluster-id', cluster_id]
        result = invoke(runner, config_file, config_profile, get_params)
        util.validate_response(result)

        # Check the kubeconfig file generation
        params = [
            'ce', 'cluster', 'create-kubeconfig',
            '--cluster-id', cluster_id,
            '--file', 'kubeconfig'
        ]
        invoke(runner, config_file, config_profile, params)
        # Validate the kubernetes config is in valid YAML format
        with open('kubeconfig', 'r') as config:
            config_data = config.read()
            yaml.safe_load(config_data)
        if os.path.exists('kubeconfig'):
            os.remove('kubeconfig')

        # Get the list of clusters in the compartment
        params = ['ce', 'cluster', 'list', '--compartment-id', util.COMPARTMENT_ID]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        assert len(json.loads(result.output)['data']) > 0

        # Update the cluster using cluster ID
        cluster_name = util.random_name('cli_test_oce_cluster')
        params = [
            'ce', 'cluster', 'update',
            '--cluster-id', cluster_id,
            '--name', cluster_name,
            '--kubernetes-version', kub_upgrade_version
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Update cluster returns work request. Get work request response to check the command succeeded
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            ['ce', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED', state_property_name='status',
            max_wait_seconds=CLUSTER_UPDATE_TIME_SEC)
        util.validate_response(get_work_request_result)

        # Get the list of work request logs
        params = [
            'ce', 'work-request-log-entry', 'list',
            '--work-request-id', work_request_id,
            '--compartment-id', util.COMPARTMENT_ID
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Update a cluster endpoint
        params = [
            'ce', 'cluster', 'update-endpoint-config',
            '--cluster-id', cluster_id,
            '--is-public-ip-enabled', 'true',
            '--nsg-ids', '[]'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # Update endpoint config returns a work request. Get work request response to check the command succeeded
        response = json.loads(result.output)
        work_request_id = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            ['ce', 'work-request', 'get', '--work-request-id', work_request_id],
            'SUCCEEDED', state_property_name='status',
            max_wait_seconds=CLUSTER_UPDATE_TIME_SEC)
        util.validate_response(get_work_request_result)

    yield cluster_id, subnet_ocids[0], subnet_ocids[1], subnet_ocids[2]

    # Tear down sequence
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_oce_fixture_cluster_delete.yml'):
        # Delete the cluster
        params = ['ce', 'cluster', 'delete', '--cluster-id', cluster_id, '--force']
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # Get the cluster and check that it moves to DELETED state
        invoke(runner, config_file, config_profile, get_params)
        util.wait_until(get_params, 'DELETED', max_wait_seconds=DELETION_TIME_SEC)

        # Delete the subnets
        for subnet_id in subnet_ocids:
            params = [
                'network', 'subnet', 'delete',
                '--subnet-id', subnet_id,
                '--wait-for-state', 'TERMINATED', '--force'
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result, json_response_expected=False)

        # Delete the VCN
        params = [
            'network', 'vcn', 'delete',
            '--vcn-id', vcn_ocid,
            '--wait-for-state', 'TERMINATED', '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result, json_response_expected=False)

    # Remove any kubeconfig directory created on the test user's behalf
    if os.path.isdir(os.path.expandvars(os.path.expanduser(USER_KUBECONFIG_DIR))):
        shutil.rmtree(USER_KUBECONFIG_DIR)
def subtest_launch_instance_ipxe_script_file_and_extended_metadata(self):
    """Launch an instance passing --ipxe-script-file, --extended-metadata and
    explicit VNIC options, then verify that the launch response and the
    attached VNIC reflect those options.  The instance is deleted at the end.
    """
    instance_name = util.random_name('cli_test_instance_options')
    image_id = util.oracle_linux_image()
    shape = 'VM.Standard1.2'
    hostname_label = util.random_name('bminstance', insert_underscore=False)
    vnic_display_name = 'vnic_display_name'
    private_ip = '10.0.0.15'
    assign_public_ip = 'true'
    # Nested JSON (including an empty object) to exercise extended-metadata
    # passthrough; the nested values are asserted on the response below.
    extended_metadata = '{"a": "1", "b": {"c": "3", "d": {}}}'
    launch_instance_result = util.invoke_command([
        'compute', 'instance', 'launch', '--compartment-id',
        util.COMPARTMENT_ID, '--availability-domain',
        util.availability_domain(), '--display-name', instance_name,
        '--subnet-id', self.subnet_ocid, '--image-id', image_id, '--shape',
        shape, '--ipxe-script-file', IPXE_SCRIPT_FILE, '--hostname-label',
        hostname_label + "1", '--private-ip', private_ip,
        '--assign-public-ip', assign_public_ip, '--vnic-display-name',
        vnic_display_name, '--extended-metadata', extended_metadata
    ])
    temp_instance_ocid = util.find_id_in_response(
        launch_instance_result.output)
    # Track the launched OCID on the test object (presumably so the suite can
    # clean it up even if an assertion below fails — teardown not visible here).
    self.instance_ocids.append(temp_instance_ocid)
    util.validate_response(launch_instance_result, expect_etag=True)
    # The launch response echoes extended-metadata; verify the nested values.
    extended_metadata_result = json.loads(
        launch_instance_result.output)['data']['extended-metadata']
    assert extended_metadata_result['a'] == '1'
    assert extended_metadata_result['b']['c'] == '3'
    # This can be in ATTACHING state for some time
    try:
        util.wait_until([
            'compute', 'vnic-attachment', 'list', '--compartment-id',
            util.COMPARTMENT_ID, '--instance-id', temp_instance_ocid
        ],
            'ATTACHED',
            max_wait_seconds=60,
            item_index_in_list_response=0)
    except Exception:
        try:
            # If it is ATTACHING we will consider it good enough
            util.wait_until([
                'compute', 'vnic-attachment', 'list', '--compartment-id',
                util.COMPARTMENT_ID, '--instance-id', temp_instance_ocid
            ],
                'ATTACHING',
                max_wait_seconds=30,
                item_index_in_list_response=0)
        except Exception:
            # If it is not ATTACHING, double check that it didn't go to ATTACHED
            util.wait_until([
                'compute', 'vnic-attachment', 'list', '--compartment-id',
                util.COMPARTMENT_ID, '--instance-id', temp_instance_ocid
            ],
                'ATTACHED',
                max_wait_seconds=30,
                item_index_in_list_response=0)
    # get vnic attachments for given instance
    list_vnics_result = util.invoke_command([
        'compute', 'vnic-attachment', 'list', '--compartment-id',
        util.COMPARTMENT_ID, '--instance-id', temp_instance_ocid
    ])
    vnic_id = json.loads(list_vnics_result.output)['data'][0]['vnic-id']
    # get full data for vnic attached to new instance (which includes hostname-label)
    get_vnic_result = util.invoke_command(
        ['network', 'vnic', 'get', '--vnic-id', vnic_id])
    vnic = json.loads(get_vnic_result.output)['data']
    assert vnic['hostname-label'] == hostname_label + "1"
    assert vnic['display-name'] == vnic_display_name
    # --assign-public-ip was 'true', so the VNIC must carry a public IP.
    assert vnic['public-ip']
    content = None
    with open(IPXE_SCRIPT_FILE, mode='r') as file:
        content = file.read()
    assert 'ipxe-script' in launch_instance_result.output
    # Just look at the first few characters. Once we hit a line break the formatting will differ.
    assert content[:5] in launch_instance_result.output
    self.delete_instance(temp_instance_ocid)
def vcn_and_subnet(runner, config_file, config_profile, network_client):
    """Yield a freshly created (vcn_ocid, subnet_ocid) pair for tests.

    Creation is recorded under one VCR cassette; teardown after the yield
    runs outside any test function, so it records into its own cassette.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_vcn_and_subnet_fixture.yml'):
        # Build and create the VCN, then wait until it is usable.
        vcn_details = oci.core.models.CreateVcnDetails()
        vcn_details.cidr_block = "10.0.0.0/16"
        vcn_details.display_name = util.random_name('cli_db_test_vcn')
        vcn_details.compartment_id = util.COMPARTMENT_ID
        vcn_details.dns_label = util.random_name('vcn', insert_underscore=False)
        vcn_response = network_client.create_vcn(vcn_details)
        new_vcn_id = vcn_response.data.id
        assert vcn_response.status == 200
        oci.wait_until(network_client, network_client.get_vcn(new_vcn_id),
                       'lifecycle_state', 'AVAILABLE', max_wait_seconds=300)

        # Build and create a subnet in the first availability domain.
        subnet_display_name = util.random_name('python_sdk_test_subnet')
        subnet_dns = util.random_name('subnet', insert_underscore=False) + '1'
        subnet_details = oci.core.models.CreateSubnetDetails()
        subnet_details.compartment_id = util.COMPARTMENT_ID
        subnet_details.availability_domain = util.availability_domain()
        subnet_details.display_name = subnet_display_name
        subnet_details.vcn_id = new_vcn_id
        subnet_details.cidr_block = "10.0.1.0/24"
        subnet_details.dns_label = subnet_dns
        subnet_response = network_client.create_subnet(subnet_details)
        new_subnet_id = subnet_response.data.id
        assert subnet_response.status == 200
        oci.wait_until(network_client, network_client.get_subnet(new_subnet_id),
                       'lifecycle_state', 'AVAILABLE', max_wait_seconds=300)

    yield new_vcn_id, new_subnet_id

    # this code does not run inside the vcr_fixture because it is outside any
    # test function, thus we explicitly create a separate cassette for it here
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_vcn_and_subnet_fixture_cleanup.yml'):
        # Sometimes we can't delete the subnet straight after the mount target
        # because some VNIC is still hanging around. On a 409 conflict, retry
        # a few times before bailing out.
        for attempt in range(5):
            try:
                network_client.delete_subnet(new_subnet_id)
                test_config_container.do_wait(
                    network_client,
                    network_client.get_subnet(new_subnet_id),
                    'lifecycle_state',
                    'TERMINATED',
                    max_wait_seconds=600,
                    succeed_on_not_found=True)
                break
            except oci.exceptions.ServiceError as service_err:
                if service_err.status == 409 and attempt < 4:
                    time.sleep(5)
                else:
                    raise
        network_client.delete_vcn(new_vcn_id)
def set_up_resources(self):
    """Create the fixtures these tests need: an Object Storage bucket, a
    VCN + subnet, a running instance, and a custom image exported from that
    instance.  Every created identifier is stored on ``self`` for later use
    (teardown is not visible in this block).
    """
    # Grab the Object Storage namespace
    result = self.invoke(['os', 'ns', 'get'])
    self.object_storage_namespace = json.loads(result.output)['data']
    # Create a bucket
    print("Creating bucket")
    self.bucket_name = util.random_name('CliImageImportExport')
    result = self.invoke(
        ['os', 'bucket', 'create',
         '--compartment-id', util.COMPARTMENT_ID,
         '--namespace', self.object_storage_namespace,
         '--name', self.bucket_name])
    util.validate_response(result, expect_etag=True)
    # Create a VCN
    print("Creating VCN")
    vcn_name = util.random_name('cli_test_compute_vcn')
    result = self.invoke(
        ['network', 'vcn', 'create',
         '--compartment-id', util.COMPARTMENT_ID,
         '--display-name', vcn_name,
         '--dns-label', 'clivcn',
         '--cidr-block', '10.0.0.0/16'])
    util.validate_response(result, expect_etag=True)
    self.vcn_ocid = util.find_id_in_response(result.output)
    util.wait_until(['network', 'vcn', 'get', '--vcn-id', self.vcn_ocid],
                    'AVAILABLE', max_wait_seconds=300)
    # Create a subnet (its CIDR covers the whole VCN range)
    print("Creating subnet")
    subnet_name = util.random_name('cli_test_compute_subnet')
    result = self.invoke(
        ['network', 'subnet', 'create',
         '--compartment-id', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--display-name', subnet_name,
         '--dns-label', 'clisubnet',
         '--vcn-id', self.vcn_ocid,
         '--cidr-block', '10.0.0.0/16',
         ])
    util.validate_response(result, expect_etag=True)
    self.subnet_ocid = util.find_id_in_response(result.output)
    util.wait_until(['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid],
                    'AVAILABLE', max_wait_seconds=300)
    # Create an instance
    image_id = util.oracle_linux_image()
    shape = 'VM.Standard1.1'
    instance_name = util.random_name('cli_test_instance')
    print("Creating instance " + instance_name)
    result = self.invoke(
        ['compute', 'instance', 'launch',
         '--compartment-id', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--display-name', instance_name,
         '--subnet-id', self.subnet_ocid,
         '--image-id', image_id,
         '--shape', shape])
    util.validate_response(result, expect_etag=True)
    self.instance_id = util.find_id_in_response(result.output)
    util.wait_until(['compute', 'instance', 'get', '--instance-id', self.instance_id],
                    'RUNNING', max_wait_seconds=600)
    # Export an image from the instance to use in tests
    print("Exporting image")
    result = self.invoke(
        ['compute', 'image', 'create',
         '--compartment-id', util.COMPARTMENT_ID,
         '--instance-id', self.instance_id])
    util.validate_response(result, expect_etag=True)
    self.custom_image_id = util.find_id_in_response(result.output)
    # Image creation is slow; allow up to an hour before giving up.
    util.wait_until(['compute', 'image', 'get', '--image-id', self.custom_image_id],
                    'AVAILABLE', max_wait_seconds=3600)