def subtest_dhcp_option_operations(self):
    """Exercise the DHCP options lifecycle: create, list, get, and the three
    update paths (display-name only with no prompt; --options with a y/n
    confirmation prompt; --options with --force), then verify the final
    options payload contains the expected SearchDomain entry.

    Stores the created OCID on ``self.dhcp_options_ocid`` for later subtests.
    """
    dhcp_options_name = util.random_name('cli_test_dhcp_options')
    options = util.remove_outer_quotes(oci_cli_virtual_network.virtualnetwork_cli_extended.network_create_dhcp_options_options_example)
    result = self.invoke(
        ['dhcp-options', 'create',
         '--compartment-id', util.COMPARTMENT_ID,
         '--vcn-id', self.vcn_ocid,
         '--display-name', dhcp_options_name,
         '--options', options])
    util.validate_response(result, expect_etag=True)
    self.dhcp_options_ocid = util.find_id_in_response(result.output)
    util.wait_until(['network', 'dhcp-options', 'get', '--dhcp-id', self.dhcp_options_ocid], 'AVAILABLE')

    result = self.invoke(['dhcp-options', 'list', '--compartment-id', util.COMPARTMENT_ID, '--vcn-id', self.vcn_ocid])
    util.validate_response(result)

    self.run_list_filter_verification('dhcp-options', dhcp_options_name)

    result = self.invoke(['dhcp-options', 'get', '--dhcp-id', self.dhcp_options_ocid])
    util.validate_response(result, expect_etag=True)

    dhcp_options_name = dhcp_options_name + "_updated"
    options_v2 = """[{"type": "DomainNameServer", "customDnsServers": ["202.44.61.10"], "serverType": "CustomDnsServer"}, {"searchDomainNames": ["testvcn.oraclevcn.com"], "type":"SearchDomain"}]"""

    # update display name only - does not show a confirmation prompt
    result = self.invoke(['dhcp-options', 'update', '--dhcp-id', self.dhcp_options_ocid, '--display-name', dhcp_options_name])
    util.validate_response(result, expect_etag=True)

    # update options, confirm y
    result = self.invoke(
        ['dhcp-options', 'update', '--dhcp-id', self.dhcp_options_ocid, '--options', options_v2], input='y')
    util.validate_response(result, json_response_expected=False)

    # update options, confirm n - declining the prompt makes the command exit non-zero
    result = self.invoke(
        ['dhcp-options', 'update', '--dhcp-id', self.dhcp_options_ocid, '--options', options], input='n')
    assert result.exit_code != 0

    util.vcr_mode_aware_sleep(20)

    # update options, force - skips the confirmation prompt entirely
    result = self.invoke(
        ['dhcp-options', 'update', '--dhcp-id', self.dhcp_options_ocid,
         '--options', options_v2, '--force'])
    util.validate_response(result, expect_etag=True)

    response = json.loads(result.output)
    # validate response contains SearchDomain option
    response_has_search_domain_option = False
    for option in response["data"]["options"]:
        if option["type"] == "SearchDomain":
            response_has_search_domain_option = True
            assert option["search-domain-names"][0] == "testvcn.oraclevcn.com"
    assert response_has_search_domain_option, "Options response should contain option of type 'SearchDomain'."
def subtest_volume_attachment_operations(self):
    """Attach and detach the test volume twice: the first round polls the
    lifecycle state with wait_until, the second round relies on the CLI's
    own --wait-for-state support.

    Stores the current attachment OCID on ``self.va_ocid``.
    """
    attachment_name = util.random_name('cli_test_va')

    result = self.invoke([
        'compute', 'volume-attachment', 'attach',
        '--display-name', attachment_name,
        '--type', 'iscsi',
        '--instance-id', self.instance_ocid,
        '--volume-id', self.volume_ocid,
    ])
    self.va_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)

    get_command = [
        'compute', 'volume-attachment', 'get',
        '--volume-attachment-id', self.va_ocid,
    ]
    util.wait_until(get_command, 'ATTACHED', max_wait_seconds=300)

    util.validate_response(self.invoke([
        'compute', 'volume-attachment', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--instance-id', self.instance_ocid,
    ]))

    util.validate_response(self.invoke(get_command), expect_etag=True)

    util.validate_response(self.invoke([
        'compute', 'volume-attachment', 'detach',
        '--volume-attachment-id', self.va_ocid, '--force',
    ]))
    util.wait_until(get_command, 'DETACHED', max_wait_seconds=300)

    # Second round: let the CLI itself block on the lifecycle state.
    result = self.invoke([
        'compute', 'volume-attachment', 'attach',
        '--display-name', attachment_name,
        '--type', 'iscsi',
        '--instance-id', self.instance_ocid,
        '--volume-id', self.volume_ocid,
        '--wait-for-state', 'ATTACHED',
        '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS,
    ])
    util.validate_response(result, expect_etag=True, json_response_expected=False)
    # Waiting output mixes progress text with JSON; pull the id from the JSON part.
    self.va_ocid = util.get_json_from_mixed_string(result.output)['data']['id']

    result = self.invoke([
        'compute', 'volume-attachment', 'detach',
        '--volume-attachment-id', self.va_ocid, '--force',
        '--wait-for-state', 'DETACHED',
        '--wait-interval-seconds', util.WAIT_INTERVAL_SECONDS,
    ])
    util.validate_response(result, json_response_expected=False)
def db_systems_cleanup(runner, config_file, config_profile, db_system_id_1, db_system_id_2):
    """Terminate both test DB systems, asserting at the end that both
    terminations succeeded.

    Each termination is attempted independently (a failure on the first does
    not prevent the second from being cleaned up); any exception is printed
    and recorded, and the final assert surfaces the overall outcome.
    """
    if SKIP_CLEAN_UP_RESOURCES:
        print("Skipping clean up of DB systems and dependent resources.")
        return

    def _terminate_db_system(db_system_id):
        # Terminate one DB system and wait for TERMINATED; returns True on success.
        # The original code duplicated this sequence verbatim for both systems.
        try:
            params = [
                'system', 'terminate',
                '--db-system-id', db_system_id,
                '--force'
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result)

            # validate that it goes into terminating state
            params = [
                'system', 'get',
                '--db-system-id', db_system_id
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result)
            state = json.loads(result.output)['data']['lifecycle-state']
            assert "TERMINAT" in state

            # succeed_if_not_found: the record may disappear once fully terminated.
            util.wait_until(['db', 'system', 'get', '--db-system-id', db_system_id],
                            'TERMINATED',
                            max_wait_seconds=DB_SYSTEM_PROVISIONING_TIME_SEC,
                            succeed_if_not_found=True)
            return True
        except Exception as error:
            util.print_latest_exception(error)
            return False

    success_terminating_db_system_1 = _terminate_db_system(db_system_id_1)
    success_terminating_db_system_2 = _terminate_db_system(db_system_id_2)

    assert success_terminating_db_system_1 and success_terminating_db_system_2
def certificate(runner, config_file, config_profile, load_balancer, key_pair_files):
    """Fixture: create an SSL certificate bundle on the given load balancer,
    yield its name, then delete it on teardown.

    Both halves replay through their own VCR cassette and block on the
    returned load balancer work request before proceeding.
    """
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_load_balancer_fixture_certificate.yml'):
        # key_pair_files layout used here: index 1 = private key file,
        # index 2 = certificate file (index 0 unused by this fixture).
        private_key_filename = key_pair_files[1]
        certificate_filename = key_pair_files[2]
        cert_name = util.random_name('cli_lb_certificate')
        params = [
            'certificate', 'create',
            '--certificate-name', cert_name,
            '--load-balancer-id', load_balancer,
            '--ca-certificate-file', certificate_filename,
            '--private-key-file', private_key_filename,
            '--public-certificate-file', certificate_filename,
            '--passphrase', LB_PRIVATE_KEY_PASSPHRASE
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # returns work request; wait for it to finish before yielding
        response = json.loads(result.output)
        work_request_ocid = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_ocid],
            'SUCCEEDED', max_wait_seconds=LB_PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)

    yield cert_name

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_load_balancer_fixture_certificate_delete.yml'):
        # delete cert
        params = [
            'certificate', 'delete',
            '--load-balancer-id', load_balancer,
            '--certificate-name', cert_name,
            '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # deletion is also asynchronous; block on its work request too
        response = json.loads(result.output)
        work_request_ocid = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_ocid],
            'SUCCEEDED', max_wait_seconds=LB_PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)
def backend(runner, config_file, config_profile, load_balancer, backend_set):
    """Fixture: create a backend on the given backend set, yield its name,
    then delete it on teardown. Create and delete each replay through their
    own VCR cassette and block on the returned work request.
    """
    def _await_work_request(cli_result):
        # Backend create/delete return a work request; block until SUCCEEDED.
        work_request_ocid = json.loads(cli_result.output)['opc-work-request-id']
        get_work_request_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_ocid],
            'SUCCEEDED', max_wait_seconds=DEFAULT_WAIT_TIME)
        util.validate_response(get_work_request_result)

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_load_balancer_fixture_backend.yml'):
        ip_address = '10.0.0.10'
        port = '80'
        create_params = [
            'backend', 'create',
            '--ip-address', ip_address,
            '--port', port,
            '--load-balancer-id', load_balancer,
            '--backend-set-name', backend_set,
            '--weight', '3',
        ]
        result = invoke(runner, config_file, config_profile, create_params)
        util.validate_response(result)
        _await_work_request(result)

        # backend name defaults to "ipaddress:port"
        backend_name = "{}:{}".format(ip_address, port)

    yield backend_name

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_load_balancer_fixture_backend_delete.yml'):
        delete_params = [
            'backend', 'delete',
            '--load-balancer-id', load_balancer,
            '--backend-set-name', backend_set,
            '--backend-name', backend_name,
            '--force',
        ]
        result = invoke(runner, config_file, config_profile, delete_params)
        util.validate_response(result)
        _await_work_request(result)
def subtest_ip_sec_connection_operations(self):
    """Create an IPSec connection between the test CPE and DRG, then exercise
    list, update, get, get-config and get-status.

    Skips itself when a prior subtest flagged that no DRG is available, or
    when the tenancy's IPSec connection limit is hit (and records that fact
    on ``self.drg_capacity_issue`` so later subtests can skip too).
    """
    if hasattr(self, 'drg_capacity_issue'):
        print('Unable to execute subtest_ip_sec_connection_operations as a DRG is not available')
        return
    ipsc_name = util.random_name('cli_test_ipsc')
    routes = util.remove_outer_quotes(
        oci_cli_virtual_network.virtualnetwork_cli_extended.
        network_create_ip_sec_connection_static_routes_example)
    result = self.invoke([
        'ip-sec-connection', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', ipsc_name,
        '--cpe-id', self.cpe_ocid,
        '--drg-id', self.drg_ocid,
        '--static-routes', routes
    ])
    if 'Limit tenant-ipsec-vpn-connection' in result.output:
        # Service limit reached; remember so dependent subtests can bail out.
        self.drg_capacity_issue = True
        print('Unable to execute subtest_ip_sec_connection_operations as an IPSec Connection is not available')
        return
    self.ipsc_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until([
        'network', 'ip-sec-connection', 'get', '--ipsc-id', self.ipsc_ocid
    ], 'AVAILABLE', max_wait_seconds=600)

    result = self.invoke([
        'ip-sec-connection', 'list', '--compartment-id', util.COMPARTMENT_ID
    ])
    util.validate_response(result)

    ipsc_name = ipsc_name + "_updated"
    result = self.invoke([
        'ip-sec-connection', 'update',
        '--ipsc-id', self.ipsc_ocid,
        '--display-name', ipsc_name
    ])
    util.validate_response(result, expect_etag=True)

    result = self.invoke(
        ['ip-sec-connection', 'get', '--ipsc-id', self.ipsc_ocid])
    util.validate_response(result, expect_etag=True)

    # Fetch the device configuration and tunnel status sub-resources.
    result = self.invoke(
        ['ip-sec-connection', 'get-config', '--ipsc-id', self.ipsc_ocid])
    util.validate_response(result)

    result = self.invoke(
        ['ip-sec-connection', 'get-status', '--ipsc-id', self.ipsc_ocid])
    util.validate_response(result)
def volume_operations_internal(self, volume_name, command_params, size_gb, size_mb):
    """Create a volume with the given command params (optionally sized in GBs
    or MBs), verify the size on create and on get, list volumes, and — when a
    GB size was given — grow the volume by 10 GB and verify. Returns the
    volume OCID.

    ``size_gb`` takes precedence over ``size_mb``, matching the create call.
    """

    def _assert_size(parsed):
        # Verify the service echoes back the requested size.
        # (This check was previously copy-pasted three times.)
        if size_gb:
            assert str(parsed['data']['size-in-gbs']) == size_gb
        elif size_mb:
            assert str(parsed['data']['size-in-mbs']) == size_mb

    params_to_use = list(command_params)
    if size_gb:
        params_to_use.extend(['--size-in-gbs', size_gb])
    elif size_mb:
        params_to_use.extend(['--size-in-mbs', size_mb])

    result = self.invoke(params_to_use)
    util.validate_response(result)
    _assert_size(json.loads(result.output))

    volume_id = util.find_id_in_response(result.output)
    util.wait_until(['bv', 'volume', 'get', '--volume-id', volume_id],
                    'AVAILABLE', max_wait_seconds=180)

    result = self.invoke(['volume', 'get', '--volume-id', volume_id])
    util.validate_response(result)
    _assert_size(json.loads(result.output))

    result = self.invoke(
        ['volume', 'list', '--compartment-id', util.COMPARTMENT_ID])
    util.validate_response(result)

    if size_gb:
        # Grow the volume and confirm the new size after it is AVAILABLE again.
        new_size_gb = int(size_gb) + 10
        volume_name = volume_name + "_UPDATED"
        result = self.invoke([
            'volume', 'update', '--volume-id', volume_id,
            '--display-name', volume_name,
            '--size-in-gbs', str(new_size_gb)
        ])
        util.validate_response(result)
        util.wait_until(['bv', 'volume', 'get', '--volume-id', volume_id],
                        'AVAILABLE', max_wait_seconds=180)
        result = self.invoke(['volume', 'get', '--volume-id', volume_id])
        util.validate_response(result)
        parsed_result = json.loads(result.output)
        assert str(parsed_result['data']['size-in-gbs']) == str(new_size_gb)

    return volume_id
def test_topic_crud(runner, config_file, config_profile):
    """ONS topic CRUD flow: create, wait for ACTIVE, update, list, optional
    change-compartment, with guaranteed delete in the finally block."""
    topic_id = None
    try:
        # Create Topic
        params = [
            'ons', 'topic', 'create',
            '--name', util.random_name('topic_name'),
            '-c', util.COMPARTMENT_ID,
            '--description', 'A description of the topic'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        topic_id = json.loads(result.output)['data']['topic-id']
        util.wait_until(['ons', 'topic', 'get', '--topic-id', topic_id], 'ACTIVE', max_wait_seconds=600)

        # Update topic
        params = [
            'ons', 'topic', 'update',
            '--topic-id', topic_id,
            '--description', 'new description'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        assert json.loads(result.output)['data']['description'] == 'new description'

        # List all topics
        params = [
            'ons', 'topic', 'list',
            '--compartment-id', util.COMPARTMENT_ID
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        assert len(json.loads(result.output)['data']) > 0

        # Change compartment - only when a target compartment is configured
        if COMPARTMENT_ID_CHANGE_TO:
            params = [
                'ons', 'topic', 'change-compartment',
                '--topic-id', topic_id,
                '--compartment-id', COMPARTMENT_ID_CHANGE_TO
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result)
            assert result.output != ''
    finally:
        # Always clean up the topic if it was created.
        if topic_id:
            params = [
                'ons', 'topic', 'delete',
                '--topic-id', topic_id,
                '--force'
            ]
            result = invoke(runner, config_file, config_profile, params)
            util.validate_response(result)
def run_heartbeat(self, get_holders_fun, count=2, interval=0.05, timeout=0.05):
    """Run a ConnectionHeartbeat for roughly ``count`` intervals and assert
    that it polled the holders function at least once."""
    heartbeat = ConnectionHeartbeat(interval, get_holders_fun, timeout=timeout)
    # Block until the heartbeat thread has actually started calling.
    wait_until(lambda: get_holders_fun.call_count > 0, 0.01, 100)
    remaining_time = interval * (count - 1)
    time.sleep(remaining_time)
    heartbeat.stop()
    self.assertTrue(get_holders_fun.call_count)
def _update_pe(config_file, config_profile, pe_id, runner, is_async=True):
    """Update the Data Safe private endpoint's display name and description —
    either asynchronously (poll the returned work request) or synchronously
    (--wait-for-state) — then verify the update via a get."""
    # UPDATE TEST #
    update_params = [
        'data-safe', 'private-endpoint', 'update',
        '--private-endpoint-id', pe_id,
        '--display-name', PE_UPDATED_DISPLAY_NAME,
        '--description', PE_DESCRIPTION,
        '--force'
    ]
    if is_async:
        # We just validate if work request id is returned and not the value as it is dynamic
        response = data_safe_util.run_test(
            runner, config_file, config_profile, update_params,
            expected_data=data_safe_util.WORKFLOW_REQUEST_EXPECTED_DATA,
            check_values=False)
        update_wr_id = response["opc-work-request-id"]
        util.wait_until([
            'data-safe', 'work-request', 'get', '--work-request-id', update_wr_id
        ], 'SUCCEEDED', max_wait_seconds=600, state_property_name='status')
    else:
        # Synchronous path: the CLI itself blocks until the state is reached.
        update_params.append('--wait-for-state')
        update_params.append(SUCCEEDED_STATE)
        data_safe_util.run_test(runner, config_file, config_profile, update_params, decode_response=False)

    # Verify the update took effect.
    get_params = [
        'data-safe', 'private-endpoint', 'get', '--private-endpoint-id', pe_id
    ]
    expected_data = {
        "lifecycle-state": "ACTIVE",
        "display-name": PE_UPDATED_DISPLAY_NAME,
        "id": pe_id,
        "subnet-id": SUBNET_ID,
        "vcn-id": VCN_ID,
        "compartment-id": COMPARTMENT_ID,
        "description": PE_DESCRIPTION
    }
    data_safe_util.run_test(runner, config_file, config_profile, get_params, expected_data=expected_data)
def _create_pe(config_file, config_profile, runner, completion_state=SUCCEEDED_STATE, private_ip=PRIVATE_IP, display_name=PE_DISPLAY_NAME, is_async=True):
    """Create a Data Safe private endpoint and return its OCID.

    Async mode polls the returned work request until ``completion_state`` and
    also exercises the work-request log (and, on failure, error) list
    commands. Returns '' when the flow ends in a non-SUCCEEDED state.
    """
    # CREATE TEST #
    create_params = [
        'data-safe', 'private-endpoint', 'create',
        '--compartment-id', COMPARTMENT_ID,
        '--display-name', display_name,
        '--subnet-id', SUBNET_ID,
        '--vcn-id', VCN_ID,
        '--private-endpoint-ip', private_ip
    ]
    if is_async:
        # We just validate if work request id is returned and not the value as it is dynamic
        response = data_safe_util.run_test(runner, config_file, config_profile, create_params,
                                           expected_data=data_safe_util.WORKFLOW_REQUEST_EXPECTED_DATA,
                                           check_values=False)
        create_wr_id = response["opc-work-request-id"]
        util.wait_until(['data-safe', 'work-request', 'get', '--work-request-id', create_wr_id],
                        completion_state, max_wait_seconds=600, state_property_name='status')

        # Test list-work-request-logs
        list_wr_log_params = [
            'data-safe', 'work-request-log-entry', 'list',
            '--work-request-id', create_wr_id, '--all'
        ]
        data_safe_util.run_test(runner, config_file, config_profile, list_wr_log_params, check_length=True)

        if completion_state == FAILED_STATE:
            # Test list-work-error
            list_wr_log_params = [
                'data-safe', 'work-request-error', 'list',
                '--work-request-id', create_wr_id, '--all'
            ]
            data_safe_util.run_test(runner, config_file, config_profile, list_wr_log_params, check_length=True)
    else:
        # Synchronous path: the CLI itself blocks until SUCCEEDED.
        create_params.append('--wait-for-state')
        create_params.append(SUCCEEDED_STATE)
        data_safe_util.run_test(runner, config_file, config_profile, create_params, decode_response=False)

    pe_id = ''
    if completion_state == SUCCEEDED_STATE:
        # Recover the new endpoint's id by listing on the display name.
        list_params = [
            'data-safe', 'private-endpoint', 'list',
            '--compartment-id', COMPARTMENT_ID,
            '--display-name', display_name, '--all'
        ]
        response = data_safe_util.run_test(runner, config_file, config_profile, list_params, check_length=True)
        pe_id = response["data"][0]["id"]
    return pe_id
def delete_instance(self, instance_ocid):
    """Wait for the instance to settle (RUNNING, or already gone), then
    terminate it with --force and validate the response."""
    print("Deleting instance " + instance_ocid)
    get_command = ['compute', 'instance', 'get', '--instance-id', instance_ocid]
    # succeed_if_not_found: the instance may already have been cleaned up.
    util.wait_until(get_command, 'RUNNING', max_wait_seconds=600, succeed_if_not_found=True)
    terminate_command = [
        'compute', 'instance', 'terminate',
        '--instance-id', instance_ocid,
        '--force',
    ]
    util.validate_response(util.invoke_command(terminate_command))
def subtest_instance_operations(self):
    """Launch / list / update / get an instance, then launch a second instance
    with --wait-for-state and a deliberately short --max-wait-seconds so the
    wait is expected to time out (non-zero exit) while still surfacing the new
    instance's OCID in the mixed output.

    Stores the OCIDs on ``self.instance_ocid`` / ``self.instance_ocid_2``.
    """
    instance_name = util.random_name('cli_test_instance')
    fault_domain = 'FAULT-DOMAIN-1'
    image_id = util.oracle_linux_image()
    shape = 'VM.Standard1.1'
    result = self.invoke(
        ['compute', 'instance', 'launch',
         '--compartment-id', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--display-name', instance_name,
         '--fault-domain', fault_domain,
         '--subnet-id', self.subnet_ocid,
         '--image-id', image_id,
         '--shape', shape,
         '--metadata', util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.compute_instance_launch_metadata_example)])
    self.instance_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)

    util.wait_until(['compute', 'instance', 'get', '--instance-id', self.instance_ocid], 'RUNNING', max_wait_seconds=600)

    result = self.invoke(['compute', 'instance', 'list', '--compartment-id', util.COMPARTMENT_ID])
    util.validate_response(result)

    # list with compartment shortcut
    result = self.invoke(['compute', 'instance', 'list', '-c', util.COMPARTMENT_ID])
    util.validate_response(result)

    instance_name = instance_name + "_updated"
    result = self.invoke(['compute', 'instance', 'update', '--instance-id', self.instance_ocid, '--display-name', instance_name])
    util.validate_response(result, expect_etag=True)

    result = self.invoke(['compute', 'instance', 'get', '--instance-id', self.instance_ocid])
    util.validate_response(result, expect_etag=True)

    result = self.invoke(
        ['compute', 'instance', 'launch',
         '--compartment-id', util.COMPARTMENT_ID,
         '--availability-domain', util.availability_domain(),
         '--display-name', instance_name + "_2",
         '--fault-domain', fault_domain,
         '--subnet-id', self.subnet_ocid,
         '--image-id', image_id,
         '--shape', shape,
         '--metadata', util.remove_outer_quotes(oci_cli_compute.compute_cli_extended.compute_instance_launch_metadata_example),
         '--wait-for-state', 'RUNNING',
         '--max-wait-seconds', '20',
         '--wait-interval-seconds', '5'])
    # Output mixes a timeout message with JSON; slice from the first '{'
    # to recover the instance payload and its OCID.
    self.instance_ocid_2 = util.find_id_in_response(result.output[result.output.index('{'):])
    # The short max-wait is expected to expire before RUNNING -> non-zero exit.
    assert result.exit_code != 0
def subtest_instance_action_operations(self):
    """Issue a RESET action against the test instance and wait until it
    reports RUNNING again."""
    reset_result = self.invoke([
        'compute', 'instance', 'action',
        '--instance-id', self.instance_ocid,
        '--action', 'RESET',
    ])
    util.validate_response(reset_result)

    # Give the reset a moment to take effect before polling the state.
    util.vcr_mode_aware_sleep(10)
    get_command = ['compute', 'instance', 'get', '--instance-id', self.instance_ocid]
    util.wait_until(get_command, 'RUNNING', max_wait_seconds=300)
    util.vcr_mode_aware_sleep(5)
def mount_target(filestorage_client, vcn_and_subnet, runner, config_file, config_profile):
    """Fixture: create a file storage mount target in the test subnet, wait
    until ACTIVE, yield the full mount target model dict, then delete it on
    teardown and wait for DELETED."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_mount_target_fixture.yml'):
        # NOTE(review): vcn_id and mount_target_name are currently unused.
        vcn_id = vcn_and_subnet[0]
        subnet_id = vcn_and_subnet[1]
        mount_target_name = util.random_name('cli_test_mt')
        params = [
            'mount-target', 'create',
            '--availability-domain', util.availability_domain(),
            '-c', util.COMPARTMENT_ID,
            '--subnet-id', subnet_id
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        mount_target = json.loads(result.output)['data']
        mount_target_id = mount_target['id']

        # Wait via the SDK client rather than the CLI.
        test_config_container.do_wait(
            filestorage_client,
            filestorage_client.get_mount_target(mount_target_id),
            'lifecycle_state', 'ACTIVE')

        # exercise CLI get mount target
        params = ['mount-target', 'get', '--mount-target-id', mount_target_id]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

    yield mount_target

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'filestorage_mount_target_fixture_cleanup.yml'):
        params = [
            'mount-target', 'delete',
            '--mount-target-id', mount_target_id,
            '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        util.wait_until([
            'fs', 'mount-target', 'get', '--mount-target-id', mount_target_id
        ], 'DELETED', max_wait_seconds=300)
def load_balancer(runner, config_file, config_profile, vcn_and_subnets):
    """Fixture: create a 100Mbps load balancer across the two test subnets,
    yield its OCID, then delete it on teardown and wait for TERMINATED."""
    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_load_balancer_fixture_lb.yml'):
        subnet_ocid_1 = vcn_and_subnets[1]
        subnet_ocid_2 = vcn_and_subnets[2]
        params = [
            'load-balancer', 'create',
            '-c', util.COMPARTMENT_ID,
            '--display-name', util.random_name('cli_lb'),
            '--shape-name', '100Mbps',
            '--subnet-ids', '["{}","{}"]'.format(subnet_ocid_1, subnet_ocid_2)
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)

        # create lb returns work request
        response = json.loads(result.output)
        work_request_ocid = response['opc-work-request-id']
        get_work_request_result = util.wait_until(
            ['lb', 'work-request', 'get', '--work-request-id', work_request_ocid],
            'SUCCEEDED', max_wait_seconds=LB_PROVISIONING_TIME_SEC)
        util.validate_response(get_work_request_result)

        # The finished work request carries the new load balancer's OCID.
        lb_ocid = json.loads(
            get_work_request_result.output)['data']['load-balancer-id']

    yield lb_ocid

    with test_config_container.create_vcr(
            cassette_library_dir=CASSETTE_LIBRARY_DIR).use_cassette(
            'test_load_balancer_fixture_lb_delete.yml'):
        params = [
            'load-balancer', 'delete',
            '--load-balancer-id', lb_ocid,
            '--force'
        ]
        result = invoke(runner, config_file, config_profile, params)
        util.validate_response(result)
        # succeed_if_not_found: the record may disappear once fully terminated.
        util.wait_until(
            ['lb', 'load-balancer', 'get', '--load-balancer-id', lb_ocid],
            'TERMINATED', max_wait_seconds=LB_PROVISIONING_TIME_SEC,
            succeed_if_not_found=True)
def subtest_route_table_operations(self):
    """Route table lifecycle: create, list, get, and the three update paths
    (display-name only with no prompt; --route-rules with a y/n confirmation
    prompt; --route-rules with --force).

    Stores the created OCID on ``self.rt_ocid``.
    """
    rt_name = util.random_name('cli_test_route_table')
    rules = util.remove_outer_quotes(oci_cli_virtual_network.virtualnetwork_cli_extended.network_create_route_table_route_rules_example.format(ig_id=self.ig_ocid))
    result = self.invoke(
        ['route-table', 'create',
         '--compartment-id', util.COMPARTMENT_ID,
         '--display-name', rt_name,
         '--vcn-id', self.vcn_ocid,
         '--route-rules', rules])
    self.rt_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until(['network', 'route-table', 'get', '--rt-id', self.rt_ocid], 'AVAILABLE', max_wait_seconds=300)

    result = self.invoke(['route-table', 'list', '--compartment-id', util.COMPARTMENT_ID, '--vcn-id', self.vcn_ocid])
    util.validate_response(result)

    self.run_list_filter_verification('route-table', rt_name)

    result = self.invoke(['route-table', 'get', '--rt-id', self.rt_ocid])
    util.validate_response(result, expect_etag=True)

    rt_name = rt_name + "_updated"
    rules_v2 = """[{{"cidrBlock":"0.0.0.0/1","networkEntityId":"{ig_id}"}}]""".format(ig_id=self.ig_ocid)

    # update display name only - does not show a prompt
    result = self.invoke(['route-table', 'update', '--rt-id', self.rt_ocid, '--display-name', rt_name])
    util.validate_response(result, expect_etag=True)

    util.vcr_mode_aware_sleep(20)

    # update route-rules, confirm y
    result = self.invoke(
        ['route-table', 'update', '--rt-id', self.rt_ocid, '--route-rules', rules_v2], input='y')
    util.validate_response(result, json_response_expected=False)

    # update route-rules, confirm n - declining makes the command exit non-zero
    result = self.invoke(
        ['route-table', 'update', '--rt-id', self.rt_ocid, '--route-rules', rules_v2], input='n')
    assert result.exit_code != 0

    util.vcr_mode_aware_sleep(20)

    # update route-rules, force - skips the confirmation prompt entirely
    result = self.invoke(
        ['route-table', 'update', '--rt-id', self.rt_ocid, '--route-rules', rules, '--force'])
    util.validate_response(result, expect_etag=True)
def subtest_console_history_operations(self):
    """Console history lifecycle: capture, wait for SUCCEEDED, update the
    display name, list, get, and download the captured content to a local
    file, checking that a minimum amount of content was retrieved.

    Stores the created OCID on ``self.ch_ocid``.
    """
    result = self.invoke([
        'compute', 'console-history', 'capture',
        '--instance-id', self.instance_ocid,
        '--display-name', 'Original'
    ])
    parsed_result = json.loads(result.output)
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual('Original', parsed_result['data']['display-name'])
    self.ch_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)

    util.wait_until([
        'compute', 'console-history', 'get',
        '--instance-console-history-id', self.ch_ocid
    ], 'SUCCEEDED', max_wait_seconds=300)

    result = self.invoke([
        'compute', 'console-history', 'update',
        '--instance-console-history-id', self.ch_ocid,
        '--display-name', 'Updated'
    ])
    parsed_result = json.loads(result.output)
    self.assertEqual('Updated', parsed_result['data']['display-name'])

    result = self.invoke([
        'compute', 'console-history', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--instance-id', self.instance_ocid
    ])
    util.validate_response(result)

    result = self.invoke([
        'compute', 'console-history', 'get',
        '--instance-console-history-id', self.ch_ocid
    ])
    util.validate_response(result, expect_etag=True)

    # Download the captured content to a fresh local file.
    if os.path.exists(CONSOLE_HISTORY_FILENAME):
        os.remove(CONSOLE_HISTORY_FILENAME)
    result = self.invoke([
        'compute', 'console-history', 'get-content',
        '--instance-console-history-id', self.ch_ocid,
        '--file', CONSOLE_HISTORY_FILENAME
    ])
    util.validate_response(result)

    with open(CONSOLE_HISTORY_FILENAME, 'rb') as file:
        # Make sure that we got at least some minimum amount of content.
        assert (len(file.read()) > 500)
def volume_group_operations_internal(self, volume_group_name, command_params):
    """Create a volume group with the given command params, verify get/list,
    then update it — dropping the last member volume when the group has more
    than one — and return ``(volume_group_id, volume_ids)``."""
    params_to_use = list(command_params)
    result = self.invoke(params_to_use)
    util.validate_response(result)
    parsed_result = json.loads(result.output)
    volume_group_id = util.find_id_in_response(result.output)
    volume_ids = parsed_result['data']['volume-ids']
    assert len(volume_ids) > 0

    util.wait_until([
        'bv', 'volume-group', 'get', '--volume-group-id', volume_group_id
    ], 'AVAILABLE', max_wait_seconds=180)

    result = self.invoke(
        ['volume-group', 'get', '--volume-group-id', volume_group_id])
    util.validate_response(result)
    parsed_result = json.loads(result.output)
    assert parsed_result['data']['size-in-mbs'] is not None
    assert parsed_result['data']['time-created'] is not None

    result = self.invoke(
        ['volume-group', 'list', '--compartment-id', util.COMPARTMENT_ID])
    util.validate_response(result)

    volume_group_name = volume_group_name + "_UPDATED"
    # if we have more than a single volume in the group, remove one and update
    if len(volume_ids) > 1:
        # pop() drops the last id directly; the previous
        # remove(volume_ids[len(volume_ids) - 1]) scanned for the first equal
        # element, which is slower and wrong if an id ever repeats.
        volume_ids.pop()
        result = self.invoke([
            'volume-group', 'update',
            '--volume-group-id', volume_group_id,
            '--display-name', volume_group_name,
            '--volume-ids', json.dumps(volume_ids),
            '--force'
        ])
        util.validate_response(result)
        parsed_result = json.loads(result.output)
        assert len(parsed_result['data']['volume-ids']) == len(volume_ids)
    else:
        result = self.invoke([
            'volume-group', 'update',
            '--volume-group-id', volume_group_id,
            '--display-name', volume_group_name
        ])
        util.validate_response(result)

    return volume_group_id, volume_ids
def subtest_subnet_operations(self):
    """Create a subnet with security list ids and a DNS label, then list,
    update the display name, and verify the dns-label round-trips on get.

    Stores the created OCID on ``self.subnet_ocid``.
    """
    subnet_name = util.random_name('cli_test_subnet')
    cidr_block = "10.0.0.0/16"
    security_list_ids = util.remove_outer_quotes(
        oci_cli_virtual_network.virtualnetwork_cli_extended.
        network_create_subnet_security_list_ids_example.format(
            sl_id=self.sl_ocid))
    # insert_underscore=False: the value is used as a DNS label.
    subnet_dns_label = util.random_name('subnet', insert_underscore=False)

    result = self.invoke([
        'subnet', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', subnet_name,
        '--vcn-id', self.vcn_ocid,
        '--cidr-block', cidr_block,
        '--security-list-ids', security_list_ids,
        '--dns-label', subnet_dns_label
    ])
    self.subnet_ocid = util.find_id_in_response(result.output)
    util.validate_response(result, expect_etag=True)
    util.wait_until(
        ['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid],
        'AVAILABLE', max_wait_seconds=300)

    result = self.invoke([
        'subnet', 'list',
        '--compartment-id', util.COMPARTMENT_ID,
        '--vcn-id', self.vcn_ocid
    ])
    util.validate_response(result)

    self.run_list_filter_verification('subnet', subnet_name)

    subnet_name = subnet_name + "_updated"
    result = self.invoke([
        'subnet', 'update',
        '--subnet-id', self.subnet_ocid,
        '--display-name', subnet_name
    ])
    util.validate_response(result, expect_etag=True)

    result = self.invoke(
        ['subnet', 'get', '--subnet-id', self.subnet_ocid])
    util.validate_response(result, expect_etag=True)
    subnet_response = json.loads(result.output)
    assert subnet_response['data']['dns-label'] == subnet_dns_label
def _test_all_datatypes(self, schema, graphson):
    """For every datatype fixture, create a vertex label with one property of
    that type, add a vertex, and verify the stored property round-trips
    through the given graphson protocol."""
    ep = self.get_execution_profile(graphson)
    for data in six.itervalues(schema.fixtures.datatypes()):
        typ, value, deserializer = data
        vertex_label = VertexLabel([typ])
        property_name = next(six.iterkeys(vertex_label.non_pk_properties))
        schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
        vertex = list(
            schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0]

        def get_vertex_properties():
            # Re-read the vertex's properties from the server.
            return list(
                schema.get_vertex_properties(self.session, vertex, execution_profile=ep))

        prop_returned = 1 if DSE_VERSION < Version('5.1') else 2  # include pkid >=5.1
        # Poll until the expected number of properties is visible.
        wait_until(lambda: len(get_vertex_properties()) == prop_returned, 0.2, 15)

        vertex_properties = get_vertex_properties()
        if graphson == GraphProtocol.GRAPHSON_1_0:
            vertex_properties = [
                vp.as_vertex_property() for vp in vertex_properties
            ]
        for vp in vertex_properties:
            if vp.label == 'pkid':
                # Primary-key property added by the schema; not under test.
                continue
            self.assertIsInstance(vp, VertexProperty)
            self.assertEqual(vp.label, property_name)
            if graphson == GraphProtocol.GRAPHSON_1_0:
                # GraphSON 1.0 values may need an explicit deserialization step.
                deserialized_value = deserializer(
                    vp.value) if deserializer else vp.value
                self.assertEqual(deserialized_value, value)
            else:
                self.assertEqual(vp.value, value)
def test_connection_honor_cluster_port(self):
    """
    Test that the initial contact point and discovered nodes honor the
    cluster port on new connection.

    All hosts should be marked as up and we should be able to execute queries on it.
    """
    # The default port (9042) is not where the test cluster listens.
    default_port_cluster = Cluster()
    with self.assertRaises(NoHostAvailable):
        default_port_cluster.connect()  # should fail on port 9042

    cluster = Cluster(port=9046)
    session = cluster.connect(wait_for_all_pools=True)

    # All three nodes should be discovered on the configured port.
    wait_until(lambda: len(cluster.metadata.all_hosts()) == 3, 1, 5)
    for host in cluster.metadata.all_hosts():
        self.assertTrue(host.is_up)
        session.execute("select * from system.local", host=host)
def test_policy_crud(runner, config_file, config_profile):
    """Create a WAAS policy, wait until ACTIVE, list it, and always delete it."""
    domain = util.random_name('cli-domain', insert_underscore=False) + '.oci.example.com'
    origin = util.random_name('cli-domain', insert_underscore=False) + '.origin.oci.example.com'
    policy_id = None
    try:
        create_result = invoke(runner, config_file, config_profile, [
            'waas', 'waas-policy', 'create',
            '--domain', domain,
            '--origins', '{"primary":{"uri":"%s","httpPort":80}}' % origin,
            '--compartment-id', util.COMPARTMENT_ID
        ])
        util.validate_response(create_result)

        # Creation is asynchronous: resolve the policy OCID through the work request.
        work_request_id = json.loads(create_result.output)['opc-work-request-id']
        wr_result = invoke(runner, config_file, config_profile, [
            'waas', 'work-request', 'get', '--work-request-id', work_request_id
        ])
        util.validate_response(wr_result)
        policy_id = json.loads(wr_result.output)['data']['resources'][0]['identifier']

        util.wait_until(
            ['waas', 'waas-policy', 'get', '--waas-policy-id', policy_id],
            'ACTIVE', max_wait_seconds=600)

        list_result = invoke(runner, config_file, config_profile, [
            'waas', 'waas-policy', 'list', '-c', util.COMPARTMENT_ID
        ])
        util.validate_response(list_result)
        assert len(json.loads(list_result.output)['data']) > 0
    finally:
        # Best-effort teardown: only attempt deletion once a policy OCID exists.
        if policy_id:
            delete_result = invoke(runner, config_file, config_profile, [
                'waas', 'waas-policy', 'delete',
                '--waas-policy-id', policy_id, '--force'
            ])
            util.validate_response(delete_result)
def subtest_windows_instance_operations(self):
    """Launch a Windows VM, fetch its initial credentials, then terminate it."""
    display_name = util.random_name('cli_test_instance')
    image_id = util.windows_vm_image()

    launch_result = self.invoke([
        'compute', 'instance', 'launch',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', display_name,
        '--subnet-id', self.subnet_ocid,
        '--image-id', image_id,
        '--shape', 'VM.Standard1.1',
    ])
    self.windows_instance_ocid = util.find_id_in_response(launch_result.output)
    util.validate_response(launch_result, expect_etag=True)

    util.wait_until(
        ['compute', 'instance', 'get', '--instance-id', self.windows_instance_ocid],
        'RUNNING', max_wait_seconds=600)

    get_result = self.invoke([
        'compute', 'instance', 'get',
        '--instance-id', self.windows_instance_ocid,
    ])
    util.validate_response(get_result, expect_etag=True)

    creds_result = self.invoke([
        'compute', 'instance', 'get-windows-initial-creds',
        '--instance-id', self.windows_instance_ocid,
    ])
    util.validate_response(creds_result)
    credentials = json.loads(creds_result.output)['data']
    # Windows images provision the default 'opc' administrator account,
    # and the one-time password must be present in the response.
    assert credentials['username'] == 'opc'
    assert 'password' in credentials

    terminate_result = self.invoke([
        'compute', 'instance', 'terminate',
        '--instance-id', self.windows_instance_ocid, '--force',
    ])
    util.validate_response(terminate_result)
def _delete_pe(config_file, config_profile, pe_id, runner, is_async=True):
    """Delete a Data Safe private endpoint.

    When is_async is True, fire the delete and poll its work request until it
    SUCCEEDED; otherwise let the CLI block via --wait-for-state.
    """
    delete_params = [
        'data-safe', 'private-endpoint', 'delete',
        '--private-endpoint-id', pe_id,
        '--force',
    ]

    if not is_async:
        # Synchronous path: the CLI itself waits for the terminal state.
        delete_params += ['--wait-for-state', SUCCEEDED_STATE]
        data_safe_util.run_test(runner, config_file, config_profile, delete_params,
                                decode_response=False)
        return

    # We just validate if work request id is returned and not the value as it is dynamic
    response = data_safe_util.run_test(
        runner, config_file, config_profile, delete_params,
        expected_data=data_safe_util.WORKFLOW_REQUEST_EXPECTED_DATA,
        check_values=False)
    util.wait_until(
        ['data-safe', 'work-request', 'get',
         '--work-request-id', response["opc-work-request-id"]],
        SUCCEEDED_STATE, max_wait_seconds=600, state_property_name='status')
def test_deployment_update(runner, config_file, config_profile):
    """Update an API deployment's display name and specification, then verify both changed."""
    # TODO: remove this -- added this return on 8/16/2019 b/c tests were failing.
    return

    # NOTE: everything below is unreachable until the early return above is removed.
    api_deployment_id = api_gateway_and_deployment[1]
    fn_func_id = api_gateway_and_deployment[2]

    get_params = [
        'api-gateway', 'deployment', 'get',
        '--deployment-id', api_deployment_id,
    ]
    get_result = invoke(runner, config_file, config_profile, get_params)
    util.validate_response(get_result)
    deployment_before = json.loads(get_result.output)['data']

    update_params = [
        'api-gateway', 'deployment', 'update',
        '--deployment-id', api_deployment_id,
        '--display-name', util.random_name('deployment', insert_underscore=False),
        '--specification', build_full_api_specification('https://cloud.oracle.com', fn_func_id, fn_func_id),
        '--force',
    ]
    update_result = invoke(runner, config_file, config_profile, update_params)
    util.validate_response(update_result)

    util.wait_until(
        ['api-gateway', 'deployment', 'get', '--deployment-id', api_deployment_id],
        'ACTIVE', max_wait_seconds=300)

    get_result = invoke(runner, config_file, config_profile, get_params)
    util.validate_response(get_result)
    deployment_after = json.loads(get_result.output)['data']

    assert deployment_before['display-name'] != deployment_after['display-name'], \
        "Deployment's display name should have been updated"
    assert json.dumps(deployment_before['specification']) != json.dumps(deployment_after['specification']), \
        "Deployment's specification should have been updated"
def set_up_resources(self):
    """Provision the VCN and subnet the compute tests run inside, storing their OCIDs on self."""
    # Create a VCN
    vcn_name = util.random_name('cli_test_compute_vcn')
    cidr = "10.0.0.0/16"
    vcn_result = self.invoke([
        'network', 'vcn', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', vcn_name,
        '--dns-label', 'clivcn',
        '--cidr-block', cidr,
    ])
    self.vcn_ocid = util.find_id_in_response(vcn_result.output)
    util.validate_response(vcn_result, expect_etag=True)
    util.wait_until(['network', 'vcn', 'get', '--vcn-id', self.vcn_ocid],
                    'AVAILABLE', max_wait_seconds=300)

    # Create a subnet
    subnet_name = util.random_name('cli_test_compute_subnet')
    subnet_result = self.invoke([
        'network', 'subnet', 'create',
        '--compartment-id', util.COMPARTMENT_ID,
        '--availability-domain', util.availability_domain(),
        '--display-name', subnet_name,
        '--dns-label', 'clisubnet',
        '--vcn-id', self.vcn_ocid,
        # The subnet deliberately reuses the VCN's entire CIDR block.
        '--cidr-block', cidr,
    ])
    self.subnet_ocid = util.find_id_in_response(subnet_result.output)
    util.validate_response(subnet_result, expect_etag=True)
    util.wait_until(['network', 'subnet', 'get', '--subnet-id', self.subnet_ocid],
                    'AVAILABLE', max_wait_seconds=300)
def exa_db_system(runner, config_file, config_profile, networking, network_client, request):
    """Provision (or reuse) an Exadata DB system and return its OCID in a single-element list.

    Previously the reuse path returned ``[EXISTING_DB_SYSTEM_1]`` while the
    provision path returned a bare OCID string; both paths now return a list
    so callers can treat the result uniformly.
    """
    DB_SYSTEM_SHAPE = "Exadata.Quarter1.84"
    DB_SYSTEM_CPU_CORE_COUNT = '22'

    # Short-circuit when a pre-existing DB system OCID is configured.
    if EXISTING_DB_SYSTEM_1:
        return [EXISTING_DB_SYSTEM_1]

    subnet_response = network_client.get_subnet(networking['subnet_ocid_1'])
    print("Using subnet's AD", subnet_response.data.availability_domain)

    # provision DB systems
    params = [
        'system', 'launch',
        '--admin-password', ADMIN_PASSWORD,
        '--availability-domain', subnet_response.data.availability_domain,
        '--compartment-id', util.COMPARTMENT_ID,
        '--cpu-core-count', DB_SYSTEM_CPU_CORE_COUNT,
        '--database-edition', DB_SYSTEM_DB_EXTREME_EDITION,
        '--db-name', 'clibmdb',
        '--db-version', DB_VERSION,
        '--display-name', 'CliDbSysDisplayNameExa',
        '--hostname', 'cli-bm-host',
        '--shape', DB_SYSTEM_SHAPE,
        '--ssh-authorized-keys-file', util.SSH_AUTHORIZED_KEYS_FILE,
        '--subnet-id', networking['subnet_ocid_1'],
        '--backup-subnet-id', networking['subnet_ocid_2'],
        '--license-model', 'LICENSE_INCLUDED',
        '--node-count', '1',
        '--initial-data-storage-size-in-gb', '256',
    ]
    result = invoke(runner, config_file, config_profile, params)
    util.validate_response(result)
    print(str(result.output))
    json_result = json.loads(result.output)
    db_system_id_1 = json_result['data']['id']

    # Typo fix: message previously read "Wating".
    print("Waiting for DB System to complete provisioning...")

    # create db system and wait to finish
    util.wait_until(
        ['db', 'system', 'get', '--db-system-id', db_system_id_1],
        'AVAILABLE', max_wait_seconds=DB_SYSTEM_PROVISIONING_TIME_SEC)
    print("exa_db_system: DB System provisioned successfully!")

    # Bug fix: wrap in a list to match the EXISTING_DB_SYSTEM_1 reuse path above.
    return [db_system_id_1]
def test_removed_node_stops_reconnecting(self):
    """ Ensure we stop reconnecting after a node is removed. PYTHON-1181 """
    use_cluster("test_down_then_removed", [3], start=True)

    listener = StateListener()
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    self.addCleanup(cluster.shutdown)
    cluster.register_listener(listener)
    session = cluster.connect(wait_for_all_pools=True)

    # Take node 3 down and wait for the driver to mark a host as down.
    get_node(3).nodetool("disablebinary")
    wait_until(condition=lambda: listener.downed_host is not None,
               delay=2, max_attempts=50)
    self.assertTrue(listener.downed_host.is_currently_reconnecting())

    # Remove the node entirely; reconnection attempts must cease.
    decommission(3)
    wait_until(condition=lambda: listener.removed_host is not None,
               delay=2, max_attempts=50)
    # Just a sanity check
    self.assertIs(listener.downed_host, listener.removed_host)
    self.assertFalse(listener.removed_host.is_currently_reconnecting())
def subtest_image_import_export_via_uri(self, config):
    """Export a custom image to an Object Storage URI, then import it back twice:
    once via the plain object URI and once via a pre-authenticated request (PAR) URI."""
    self.export_via_uri_object_name = 'export-via-uri'
    os_endpoint = oci.regions.endpoint_for('object_storage', config['region'])
    object_uri = '{}/n/{}/b/{}/o/{}'.format(
        os_endpoint,
        self.object_storage_namespace,
        self.bucket_name,
        self.export_via_uri_object_name)

    # Export the custom image to the object URI.
    export_result = self.invoke([
        'compute', 'image', 'export', 'to-object-uri',
        '--image-id', self.custom_image_id,
        '--uri', object_uri,
    ])
    export_details = json.loads(export_result.output)
    assert self.custom_image_id == export_details['data']['id']
    assert export_details['data']['lifecycle-state'] == 'EXPORTING'
    util.wait_until(
        ['compute', 'image', 'get', '--image-id', self.custom_image_id],
        'AVAILABLE', max_wait_seconds=3600)

    # Import from the plain object URI.
    import_result = self.invoke([
        'compute', 'image', 'import', 'from-object-uri',
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', 'Imported from object storage uri',
        '--uri', object_uri,
    ])
    import_details = json.loads(import_result.output)
    self.imported_image_from_uri_id = import_details['data']['id']
    assert self.imported_image_from_uri_id is not None
    assert import_details['data']['lifecycle-state'] == 'IMPORTING'
    assert import_details['data']['display-name'] == 'Imported from object storage uri'

    # Create a pre-authenticated request against the object so that we can import it
    par_result = self.invoke([
        'os', 'preauth-request', 'create',
        '--namespace', self.object_storage_namespace,
        '--bucket-name', self.bucket_name,
        '--object-name', self.export_via_uri_object_name,
        '--name', 'preauth-object',
        '--access-type', 'ObjectRead',
        '--time-expires', '9999-12-31T00:00:00.000+00:00',
    ])
    par_details = json.loads(par_result.output)
    self.object_par_id = par_details['data']['id']

    # The access-uri in the response looks like:
    #   /p/...../n/mytenancy/b/A_CliImportExportBucket/o/test_export_1
    # so only the service endpoint needs prepending (the URI already names the object).
    object_par_access_uri = '{}{}'.format(os_endpoint, par_details['data']['access-uri'])

    par_import_result = self.invoke([
        'compute', 'image', 'import', 'from-object-uri',
        '--compartment-id', util.COMPARTMENT_ID,
        '--display-name', 'Imported from object storage par',
        '--uri', object_par_access_uri,
    ])
    par_import_details = json.loads(par_import_result.output)
    self.imported_image_from_par_id = par_import_details['data']['id']
    assert self.imported_image_from_par_id is not None
    assert par_import_details['data']['lifecycle-state'] == 'IMPORTING'
    assert par_import_details['data']['display-name'] == 'Imported from object storage par'

    # Wait for both imported images to finish materializing.
    util.wait_until(
        ['compute', 'image', 'get', '--image-id', self.imported_image_from_uri_id],
        'AVAILABLE', max_wait_seconds=3600)
    util.wait_until(
        ['compute', 'image', 'get', '--image-id', self.imported_image_from_par_id],
        'AVAILABLE', max_wait_seconds=3600)