def xpath(self, download_id=None):
    """Return the project-qualified operational xpath for download jobs.

    When *download_id* is given (and truthy), the path is narrowed to that
    single job; otherwise it addresses the whole job list.
    """
    path = "D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job"
    if download_id:
        path += "[download-id={}]".format(quoted_key(download_id))
    return self._project.add_project(path)
def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy,
                                   base_proxy, cloud_account, use_accounts):
    """Instantiate the ping-pong NSR with one input parameter and verify the
    parameter shows up in the running ns-instance-config.
    """

    def verify_input_parameters(running_config, config_param):
        """ Verify the configured parameter set against the running configuration """
        for run_input_param in running_config.input_parameter:
            if (run_input_param.xpath == config_param.xpath and
                    run_input_param.value == config_param.value):
                return True
        assert False, (
            "Verification of configured input parameters: { xpath:%s, value:%s} "
            "is unsuccessful.\nRunning configuration: %s" %
            (config_param.xpath, config_param.value,
             running_config.input_parameter))

    # Use the first (and presumably only) NSD in the default project catalog
    catalog = nsd_proxy.get_config(
        '/rw-project:project[rw-project:name="default"]/nsd-catalog')
    nsd = catalog.nsd[0]

    # Build a single input parameter overriding the NSD vendor field
    input_parameters = []
    descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:vendor" % quoted_key(nsd.id)
    descr_value = "automation"
    in_param_id = str(uuid.uuid4())  # NOTE(review): unused local — likely leftover
    input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
        xpath=descr_xpath, value=descr_value)
    input_parameters.append(input_param_1)

    nsr_id = str(uuid.uuid4())
    # When use_accounts is set, also pin the NSR to the cloud account by name
    if use_accounts:
        nsr = rift.auto.descriptor.create_nsr(
            cloud_account.name, nsr_id, nsd,
            input_param_list=input_parameters,
            account=cloud_account.name, nsr_id=nsr_id)
    else:
        nsr = rift.auto.descriptor.create_nsr(
            cloud_account.name, nsr_id, nsd,
            input_param_list=input_parameters, nsr_id=nsr_id)

    logger.info("Instantiating the Network Service")
    rwnsr_proxy.create_config(
        '/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)

    # Opdata record must exist once the NSR is created
    nsr_opdata = rwnsr_proxy.get(
        '/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'
        .format(quoted_key(nsr.id)))
    assert nsr_opdata is not None

    # Verify the input parameter configuration
    running_config = rwnsr_proxy.get_config(
        "/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]"
        % quoted_key(nsr.id))
    for input_param in input_parameters:
        verify_input_parameters(running_config, input_param)
import argparse import asyncio import gi import logging import os import sys import unittest import xmlrunner from rift.mano.utils import project gi.require_version('RwKeyspec', '1.0') from gi.repository.RwKeyspec import quoted_key NAME = 'test' XPATH = "/rw-project:project[rw-project:name={}]".format(quoted_key(NAME)) class TestCase(unittest.TestCase): log = None @classmethod def set_logger(cls, log): cls.log = log def setUp(self): if not TestCase.log: log = logging.getLogger() log.setLevel(logging.ERROR) def test_create_from_xpath(self):
def xpath(self, transaction_id=None):
    """Return the project-qualified operational xpath for update jobs.

    A single job is addressed when a truthy *transaction_id* is supplied.
    """
    base = "D,/rw-pkg-mgmt:update-jobs/rw-pkg-mgmt:job"
    if transaction_id:
        base += "[transaction-id={}]".format(quoted_key(transaction_id))
    return self.project.add_project(base)
def get_xpath(sdn_name=None):
    """Build the operational xpath for an SDN account's connection-status.

    Without *sdn_name* the path covers every SDN account.
    """
    if sdn_name is None:
        name_filter = ''
    else:
        name_filter = "[rw-sdn:name=%s]" % quoted_key(sdn_name)
    return self._project.add_project(
        "D,/rw-sdn:sdn/rw-sdn:account{}/rw-sdn:connection-status".format(
            name_filter))
def get_xpath(ro_account_name=None):
    """Return the operational xpath for RO account instance-ref-count.

    Targets one account when *ro_account_name* is given; all accounts
    otherwise.
    """
    if ro_account_name is None:
        account_filter = ''
    else:
        account_filter = "[name=%s]" % quoted_key(ro_account_name)
    return "D,/rw-ro-account:ro-account-state/account{}/instance-ref-count".format(
        account_filter)
def xpath(self):
    """ path for this VLR """
    vlr_path = "D,/vlr:vlr-catalog/vlr:vlr[vlr:id={}]".format(
        quoted_key(self.vlr_id))
    return self._project.add_project(vlr_path)
def test_instantiate_ns_mem_check(self, logger, rwvnfr_proxy, nsd_proxy,
                                  rwnsr_proxy, rwvlr_proxy,
                                  cloud_account_name, descriptors):
    """It runs over a loop. In each loop, it instantiates a NS, terminates
    the NS, checks memory usage of the system. During memory check, it verifies
    whether current system mem usage exceeds base memory-usage by a defined
    threshold.
    """
    catalog = nsd_proxy.get_config(
        '/rw-project:project[rw-project:name="default"]/nsd-catalog')

    # Random NSD sequence generation for NS instantiation.
    # --multiple-ns-instantiate is "<iterations>,<hours>".
    iteration, no_of_hours = map(
        float, pytest.config.getoption('--multiple-ns-instantiate').split(','))
    nsd_count = len([pkg for pkg in descriptors if 'nsd.' in pkg])
    nsd_instantiate_seq = np.random.choice(list(range(nsd_count)), int(iteration))
    random.shuffle(nsd_instantiate_seq)
    logger.debug('nsd instantiaion sequence: {}'.format(
        [catalog.nsd[seq].name for seq in nsd_instantiate_seq]))

    # Collect mem-usage of the system (baseline for later comparisons)
    base_system_rss = get_mem_usage()
    print_mem_usage()

    start_time = time.time()
    total_duration_in_secs = no_of_hours * 60 * 60

    # Loop through NSD instantiation sequence and instantiate the NS
    for idx, seq in enumerate(nsd_instantiate_seq, 1):
        # Instantiating NS
        nsd = catalog.nsd[seq]
        logger.debug('Iteration {}: Instantiating NS {}'.format(idx, nsd.name))
        nsr = rift.auto.descriptor.create_nsr(cloud_account_name, nsd.name, nsd)
        rwnsr_proxy.create_config(
            '/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr',
            nsr)

        # Verify if NS reaches active state
        nsr_opdata = rwnsr_proxy.get(
            '/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'
            .format(quoted_key(nsr.id)))
        assert nsr_opdata is not None

        # Verify NSR instances enter 'running' operational-status
        for nsr in rwnsr_proxy.get(
                '/rw-project:project[rw-project:name="default"]/ns-instance-opdata'
        ).nsr:
            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
                quoted_key(nsr.ns_instance_config_ref))
            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=400)

        # Verify NSR instances enter 'configured' config-status
        for nsr in rwnsr_proxy.get(
                '/rw-project:project[rw-project:name="default"]/ns-instance-opdata'
        ).nsr:
            xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
                quoted_key(nsr.ns_instance_config_ref))
            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)

        time.sleep(30)  # Let it run for few secs before terminating it

        # Terminates the NSR
        rift.auto.descriptor.terminate_nsr(rwvnfr_proxy, rwnsr_proxy,
                                           rwvlr_proxy, logger)
        time.sleep(30)  # After NS termination, wait for few secs before collecting mem-usage

        # Get the mem-usage and compare it with base mem-usage
        print_mem_usage()
        curr_system_rss = get_mem_usage()
        threshold = 5  # max allowed % growth over the baseline RSS
        mem_usage_inc = 100 * (curr_system_rss - base_system_rss) / base_system_rss
        if mem_usage_inc > threshold:
            # NOTE(review): '%%' inside a str.format string renders literally
            # as '%%' — a single '%' was probably intended; confirm.
            assert False, 'There is an increase of {}%% during sequence {}. Base system-rss- {}; Current system-rss- {}'.format(
                mem_usage_inc, idx, base_system_rss, curr_system_rss)

        # Stop once the configured wall-clock budget is exhausted
        if (time.time() - start_time) > total_duration_in_secs:
            logger.debug(
                'NS instantiation has been happening for last {} hours (provided limit). Exiting.'
                .format(no_of_hours))
            break
def get_vnfd(vnfd_id):
    """Fetch the VNFD with *vnfd_id* from the default project's catalog."""
    catalog_entry = ("/rw-project:project[rw-project:name='default']/" +
                     "vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_id)))
    return proxy(RwProjectVnfdYang).get(catalog_entry)
def test_onboarded_vnfds_project_independent(
        self, descriptors, logger, rbac_platform_proxy, rw_conman_proxy,
        rw_user_proxy, rw_project_proxy, rbac_user_passwd, user_domain,
        fmt_vnfd_catalog_xpath, session_class, confd_host, fmt_vnfd_id_xpath,
        rw_rbac_int_proxy):
    """Same VNFDs on boarded in two different projects. VNFD changes in one
    project shouldn't affect another."""
    # user -> (project, role) mapping; user1 edits, user2 must not see edits
    map_project_user_roles = {
        'user1': ('project_test_onboarded_vnfds_project_independent_1',
                  'rw-project-mano:catalog-admin'),
        'user2': ('project_test_onboarded_vnfds_project_independent_2',
                  'rw-project:project-admin'),
    }
    user_to_modify_vnfds, user_not_supposed_to_see_vnfd_changes = 'user1', 'user2'
    modified_vnfd_name = 'test_rbac_vnfd'
    user_sessions = {}
    logger.debug('descriptors being used: {}'.format(descriptors))

    # Create each project/user, assign the role, and onboard the same
    # descriptor set into each project.
    for user, project_role_tuple in map_project_user_roles.items():
        project_name, role = project_role_tuple
        logger.debug('Creating user {} with {}'.format(user, project_role_tuple))
        rift.auto.mano.create_project(rw_conman_proxy, project_name)
        rift.auto.mano.create_user(rw_user_proxy, user, rbac_user_passwd,
                                   user_domain)
        # Platform roles are assigned through a different proxy than
        # project-scoped roles.
        if 'platform' in role:
            rift.auto.mano.assign_platform_role_to_user(
                rbac_platform_proxy, role, user, user_domain, rw_rbac_int_proxy)
        else:
            rift.auto.mano.assign_project_role_to_user(
                rw_project_proxy, role, user, project_name, user_domain,
                rw_rbac_int_proxy)

        logger.debug('User {} onboarding the packages'.format(user))
        user_session = rift.auto.mano.get_session(session_class, confd_host,
                                                  user, rbac_user_passwd)
        user_sessions[user] = user_session
        for descriptor in descriptors:
            rift.auto.descriptor.onboard(user_session, descriptor,
                                         project=project_name)

    # As user1, rename every VNFD in its project and verify the rename stuck.
    vnfd_pxy = user_sessions[user_to_modify_vnfds].proxy(RwProjectVnfdYang)
    vnfd_xpath = '{}/vnfd'.format(
        fmt_vnfd_catalog_xpath.format(project=quoted_key(
            map_project_user_roles[user_to_modify_vnfds][0])))
    for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
        logger.debug('Changing the vnfd name from {} to {} for user {}'.format(
            vnfd.name, modified_vnfd_name, user_to_modify_vnfds))
        vnfd.name = modified_vnfd_name
        vnfd_pxy.replace_config(
            fmt_vnfd_id_xpath.format(
                project=quoted_key(
                    map_project_user_roles[user_to_modify_vnfds][0]),
                vnfd_id=quoted_key(vnfd.id)), vnfd)
    for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
        assert vnfd.name == modified_vnfd_name

    # As user2 (different project), verify its copies kept their names.
    vnfd_pxy = user_sessions[user_not_supposed_to_see_vnfd_changes].proxy(
        RwProjectVnfdYang)
    vnfd_xpath = '{}/vnfd'.format(
        fmt_vnfd_catalog_xpath.format(project=quoted_key(
            map_project_user_roles[user_not_supposed_to_see_vnfd_changes][0])))
    for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
        logger.debug(
            'Verifying the vnfd name {} for user {} did not change to {}'.
            format(vnfd.name, user_not_supposed_to_see_vnfd_changes,
                   modified_vnfd_name))
        assert vnfd.name != modified_vnfd_name
def project_access(user_session):
    """Assert that *user_session* can read the test project's project-state."""
    conman_proxy = user_session.proxy(RwProjectYang)
    state_xpath = '/project[name={}]/project-state'.format(
        quoted_key(test_proj))
    assert conman_proxy.get_config(state_xpath, list_obj=True)
def test_delete_project_and_vim_accounts(
        self, rw_conman_proxy, rw_user_proxy, logger, rbac_user_passwd,
        user_domain, rw_project_proxy, rw_rbac_int_proxy, mgmt_session,
        cloud_module, cloud_account, descriptors, fmt_nsd_catalog_xpath,
        session_class, confd_host):
    """Testing vim accounts."""
    # Create a project and three cloud accounts for it.
    rift.auto.mano.create_project(rw_conman_proxy, 'vim_project')
    rift.auto.mano.assign_project_role_to_user(rw_project_proxy,
                                               'rw-project:project-admin',
                                               'admin', 'vim_project',
                                               'system', rw_rbac_int_proxy)
    for idx in range(1, 4):
        rift.auto.mano.create_cloud_account(mgmt_session, cloud_account,
                                            'vim_project',
                                            'cloud_account_{}'.format(idx))
    # Uploading descriptors
    for descriptor in descriptors:
        rift.auto.descriptor.onboard(mgmt_session, descriptor,
                                     project='vim_project')
    nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
    nsd_catalog = nsd_pxy.get_config(
        fmt_nsd_catalog_xpath.format(project=quoted_key('vim_project')))
    assert nsd_catalog
    nsd = nsd_catalog.nsd[0]
    nsr = rift.auto.descriptor.create_nsr('cloud_account_1', nsd.name, nsd)

    # Instantiating the nsr
    rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
    rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger,
                                         project='vim_project')

    # Trying to delete the project before taking the instance down
    # (must be rejected while an NS instance is still running)
    with pytest.raises(Exception,
                       message="Project deletion should've failed"):
        rift.auto.mano.delete_project(rw_conman_proxy, 'vim_project')

    # Trying to delete the vim account before taking the instance down
    with pytest.raises(Exception,
                       message="Vim account deletion should've failed"):
        rift.auto.mano.delete_cloud_account(mgmt_session, 'cloud_account_1',
                                            'vim_project')

    # Terminating the nsr
    rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
    rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
    rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy,
                                       logger, project='vim_project')

    # Delete all cloud accounts for the project
    for idx in range(1, 4):
        rift.auto.mano.delete_cloud_account(mgmt_session,
                                            'cloud_account_{}'.format(idx),
                                            'vim_project')

    # Delete the uploaded descriptors
    vnfd_proxy = mgmt_session.proxy(RwProjectVnfdYang)
    self.delete_records(nsd_pxy, vnfd_proxy, 'vim_project')

    # Delete the project
    rift.auto.mano.delete_project(rw_conman_proxy, 'vim_project')

    # Check in rw-rbac-internal if project is removed
    rwinternal_xpath = '/rw-rbac-internal/role'
    response = (rw_rbac_int_proxy.get(rwinternal_xpath,
                                      list_obj=True)).as_dict()['role']
    keys = [role['keys'] for role in response if 'keys' in role]
    for key in keys:
        assert 'vim_project' not in key, "Improper project deletion"
def test_descriptor_nsr_persistence_check(
        self, rw_conman_proxy, rw_user_proxy, rw_project_proxy, cloud_account,
        cloud_module, mgmt_session, descriptors, logger, user_domain,
        session_class, confd_host, rbac_user_passwd, fmt_nsd_catalog_xpath,
        rw_rbac_int_proxy):
    """Creates a project and cloud account for it. Uploads descriptors.
    Logs in as project-admin and checks if the uploaded descriptors are still
    there, after which he logs out. Then instantiates nsr. Again logs in as
    project admin and checks if the instantiated nsr is still there."""
    # Creating a project, assigning project admin and creating
    # a cloud account for the project — repeated across 5 projects.
    for idx in range(1, 6):
        rift.auto.mano.create_project(rw_conman_proxy,
                                      'xcloud_project_{}'.format(idx))
        rift.auto.mano.create_user(rw_user_proxy,
                                   'project_admin_{}'.format(idx),
                                   rbac_user_passwd, user_domain)
        rift.auto.mano.assign_project_role_to_user(
            rw_project_proxy, 'rw-project:project-admin',
            'project_admin_{}'.format(idx), 'xcloud_project_{}'.format(idx),
            user_domain, rw_rbac_int_proxy)
        rift.auto.mano.create_cloud_account(
            mgmt_session, cloud_account, 'xcloud_project_{}'.format(idx))

        # Uploading descriptors and verifying its existence from another
        # user (project admin)
        for descriptor in descriptors:
            rift.auto.descriptor.onboard(
                mgmt_session, descriptor,
                project='xcloud_project_{}'.format(idx))
        user_session = rift.auto.mano.get_session(
            session_class, confd_host, 'project_admin_{}'.format(idx),
            rbac_user_passwd)
        project_admin_nsd_pxy = user_session.proxy(RwProjectNsdYang)
        nsd_catalog = project_admin_nsd_pxy.get_config(
            fmt_nsd_catalog_xpath.format(
                project=quoted_key('xcloud_project_{}'.format(idx))))
        assert nsd_catalog, "Descriptor Not found on try no: {}".format(idx)
        nsd = nsd_catalog.nsd[0]
        nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name,
                                              nsd)
        rift.auto.mano.close_session(user_session)

        # Instantiating the nsr and verifying its existence from another
        # user (project admin), after which it gets terminated
        admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
        admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
        admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
        rift.auto.descriptor.instantiate_nsr(
            nsr, admin_rwnsr_pxy, logger,
            project='xcloud_project_{}'.format(idx))
        user_session = rift.auto.mano.get_session(
            session_class, confd_host, 'project_admin_{}'.format(idx),
            rbac_user_passwd)
        pxy = user_session.proxy(RwNsrYang)
        nsr_opdata = pxy.get(
            '/rw-project:project[rw-project:name={}]/ns-instance-opdata'.
            format(quoted_key('xcloud_project_{}'.format(idx))))
        nsrs = nsr_opdata.nsr
        # Wait until every NSR in the project reaches 'configured'
        for nsr in nsrs:
            xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
                quoted_key('xcloud_project_{}'.format(idx)),
                quoted_key(nsr.ns_instance_config_ref))
            pxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=60)
        rift.auto.mano.close_session(user_session)
        rift.auto.descriptor.terminate_nsr(
            admin_rwvnfr_pxy, admin_rwnsr_pxy, admin_rwvlr_pxy, logger,
            project='xcloud_project_{}'.format(idx))
def test_multi_projects_multi_vnf(self, rw_project_proxy, rw_conman_proxy,
                                  cloud_account, cloud_module, descriptors,
                                  session_class, confd_host, user_domain,
                                  mgmt_session, fmt_nsd_catalog_xpath, logger,
                                  rw_rbac_int_proxy):
    """Creates multiple projects, cloud accounts and then instantiates them.
    Then it lets the instantiated NS's run for a minute after which gets
    terminated. Use the SCALE_FACTOR to adjust the number of instantiations."""

    def instantiate_nsr_not_wait(nsr, rwnsr_proxy, project='default'):
        # Fire-and-forget variant of instantiate_nsr: creates the config and
        # only checks the opdata record appeared; no wait for running state.
        # NOTE(review): this xpath uses the unprefixed '/project[name=...]'
        # form while the create_config below uses the 'rw-project:' prefixed
        # form — presumably both are accepted by the proxy; confirm.
        ns_instance_opdata_xpath = '/project[name={}]/ns-instance-opdata'.format(
            quoted_key(project))
        rwnsr_proxy.create_config(
            '/rw-project:project[rw-project:name={}]/nsr:ns-instance-config/nsr:nsr'
            .format(quoted_key(project)), nsr)
        nsr_opdata = rwnsr_proxy.get(
            '{}/nsr[ns-instance-config-ref={}]'.format(
                ns_instance_opdata_xpath, quoted_key(nsr.id)))
        assert nsr_opdata is not None
        nsr_opdata = rwnsr_proxy.get(ns_instance_opdata_xpath)
        # Raises IndexError if the new NSR is not present in opdata
        nsr_ = [
            nsr_ for nsr_ in nsr_opdata.nsr
            if nsr_.ns_instance_config_ref == nsr.id
        ][0]

    # Creating multiple projects according to the scale factor
    SCALE_FACTOR = 5
    PROJECT_LIST = {}
    for idx in range(1, SCALE_FACTOR + 1):
        rift.auto.mano.create_project(rw_conman_proxy,
                                      'cloud_project_{}'.format(idx))
        PROJECT_LIST['cloud_project_{}'.format(idx)] = None
        rift.auto.mano.assign_project_role_to_user(
            rw_project_proxy, 'rw-project:project-admin', 'admin',
            'cloud_project_{}'.format(idx), 'system', rw_rbac_int_proxy)

    # Creating cloud accounts, uploading descriptors, instantiating NS
    for project_name in PROJECT_LIST:
        rift.auto.mano.create_cloud_account(mgmt_session, cloud_account,
                                            project_name)
        for descriptor in descriptors:
            rift.auto.descriptor.onboard(mgmt_session, descriptor,
                                         project=project_name)
        admin_nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
        nsd_catalog = admin_nsd_pxy.get_config(
            fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
        assert nsd_catalog
        nsd = nsd_catalog.nsd[0]
        nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name,
                                              nsd)
        PROJECT_LIST[project_name] = nsr

    # Kick off all instantiations without waiting on each one
    for project_name, NSR in PROJECT_LIST.items():
        admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
        admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
        admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
        instantiate_nsr_not_wait(NSR, admin_rwnsr_pxy, project=project_name)

    # Waiting for NS's to get started and configured.
    for project_name in PROJECT_LIST:
        admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
        nsr_opdata = admin_rwnsr_pxy.get(
            '/rw-project:project[rw-project:name={}]/ns-instance-opdata'.
            format(quoted_key(project_name)))
        nsrs = nsr_opdata.nsr
        for nsr in nsrs:
            xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
                quoted_key(project_name),
                quoted_key(nsr.ns_instance_config_ref))
            admin_rwnsr_pxy.wait_for(xpath, "running", fail_on=['failed'],
                                     timeout=400)
        for nsr in nsrs:
            xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
                quoted_key(project_name),
                quoted_key(nsr.ns_instance_config_ref))
            admin_rwnsr_pxy.wait_for(xpath, "configured", fail_on=['failed'],
                                     timeout=400)

    # Letting the started NS's run for a minute after which is terminated
    start_time = time.time()
    while (time.time() - start_time) < 60:
        time.sleep(2)
    for project_name in PROJECT_LIST:
        rift.auto.descriptor.terminate_nsr(admin_rwvnfr_pxy, admin_rwnsr_pxy,
                                           admin_rwvlr_pxy, logger,
                                           project=project_name)
def test_ha_multiple_failovers(self, revertive_pref_host, active_confd_host,
                               standby_confd_host, standby_lp_node_obj,
                               active_lp_node_obj, logger, fmt_cloud_xpath,
                               cloud_account, test_project, active_site_name,
                               standby_site_name, standby_mgmt_session,
                               active_mgmt_session, descriptors):
    """Perform repeated HA failovers between the active and standby sites,
    onboarding descriptors along the way and comparing config/op-data
    across each switchover.
    """
    count, failover_count = 1, 10
    current_actv_mgmt_session, current_stdby_mgmt_session = active_mgmt_session, standby_mgmt_session
    current_actv_lp_node_obj = active_lp_node_obj
    # Reversed so that .pop() consumes descriptors in their original order
    descriptor_list = descriptors['haproxy'][::-1] + descriptors['vdud_cfgfile'][::-1]

    # kwargs for the two failover directions
    original_active_as_standby_kwargs = {'revertive_pref_host': revertive_pref_host, 'new_active_ip': standby_confd_host, 'new_active_site': standby_site_name, 'new_standby_ip': active_confd_host, 'new_standby_site': active_site_name}
    original_active_as_active_kwargs = {'revertive_pref_host': revertive_pref_host, 'new_active_ip': active_confd_host, 'new_active_site': active_site_name, 'new_standby_ip': standby_confd_host, 'new_standby_site': standby_site_name}

    while count <= failover_count:
        # Odd iterations fail over away from the original active; even ones
        # fail back.
        kwargs = original_active_as_active_kwargs
        if count % 2 == 1:
            kwargs = original_active_as_standby_kwargs

        # upload descriptor (skipped mid-run on iterations 5-8)
        if count not in [5, 6, 7, 8]:
            descriptor = descriptor_list.pop()
            rift.auto.descriptor.onboard(current_actv_mgmt_session,
                                         descriptor, project=test_project)

        # Collect config, op-data from current active before doing a failover
        current_actv_lp_node_obj.session = None
        current_actv_lp_node_obj.collect_data()
        time.sleep(5)

        logger.debug('Failover Iteration - {}. Current standby {} will be the new active'.format(
            count, current_stdby_mgmt_session.host))
        mano.indirect_failover(**kwargs)

        # Swap the active/standby bookkeeping to match the new topology
        last_actv_lp_node_obj = current_actv_lp_node_obj
        current_actv_mgmt_session, current_stdby_mgmt_session = active_mgmt_session, standby_mgmt_session
        current_actv_lp_node_obj = active_lp_node_obj
        if count % 2 == 1:
            current_actv_lp_node_obj = standby_lp_node_obj
            current_actv_mgmt_session, current_stdby_mgmt_session = standby_mgmt_session, active_mgmt_session

        logger.debug('Waiting for the new active {} to come up'.format(
            current_actv_mgmt_session.host))
        mano.wait_for_standby_to_become_active(current_actv_mgmt_session)

        # Wait for NSR to become active
        rw_new_active_cloud_pxy = current_actv_mgmt_session.proxy(RwCloudYang)
        rwnsr_proxy = current_actv_mgmt_session.proxy(RwNsrYang)
        rw_new_active_cloud_pxy.wait_for(
            fmt_cloud_xpath.format(
                project=quoted_key(test_project),
                account_name=quoted_key(cloud_account.name)) +
            '/connection-status/status', 'success', timeout=60,
            fail_on=['failure'])
        nsr_opdata = rwnsr_proxy.get(
            '/rw-project:project[rw-project:name={project}]/ns-instance-opdata'.format(
                project=quoted_key(test_project)))
        assert nsr_opdata
        nsrs = nsr_opdata.nsr
        for nsr in nsrs:
            xpath = "/rw-project:project[rw-project:name={project}]/ns-instance-opdata/nsr[ns-instance-config-ref={config_ref}]/config-status".format(
                project=quoted_key(test_project),
                config_ref=quoted_key(nsr.ns_instance_config_ref))
            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'],
                                 timeout=400)

        # Collect config, op-data from new active
        current_actv_lp_node_obj.session = None
        current_actv_lp_node_obj.collect_data()

        # Compare data between last active and current active
        current_actv_lp_node_obj.compare(last_actv_lp_node_obj)
        count += 1
def test_rbac_mano_xpaths_access(
        self, mano_xpaths, logger, mano_roles_xpaths_mapping,
        xpath_module_mapping, session_class, project_keyed_xpath, user_domain,
        rbac_platform_proxy, rw_project_proxy, rbac_user_passwd, confd_host,
        rw_user_proxy, rw_rbac_int_proxy):
    """Verify Mano roles/Permission mapping works (Verifies only read access
    for all Xpaths)."""
    project_name = 'default'
    # Skipping download-jobs as it is not yet implemented from MANO side.
    # Others are skipped becuase they need Juju, Openmano configurations etc.
    skip_xpaths = ('/download-jobs', '/config-agent',
                   '/resource-orchestrator', '/datacenters', '/upload-jobs')

    for index, (role, xpath_keys_tuple) in enumerate(
            mano_roles_xpaths_mapping.items()):
        # Create an user and assign a role
        user_name = 'user-{}'.format(index)
        rift.auto.mano.create_user(rw_user_proxy, user_name,
                                   rbac_user_passwd, user_domain)
        logger.debug('Creating an user {} with role {}'.format(
            user_name, role))
        if 'platform' in role:
            rift.auto.mano.assign_platform_role_to_user(
                rbac_platform_proxy, role, user_name, user_domain,
                rw_rbac_int_proxy)
        else:
            rift.auto.mano.assign_project_role_to_user(
                rw_project_proxy, role, user_name, project_name, user_domain,
                rw_rbac_int_proxy)

        # Get user session
        user_session = rift.auto.mano.get_session(session_class, confd_host,
                                                  user_name, rbac_user_passwd)

        # go through each of its xpaths keys and try to access
        for xpath_key in xpath_keys_tuple:
            for xpath in mano_xpaths[xpath_key]:
                if xpath in skip_xpaths:
                    continue
                logger.debug(
                    'User {} with role {} trying to access xpath {}'.format(
                        user_name, role, xpath))
                # Resolve which yang module and proxy getter serve this xpath
                yang_module, get_type = [
                    yang_module
                    for xpath_tuple, yang_module in
                    xpath_module_mapping.items() if xpath in xpath_tuple
                ][0]
                user_pxy = user_session.proxy(yang_module)
                get_data_func = getattr(user_pxy, get_type)
                assert get_data_func(
                    project_keyed_xpath.format(
                        project_name=quoted_key(project_name)) + xpath)

        # go through remaining xpaths keys which this user-role not part of
        # and try to access; it should fail
        access_denied_xpath_keys_tuple = set(
            mano_xpaths.keys()).difference(xpath_keys_tuple)
        for xpath_key in access_denied_xpath_keys_tuple:
            for xpath in mano_xpaths[xpath_key]:
                if xpath in skip_xpaths:
                    continue
                logger.debug(
                    'User {} with role {} trying to access xpath {}. It should get None'
                    .format(user_name, role, xpath))
                yang_module, get_type = [
                    yang_module
                    for xpath_tuple, yang_module in
                    xpath_module_mapping.items() if xpath in xpath_tuple
                ][0]
                user_pxy = user_session.proxy(yang_module)
                get_data_func = getattr(user_pxy, get_type)
                assert get_data_func(
                    project_keyed_xpath.format(
                        project_name=quoted_key(project_name)) +
                    xpath) is None
def get_xpath(ro_account_name=None):
    """Return the operational xpath for an RO account's connection-status.

    Args:
        ro_account_name: Name of the RO account to address. Defaults to
            None, in which case the path covers all accounts — this makes
            the helper consistent with the sibling get_xpath helpers for
            instance-ref-count, config-data and cloud connection-status,
            which all accept an optional name. Passing a name is unchanged,
            so existing callers are unaffected.
    """
    return "D,/rw-ro-account:ro-account-state/account{}/connection-status".format(
        "[name=%s]" % quoted_key(ro_account_name)
        if ro_account_name is not None else '')
def test_upload_delete_descriptors(self, logger, mgmt_session, vnfd_proxy,
                                   descriptors, vnf_onboard_delete):
    """Randomly upload and delete VNFs. With each upload/delete, verify
    if the VNF gets uploaded/deleted successfully.
    """
    xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]"
    # --vnf-onboard-delete is "<iterations>,<vnf count>"
    iteration, vnf_count = map(int, vnf_onboard_delete.split(','))
    # Get the VNF paths to be used for onboarding
    all_vnfs = [
        pkg_path for pkg_path in descriptors
        if '_nsd' not in os.path.basename(pkg_path)
    ]
    if vnf_count > len(all_vnfs):
        vnf_count = len(all_vnfs)
    available_vnfs = random.sample(all_vnfs, vnf_count)

    # Get the add, delete iterations
    add_del_seq = list(np.random.choice(['add', 'del'], iteration))
    random.shuffle(add_del_seq)
    logger.info('Vnf add-delete iteration sequence: {}'.format(add_del_seq))

    # Maps onboarded package path -> vnfd id
    uploaded_vnfs = {}

    def get_vnfd_list():
        """Returns list of VNFDs"""
        vnfd_obj = vnfd_proxy.get(
            "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd",
            list_obj=True)
        return vnfd_obj.vnfd if vnfd_obj else []

    def delete_vnfd():
        """Deletes a VNFD"""
        vnf_path, vnfd_id = random.choice(list(uploaded_vnfs.items()))
        logger.info('Deleting VNF {} having id {}'.format(
            os.path.basename(vnf_path), vnfd_id))
        vnfd_proxy.delete_config(xpath.format(quoted_key(vnfd_id)))
        uploaded_vnfs.pop(vnf_path)
        available_vnfs.append(vnf_path)
        # The deleted id must no longer appear in the catalog
        assert not [vnfd for vnfd in get_vnfd_list() if vnfd.id == vnfd_id]

    for op_type in add_del_seq:
        if op_type == 'del':
            if uploaded_vnfs:
                delete_vnfd()
                continue
            # Nothing to delete yet — fall through to an add instead
            op_type = 'add'

        if op_type == 'add':
            if not available_vnfs:
                # Everything is onboarded; make room by deleting one
                delete_vnfd()
                continue
            vnf_path = random.choice(available_vnfs)
            logger.info('Adding VNF {}'.format(os.path.basename(vnf_path)))
            rift.auto.descriptor.onboard(mgmt_session, vnf_path)
            vnfs = get_vnfd_list()
            assert len(vnfs) == len(uploaded_vnfs) + 1
            # Exactly one new (previously unseen) vnfd id must appear
            vnfd = [
                vnfd for vnfd in vnfs
                if vnfd.id not in list(uploaded_vnfs.values())
            ]
            assert len(vnfd) == 1
            vnfd = vnfd[0]
            assert vnfd.name
            assert vnfd.connection_point
            assert vnfd.vdu
            uploaded_vnfs[vnf_path] = vnfd.id
            available_vnfs.remove(vnf_path)
            assert len(get_vnfd_list()) == len(uploaded_vnfs)

    logger.info('Onboarded VNFs : {}'.format(uploaded_vnfs))
    assert len(available_vnfs) + len(uploaded_vnfs) == vnf_count
    # cleanup - Delete VNFs(if any)
    for vnfd_id in uploaded_vnfs.values():
        vnfd_proxy.delete_config(xpath.format(quoted_key(vnfd_id)))
def get_xpath(ro_account_name=None):
    """Return the operational xpath for RO account config-data.

    All accounts are covered when *ro_account_name* is omitted.
    """
    if ro_account_name is not None:
        name_filter = "[name=%s]" % quoted_key(ro_account_name)
    else:
        name_filter = ''
    return "D,/rw-ro-account:ro-account-state/account{}/config-data".format(
        name_filter)
def test_update_vnfd(self, vnfd_proxy, iteration,
                     port_sequencing_intf_positions):
    """Updates few fields of ping pong VNFDs and verify those changes """
    xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]"
    vnfd_catalog = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd"
    # Port-sequencing checks only make sense after the first iteration
    if iteration == 0 and pytest.config.getoption("--port-sequencing"):
        pytest.skip()

    def get_vnfd():
        """Return {'ping': vnfd, 'pong': vnfd} from the default catalog."""
        vnfds = vnfd_proxy.get(vnfd_catalog, list_obj=True)
        dict_ = {}
        # Get ping pong VNFDs
        for vnfd in vnfds.vnfd:
            if 'ping' in vnfd.name:
                dict_['ping'] = vnfd
            if 'pong' in vnfd.name:
                dict_['pong'] = vnfd
        return dict_

    vnfds_dict = get_vnfd()
    update_data = {
        'ping': {
            'static_ip_address': '31.31.31.60'
        },
        'pong': {
            'static_ip_address': '31.31.31.90'
        }
    }
    # Copy so pops below don't mutate the fixture-provided list
    port_sequencing_intf_positions_tmp = port_sequencing_intf_positions[:]

    # Modify/add fields in VNFDs
    for name_, vnfd in vnfds_dict.items():
        if pytest.config.getoption('--update-vnfd-instantiate'):
            vnfd.vdu[0].interface[1].static_ip_address = update_data[
                name_]['static_ip_address']
        if pytest.config.getoption('--port-sequencing'):
            vnfd_intf_list = vnfd.vdu[0].interface
            # for ping vnfd, remove positional values from all interfaces
            # for pong vnfd, modify the positional values as per fixture
            # port_sequencing_intf_positions
            if 'ping' in vnfd.name:
                # Rebuild the interface list without 'position' fields:
                # pop each entry, strip the key from its dict form, re-add.
                tmp_intf_list = []
                for i in range(len(vnfd_intf_list)):
                    tmp_intf_dict = vnfd_intf_list[-1].as_dict()
                    del tmp_intf_dict['position']
                    vnfd_intf_list.pop()
                    tmp_intf_list.append(tmp_intf_dict)
                for intf_dict_without_positional_values in tmp_intf_list:
                    new_intf = vnfd.vdu[0].interface.add()
                    new_intf.from_dict(intf_dict_without_positional_values)
            if 'pong' in vnfd.name:
                for intf in vnfd_intf_list:
                    if 'position' in intf:
                        intf.position = port_sequencing_intf_positions_tmp.pop()

    # Update/save the VNFDs
    for vnfd in vnfds_dict.values():
        vnfd_proxy.replace_config(xpath.format(quoted_key(vnfd.id)), vnfd)

    # Match whether data is updated
    vnfds_dict = get_vnfd()
    assert vnfds_dict
    for name_, vnfd in vnfds_dict.items():
        if pytest.config.getoption('--update-vnfd-instantiate'):
            assert vnfd.vdu[0].interface[1].static_ip_address == update_data[
                name_]['static_ip_address']
        if pytest.config.getoption('--port-sequencing'):
            if 'ping' in vnfd.name:
                for intf in vnfd.vdu[0].interface:
                    assert 'position' not in intf.as_dict()
            if 'pong' in vnfd.name:
                tmp_positional_values_list = []
                for intf in vnfd.vdu[0].interface:
                    if 'position' in intf.as_dict():
                        tmp_positional_values_list.append(intf.position)
                assert set(tmp_positional_values_list) == set(
                    port_sequencing_intf_positions)
def resmgr_path(self):
    """ path for resource-mgr"""
    event_path = (
        "D,/rw-resource-mgr:resource-mgmt"
        "/vlink-event/vlink-event-data[event-id={}]".format(
            quoted_key(self.event_id)))
    return self._project.add_project(event_path)
def get_xpath(cloud_name=None):
    """Return the operational xpath for a cloud account's connection-status.

    Every cloud account is covered when *cloud_name* is omitted.
    """
    if cloud_name is not None:
        name_filter = "[name=%s]" % quoted_key(cloud_name)
    else:
        name_filter = ''
    return "D,/rw-cloud:cloud/account{}/connection-status".format(name_filter)