def __init__(self):
    def get_conf(parameter):
        return ft_utils.get_functest_config(parameter)

    super(VPingBase, self).__init__()
    self.logger = None
    self.functest_repo = ft_constants.FUNCTEST_REPO_DIR
    self.repo = get_conf('general.directories.dir_vping')
    self.vm1_name = get_conf('vping.vm_name_1')
    self.vm2_name = get_conf('vping.vm_name_2')
    self.vm_boot_timeout = 180
    self.vm_delete_timeout = 100
    self.ping_timeout = get_conf('vping.ping_timeout')

    self.image_name = get_conf('vping.image_name')
    self.image_filename = get_conf('general.openstack.image_file_name')
    self.image_format = get_conf('general.openstack.image_disk_format')
    self.image_path = \
        "%s/%s" % (get_conf('general.directories.dir_functest_data'),
                   self.image_filename)
    self.flavor_name = get_conf('vping.vm_flavor')

    # NEUTRON Private Network parameters
    self.private_net_name = get_conf('vping.vping_private_net_name')
    self.private_subnet_name = get_conf('vping.vping_private_subnet_name')
    self.private_subnet_cidr = get_conf('vping.vping_private_subnet_cidr')
    self.router_name = get_conf('vping.vping_router_name')
    self.sg_name = get_conf('vping.vping_sg_name')
    self.sg_desc = get_conf('vping.vping_sg_descr')

    self.neutron_client = os_utils.get_neutron_client()
    self.glance_client = os_utils.get_glance_client()
    self.nova_client = os_utils.get_nova_client()
def main():
    logger.info("Generating OpenStack snapshot...")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    keystone_client = os_utils.get_keystone_client()
    cinder_client = os_utils.get_cinder_client()

    if not os_utils.check_credentials():
        logger.error("Please source the openrc credentials and run the "
                     "script again.")
        exit(-1)

    snapshot = {}
    snapshot.update(get_instances(nova_client))
    snapshot.update(get_images(nova_client))
    snapshot.update(get_volumes(cinder_client))
    snapshot.update(get_networks(neutron_client))
    snapshot.update(get_routers(neutron_client))
    snapshot.update(get_security_groups(neutron_client))
    snapshot.update(get_floatinips(nova_client))
    snapshot.update(get_users(keystone_client))
    snapshot.update(get_tenants(keystone_client))

    with open(OS_SNAPSHOT_FILE, 'w+') as yaml_file:
        yaml_file.write(yaml.safe_dump(snapshot, default_flow_style=False))
        yaml_file.seek(0)
        logger.debug("OpenStack snapshot found in the deployment:\n%s"
                     % yaml_file.read())
    logger.debug("NOTE: These objects will NOT be deleted after "
                 "running the test.")
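# A minimal sketch of one of the snapshot helpers called above (the real
# implementations live elsewhere in this module); it assumes a
# python-neutronclient v2.0 client, and the 'networks' key mirrors what the
# cleanup script later reads back from OS_SNAPSHOT_FILE.
def get_networks(neutron_client):
    logger.debug("Getting networks...")
    networks = neutron_client.list_networks()['networks']
    return {'networks': [network['id'] for network in networks]}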
def get_av_zones():
    '''
    Return the availability zone each host belongs to
    '''
    nova_client = os_utils.get_nova_client()
    hosts = os_utils.get_hypervisors(nova_client)
    return ['nova::{0}'.format(host) for host in hosts]
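# A plausible implementation of the os_utils.get_hypervisors helper used
# above, assuming a python-novaclient client; returning only hypervisors
# whose state is "up" is an assumption.
def get_hypervisors(nova_client):
    nodes = []
    for hypervisor in nova_client.hypervisors.list():
        if hypervisor.state == "up":
            nodes.append(hypervisor.hypervisor_hostname)
    return nodes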
def __init__(self):
    super(VPingBase, self).__init__()
    self.logger = None
    self.functest_repo = CONST.dir_repo_functest
    self.repo = CONST.dir_vping
    self.vm1_name = CONST.vping_vm_name_1
    self.vm2_name = CONST.vping_vm_name_2
    self.vm_boot_timeout = 180
    self.vm_delete_timeout = 100
    self.ping_timeout = CONST.vping_ping_timeout

    self.image_name = CONST.vping_image_name
    self.image_filename = CONST.openstack_image_file_name
    self.image_format = CONST.openstack_image_disk_format
    self.image_path = os.path.join(CONST.dir_functest_data,
                                   self.image_filename)
    self.flavor_name = CONST.vping_vm_flavor

    # NEUTRON Private Network parameters
    self.private_net_name = CONST.vping_private_net_name
    self.private_subnet_name = CONST.vping_private_subnet_name
    self.private_subnet_cidr = CONST.vping_private_subnet_cidr
    self.router_name = CONST.vping_router_name
    self.sg_name = CONST.vping_sg_name
    self.sg_desc = CONST.vping_sg_desc

    self.neutron_client = os_utils.get_neutron_client()
    self.glance_client = os_utils.get_glance_client()
    self.nova_client = os_utils.get_nova_client()
def delete_instances():
    n = os_utils.get_nova_client()
    instances = os_utils.get_instances(n)
    if instances is None:
        return
    for inst in instances:
        logger.info("Removing instance: {0}".format(inst.id))
        os_utils.delete_instance(n, inst.id)
def delete_floating_ips():
    n = os_utils.get_nova_client()
    fips = os_utils.get_floating_ips(n)
    if fips is None:
        return
    for fip in fips:
        logger.info("Removing floating ip: {0}".format(fip.ip))
        os_utils.delete_floating_ip(n, fip.id)
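# The two cleanup helpers above share the same shape; a hedged generic
# version (hypothetical, not part of os_utils) could factor out the loop:
def delete_all(get_resources, delete_resource, label):
    # e.g. delete_all(os_utils.get_instances,
    #                 os_utils.delete_instance, "instance")
    n = os_utils.get_nova_client()
    resources = get_resources(n)
    if resources is None:
        return
    for res in resources:
        logger.info("Removing {0}: {1}".format(label, res.id))
        delete_resource(n, res.id)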
def clean(self):
    self.main_agent.get_agent(
        "nsr",
        project_id=self.ob_projectid).delete(self.ob_nsr_id)
    time.sleep(5)
    os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
                             instance_id=self.ob_instance_id)
    # Question: does clean() also remove the VM?
    # I think so, since it is going to remove the tenant...
    super(ImsVnf, self).clean()
def __init__(self, **kwargs):
    if "case_name" not in kwargs:
        kwargs["case_name"] = "juju_epc"
    super(JujuEpc, self).__init__(**kwargs)

    # Retrieve the configuration
    self.case_dir = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/vnf/epc')
    try:
        self.config = CONST.__getattribute__(
            'vnf_{}_config'.format(self.case_name))
    except Exception:
        raise Exception("VNF config file not found")
    config_file = os.path.join(self.case_dir, self.config)

    self.orchestrator = dict(
        requirements=get_config("orchestrator.requirements", config_file),
    )
    self.created_object = []
    self.snaps_creds = ''
    self.os_creds = openstack_tests.get_credentials(
        os_env_file=CONST.__getattribute__('openstack_creds'))

    self.details['orchestrator'] = dict(
        name=get_config("orchestrator.name", config_file),
        version=get_config("orchestrator.version", config_file),
        status='ERROR',
        result=''
    )
    self.vnf = dict(
        descriptor=get_config("vnf.descriptor", config_file),
        requirements=get_config("vnf.requirements", config_file)
    )
    self.details['vnf'] = dict(
        descriptor_version=self.vnf['descriptor']['version'],
        name=get_config("vnf.name", config_file),
        version=get_config("vnf.version", config_file),
    )
    self.__logger.debug("VNF configuration: %s", self.vnf)

    self.details['test_vnf'] = dict(
        name=get_config("vnf_test_suite.name", config_file),
        version=get_config("vnf_test_suite.version", config_file),
        tag_name=get_config("vnf_test_suite.tag_name", config_file)
    )
    self.images = get_config("tenant_images", config_file)
    self.__logger.info("Images needed for vEPC: %s", self.images)

    self.keystone_client = os_utils.get_keystone_client()
    self.glance_client = os_utils.get_glance_client()
    self.neutron_client = os_utils.get_neutron_client()
    self.nova_client = os_utils.get_nova_client()
    self.sec_group_id = None
    self.public_auth_url = None
    self.creds = None
    self.filename = None
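# A hedged sketch of the get_config() helper used throughout __init__,
# written from how it is called rather than from the real source: dotted
# keys resolved against a YAML config file.
import yaml

def get_config(parameter, file_path):
    with open(file_path) as config_file:
        file_yaml = yaml.safe_load(config_file)
    value = file_yaml
    for element in parameter.split("."):
        value = value.get(element)
        if value is None:
            raise ValueError("The parameter %s is not defined in %s" %
                             (parameter, file_path))
    return value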
def prepare(self):
    self.creds = os_utils.get_credentials()
    self.keystone_client = os_utils.get_keystone_client()

    self.logger.info("Prepare OpenStack platform (create tenant and user)")
    admin_user_id = os_utils.get_user_id(self.keystone_client,
                                         self.creds['username'])
    if admin_user_id == '':
        self.step_failure("Failed to get id of " + self.creds['username'])

    tenant_id = os_utils.create_tenant(self.keystone_client,
                                       self.tenant_name,
                                       self.tenant_description)
    if not tenant_id:
        self.step_failure("Failed to create " +
                          self.tenant_name + " tenant")

    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = os_utils.get_role_id(self.keystone_client, role_name)
    if role_id == '':
        self.logger.error("Failed to get id for %s role" % role_name)
        self.step_failure("Failed to get role id of " + role_name)

    if not os_utils.add_role_user(self.keystone_client, admin_user_id,
                                  role_id, tenant_id):
        self.logger.error("Failed to add %s on tenant" %
                          self.creds['username'])
        self.step_failure("Failed to add %s on tenant" %
                          self.creds['username'])

    user_id = os_utils.create_user(self.keystone_client, self.tenant_name,
                                   self.tenant_name, None, tenant_id)
    if not user_id:
        self.logger.error("Failed to create %s user" % self.tenant_name)
        self.step_failure("Failed to create user ")

    if not os_utils.add_role_user(self.keystone_client, user_id,
                                  role_id, tenant_id):
        self.logger.error("Failed to add %s on tenant" % self.tenant_name)
        self.step_failure("Failed to add %s on tenant" % self.tenant_name)

    self.logger.info("Update OpenStack creds information")
    self.admin_creds = self.creds.copy()
    self.admin_creds.update({"tenant": self.tenant_name})
    self.neutron_client = os_utils.get_neutron_client(self.admin_creds)
    self.nova_client = os_utils.get_nova_client(self.admin_creds)
    self.creds.update({
        "tenant": self.tenant_name,
        "username": self.tenant_name,
        "password": self.tenant_name,
    })
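# A plausible implementation of os_utils.create_tenant, assuming a
# python-keystoneclient v2.0 client; it returns '' on failure, which is
# the sentinel both callers in this section test for.
def create_tenant(keystone_client, tenant_name, tenant_description):
    try:
        tenant = keystone_client.tenants.create(tenant_name,
                                                tenant_description,
                                                enabled=True)
        return tenant.id
    except Exception as e:
        logger.error("Error [create_tenant('%s')]: %s" % (tenant_name, e))
        return ''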
def __init__(self, case_name=''):
    super(RallyBase, self).__init__(case_name)
    self.mode = ''
    self.summary = []
    self.scenario_dir = ''
    self.nova_client = os_utils.get_nova_client()
    self.neutron_client = os_utils.get_neutron_client()
    self.cinder_client = os_utils.get_cinder_client()
    self.network_dict = {}
    self.volume_type = None
    self.smoke = None
def main():
    logging.basicConfig()
    logger.info("Cleaning OpenStack resources...")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    keystone_client = os_utils.get_keystone_client()
    cinder_client = os_utils.get_cinder_client()
    glance_client = os_utils.get_glance_client()

    try:
        with open(OS_SNAPSHOT_FILE) as f:
            snapshot_yaml = yaml.safe_load(f)
    except Exception:
        logger.info("The file %s does not exist. The OpenStack snapshot must"
                    " be created first. Aborting cleanup." % OS_SNAPSHOT_FILE)
        return 0

    default_images = snapshot_yaml.get('images')
    default_instances = snapshot_yaml.get('instances')
    default_volumes = snapshot_yaml.get('volumes')
    default_networks = snapshot_yaml.get('networks')
    default_routers = snapshot_yaml.get('routers')
    default_security_groups = snapshot_yaml.get('secgroups')
    default_floatingips = snapshot_yaml.get('floatingips')
    default_users = snapshot_yaml.get('users')
    default_tenants = snapshot_yaml.get('tenants')

    if not os_utils.check_credentials():
        logger.error("Please source the openrc credentials and run "
                     "the script again.")
        return -1

    remove_instances(nova_client, default_instances)
    separator()
    remove_images(glance_client, default_images)
    separator()
    remove_volumes(cinder_client, default_volumes)
    separator()
    remove_floatingips(neutron_client, default_floatingips)
    separator()
    remove_networks(neutron_client, default_networks, default_routers)
    separator()
    remove_security_groups(neutron_client, default_security_groups)
    separator()
    remove_users(keystone_client, default_users)
    separator()
    remove_tenants(keystone_client, default_tenants)
    separator()
    return 0
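# separator() is not shown in this section; a one-line sketch consistent
# with how it is used between the remove_* calls above:
def separator():
    logger.debug("-------------------------------------------------------")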
def __init__(self, **kwargs):
    """Initialize RallyBase object."""
    super(RallyBase, self).__init__(**kwargs)
    self.mode = ''
    self.summary = []
    self.scenario_dir = ''
    self.nova_client = os_utils.get_nova_client()
    self.neutron_client = os_utils.get_neutron_client()
    self.cinder_client = os_utils.get_cinder_client()
    self.network_dict = {}
    self.volume_type = None
    self.smoke = None
    self.test_name = None
    self.image_exists = None
    self.image_id = None
    self.start_time = None
    self.result = None
    self.details = None
def deploy_orchestrator(self):
    self.logger.info("Deploying Open Baton...")
    self.logger.info("Details: %s", self.mano['details'])
    start_time = time.time()

    self.logger.info("Creating orchestra instance...")
    userdata = get_userdata(self.mano)
    self.logger.info("flavor: %s\n"
                     "image: %s\n"
                     "network_id: %s\n",
                     self.mano['details']['flavor']['name'],
                     self.mano['requirements']['image'],
                     self.mano['details']['network']['id'])
    self.logger.debug("userdata: %s\n", userdata)

    # setting up image
    image_settings = ImageSettings(
        name=self.mano['requirements']['image'],
        image_user='******',
        exists=True)

    # setting up port
    port_settings = PortSettings(
        name='%s_port' % self.case_name,
        network_name=self.mano['details']['network']['name'])

    # build configuration of vm
    orchestra_settings = VmInstanceSettings(
        name=self.case_name,
        flavor=self.mano['details']['flavor']['name'],
        port_settings=[port_settings],
        security_group_names=[self.mano['details']['sec_group']],
        userdata=userdata)
    orchestra_vm = OpenStackVmInstance(self.snaps_creds,
                                       orchestra_settings,
                                       image_settings)
    orchestra_vm.create()
    self.created_resources.append(orchestra_vm)

    self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
    self.logger.info("Created orchestra instance: %s",
                     self.mano['details']['id'])

    self.logger.info("Associating floating ip: '%s' to VM '%s' ",
                     self.mano['details']['fip'].ip,
                     self.case_name)
    nova_client = os_utils.get_nova_client()
    if not os_utils.add_floating_ip(nova_client,
                                    self.mano['details']['id'],
                                    self.mano['details']['fip'].ip):
        duration = time.time() - start_time
        self.details["orchestrator"].update(status='FAIL',
                                            duration=duration)
        self.logger.error("Cannot associate floating IP to VM.")
        return False

    self.logger.info("Waiting for Open Baton NFVO to be up and running...")
    timeout = 0
    while timeout < 200:
        if servertest(self.mano['details']['fip'].ip, "8080"):
            break
        else:
            self.logger.info("Open Baton NFVO is not started yet (%ss)",
                             (timeout * 5))
            time.sleep(5)
            timeout += 1

    if timeout >= 200:
        duration = time.time() - start_time
        self.details["orchestrator"].update(status='FAIL',
                                            duration=duration)
        self.logger.error("Open Baton is not started correctly")
        return False

    self.logger.info("Waiting for all components to be up and running...")
    time.sleep(60)
    duration = time.time() - start_time
    self.details["orchestrator"].update(status='PASS', duration=duration)
    self.logger.info("Deploy Open Baton NFVO: OK")
    return True
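# A minimal sketch of the servertest() helper polled above, assuming it
# only checks that a TCP port answers (names and timeout are assumptions):
import socket

def servertest(host, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(5)
    try:
        sock.connect((host, int(port)))
        return True
    except socket.error:
        return False
    finally:
        sock.close()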
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_1_name,
                                               TESTCASE_CONFIG.subnet_1_cidr,
                                               network_1_id)
        network_2_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_2_name)
        subnet_2_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_2_name,
                                               TESTCASE_CONFIG.subnet_2_cidr,
                                               network_2_id)
        network_ids.extend([network_1_id, network_2_id])
        subnet_ids.extend([subnet_1_id, subnet_2_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
        av_zone_1 = "nova:" + compute_nodes[0]
        av_zone_2 = "nova:" + compute_nodes[1]

        # boot INSTANCES
        vm_2 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        vm_2_ip = test_utils.get_instance_ip(vm_2)

        vm_3 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_3_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2)
        vm_3_ip = test_utils.get_instance_ip(vm_3)

        vm_5 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_5_name, image_id,
            network_2_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2)
        vm_5_ip = test_utils.get_instance_ip(vm_5)

        # We boot vm5 first because we need vm5_ip for vm4 userdata
        u4 = test_utils.generate_ping_userdata([vm_5_ip])
        vm_4 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_4_name, image_id,
            network_2_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1, userdata=u4)
        vm_4_ip = test_utils.get_instance_ip(vm_4)

        # We boot VM1 at the end because we need to get the IPs first
        # to generate the userdata
        u1 = test_utils.generate_ping_userdata(
            [vm_2_ip, vm_3_ip, vm_4_ip, vm_5_ip])
        vm_1 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1, userdata=u1)
        instance_ids.extend([vm_1.id, vm_2.id, vm_3.id, vm_4.id, vm_5.id])

        msg = ("Create VPN with eRT<>iRT")
        results.record_action(msg)
        vpn_name = "sdnvpn-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets2,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }
        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate network '%s' to the VPN." %
               TESTCASE_CONFIG.net_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(
            neutron_client, bgpvpn_id, network_1_id)

        # Wait for VMs to be ready.
        instances_up = test_utils.wait_for_instances_up(vm_2, vm_3, vm_5)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_4)
        if (not instances_up or not instances_dhcp_up):
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
        results.get_ping_status(vm_1, vm_3, expected="PASS", timeout=30)
        results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)

        msg = ("Associate network '%s' to the VPN." %
               TESTCASE_CONFIG.net_2_name)
        results.add_to_summary(0, "-")
        results.record_action(msg)
        results.add_to_summary(0, "-")
        test_utils.create_network_association(
            neutron_client, bgpvpn_id, network_2_id)

        test_utils.wait_for_bgp_net_assocs(neutron_client, bgpvpn_id,
                                           network_1_id, network_2_id)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status(vm_4, vm_5, expected="PASS", timeout=30)
        # TODO enable again when isolation in VPN with iRT != eRT works
        # results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)
        # results.get_ping_status(vm_1, vm_5, expected="FAIL", timeout=30)

        msg = ("Update VPN with eRT=iRT ...")
        results.add_to_summary(0, "-")
        results.record_action(msg)
        results.add_to_summary(0, "-")
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets1,
            "name": vpn_name
        }
        bgpvpn = test_utils.update_bgpvpn(neutron_client, bgpvpn_id, **kwargs)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status(vm_1, vm_4, expected="PASS", timeout=30)
        results.get_ping_status(vm_1, vm_5, expected="PASS", timeout=30)
    except Exception as e:
        logger.error("exception occurred while executing testcase_1: %s", e)
        raise
    finally:
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
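# A hedged sketch of test_utils.generate_ping_userdata, inferred from how
# the testcases use it: a cloud-init shell script that keeps pinging the
# given IPs so get_ping_status() can read the console output (the exact
# script format is an assumption):
def generate_ping_userdata(ips_array):
    ips = " ".join(ips_array)
    return ("#!/bin/sh\n"
            "while true; do\n"
            " for i in %s; do\n"
            "  if ping -c 1 $i > /dev/null 2>&1; then\n"
            "   echo ping $i OK\n"
            "  else\n"
            "   echo ping $i KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n" % ips)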
def main():
    logger.info("Cleaning OpenStack resources...")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    keystone_client = os_utils.get_keystone_client()
    cinder_client = os_utils.get_cinder_client()
    tacker_client = os_tacker.get_tacker_client()

    try:
        with open(OS_SNAPSHOT_FILE) as f:
            snapshot_yaml = yaml.safe_load(f)
    except Exception:
        logger.info("The file %s does not exist. The OpenStack snapshot must"
                    " be created first. Aborting cleanup." % OS_SNAPSHOT_FILE)
        exit(0)

    default_images = snapshot_yaml.get('images')
    default_instances = snapshot_yaml.get('instances')
    default_volumes = snapshot_yaml.get('volumes')
    default_networks = snapshot_yaml.get('networks')
    default_routers = snapshot_yaml.get('routers')
    default_security_groups = snapshot_yaml.get('secgroups')
    default_floatingips = snapshot_yaml.get('floatingips')
    default_users = snapshot_yaml.get('users')
    default_tenants = snapshot_yaml.get('tenants')
    default_vnfds = snapshot_yaml.get('vnfds')
    default_vnfs = snapshot_yaml.get('vnfs')
    default_sfcs = snapshot_yaml.get('sfcs')
    default_sfc_classifiers = snapshot_yaml.get('sfc_classifiers')

    if not os_utils.check_credentials():
        logger.error("Please source the openrc credentials and run "
                     "the script again.")
        exit(-1)

    remove_instances(nova_client, default_instances)
    separator()
    remove_images(nova_client, default_images)
    separator()
    remove_volumes(cinder_client, default_volumes)
    separator()
    remove_floatingips(nova_client, default_floatingips)
    separator()
    remove_networks(neutron_client, default_networks, default_routers)
    separator()
    remove_security_groups(neutron_client, default_security_groups)
    separator()
    remove_users(keystone_client, default_users)
    separator()
    remove_tenants(keystone_client, default_tenants)
    separator()

    # Note: Delete in this order
    # 1. Classifiers, 2. SFCs, 3. VNFs, 4. VNFDs
    remove_tacker_sfc_classifiers(tacker_client, default_sfc_classifiers)
    separator()
    remove_tacker_sfcs(tacker_client, default_sfcs)
    separator()
    remove_tacker_vnfs(tacker_client, default_vnfs)
    separator()
    remove_tacker_vnfds(tacker_client, default_vnfds)
    separator()
def main():
    deploymentHandler = DeploymentFactory.get_handler(
        COMMON_CONFIG.installer_type,
        COMMON_CONFIG.installer_ip,
        COMMON_CONFIG.installer_user,
        installer_pwd=COMMON_CONFIG.installer_password)

    cluster = COMMON_CONFIG.installer_cluster
    openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
                       if cluster is not None
                       else deploymentHandler.get_nodes())

    controller_nodes = [node for node in openstack_nodes
                        if node.is_controller()]
    compute_nodes = [node for node in openstack_nodes
                     if node.is_compute()]

    odl_ip, odl_port = test_utils.get_odl_ip_port(openstack_nodes)

    for compute in compute_nodes:
        logger.info("This is a compute: %s" % compute.info)

    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    installer_type = os.environ.get("INSTALLER_TYPE")
    if installer_type != "fuel":
        logger.error(
            '\033[91mOnly the Fuel installer type is currently '
            'supported\033[0m')
        sys.exit(1)

    installer_ip = os.environ.get("INSTALLER_IP")
    if not installer_ip:
        logger.error('\033[91minstaller ip is not set\033[0m')
        logger.error('\033[91mexport INSTALLER_IP=<ip>\033[0m')
        sys.exit(1)

    test_utils.setup_compute_node(TESTCASE_CONFIG.subnet_cidr, compute_nodes)
    test_utils.configure_iptables(controller_nodes)
    test_utils.download_image(COMMON_CONFIG.url, COMMON_CONFIG.image_path)

    _, custom_flv_id = os_utils.get_or_create_flavor(
        COMMON_CONFIG.flavor, COMMON_CONFIG.ram_size_in_mb,
        COMMON_CONFIG.disk_size_in_gb, COMMON_CONFIG.vcpu_count, public=True)
    if not custom_flv_id:
        logger.error("Failed to create custom flavor")
        sys.exit(1)

    glance_client = os_utils.get_glance_client()
    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()
    tacker_client = os_tacker.get_tacker_client()

    controller_clients = test_utils.get_ssh_clients(controller_nodes)
    compute_clients = test_utils.get_ssh_clients(compute_nodes)

    ovs_logger = ovs_log.OVSLogger(
        os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
        COMMON_CONFIG.functest_results_dir)

    image_id = os_utils.create_glance_image(glance_client,
                                            COMMON_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            COMMON_CONFIG.image_format,
                                            public='public')

    network_id = test_utils.setup_neutron(neutron_client,
                                          TESTCASE_CONFIG.net_name,
                                          TESTCASE_CONFIG.subnet_name,
                                          TESTCASE_CONFIG.router_name,
                                          TESTCASE_CONFIG.subnet_cidr)

    sg_id = test_utils.create_security_groups(neutron_client,
                                              TESTCASE_CONFIG.secgroup_name,
                                              TESTCASE_CONFIG.secgroup_descr)

    vnf_names = ['testVNF1', 'testVNF2']

    topo_seed = topo_shuffler.get_seed()  # change to None for nova av zone
    testTopology = topo_shuffler.topology(vnf_names, seed=topo_seed)

    logger.info('This test is run with the topology {0}'
                .format(testTopology['id']))
    logger.info('Topology description: {0}'
                .format(testTopology['description']))

    client_instance = test_utils.create_instance(
        nova_client, CLIENT, COMMON_CONFIG.flavor, image_id,
        network_id, sg_id, av_zone=testTopology['client'])

    server_instance = test_utils.create_instance(
        nova_client, SERVER, COMMON_CONFIG.flavor, image_id,
        network_id, sg_id, av_zone=testTopology['server'])

    server_ip = server_instance.networks.get(TESTCASE_CONFIG.net_name)[0]

    tosca_red = os.path.join(COMMON_CONFIG.sfc_test_dir,
                             COMMON_CONFIG.vnfd_dir,
                             TESTCASE_CONFIG.test_vnfd_red)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_red)

    tosca_blue = os.path.join(COMMON_CONFIG.sfc_test_dir,
                              COMMON_CONFIG.vnfd_dir,
                              TESTCASE_CONFIG.test_vnfd_blue)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_blue)

    default_param_file = os.path.join(
        COMMON_CONFIG.sfc_test_dir,
        COMMON_CONFIG.vnfd_dir,
        COMMON_CONFIG.vnfd_default_params_file)

    test_utils.create_vnf_in_av_zone(
        tacker_client, vnf_names[0], 'test-vnfd1',
        default_param_file, testTopology[vnf_names[0]])
    test_utils.create_vnf_in_av_zone(
        tacker_client, vnf_names[1], 'test-vnfd2',
        default_param_file, testTopology[vnf_names[1]])

    vnf1_id = os_tacker.wait_for_vnf(tacker_client, vnf_name=vnf_names[0])
    vnf2_id = os_tacker.wait_for_vnf(tacker_client, vnf_name=vnf_names[1])
    if vnf1_id is None or vnf2_id is None:
        logger.error('ERROR while booting vnfs')
        sys.exit(1)

    vnf1_instance_id = test_utils.get_nova_id(tacker_client, 'vdu1', vnf1_id)
    os_utils.add_secgroup_to_instance(nova_client, vnf1_instance_id, sg_id)

    vnf2_instance_id = test_utils.get_nova_id(tacker_client, 'vdu1', vnf2_id)
    os_utils.add_secgroup_to_instance(nova_client, vnf2_instance_id, sg_id)

    os_tacker.create_sfc(tacker_client, 'red',
                         chain_vnf_names=['testVNF1'])
    os_tacker.create_sfc(tacker_client, 'blue',
                         chain_vnf_names=['testVNF2'])

    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http', sfc_name='red',
        match={
            'source_port': 0,
            'dest_port': 80,
            'protocol': 6
        })

    os_tacker.create_sfc_classifier(
        tacker_client, 'red_ssh', sfc_name='red',
        match={
            'source_port': 0,
            'dest_port': 22,
            'protocol': 6
        })

    logger.info(test_utils.run_cmd('tacker sfc-list')[1])
    logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])

    # Start measuring the time it takes to implement the classification rules
    t1 = threading.Thread(target=test_utils.wait_for_classification_rules,
                          args=(ovs_logger, compute_nodes, odl_ip, odl_port,
                                testTopology,))
    try:
        t1.start()
    except Exception as e:
        logger.error("Unable to start the thread that counts time %s" % e)

    logger.info("Assigning floating IPs to instances")
    server_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, server_instance.id)
    client_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, client_instance.id)
    sf1_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, vnf1_instance_id)
    sf2_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, vnf2_instance_id)

    for ip in (server_floating_ip, client_floating_ip,
               sf1_floating_ip, sf2_floating_ip):
        logger.info("Checking connectivity towards floating IP [%s]" % ip)
        if not test_utils.ping(ip, retries=50, retry_timeout=1):
            logger.error("Cannot ping floating IP [%s]" % ip)
            sys.exit(1)
        logger.info("Successful ping to floating IP [%s]" % ip)

    if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
        logger.error("Cannot establish SSH connection to the SFs")
        sys.exit(1)

    logger.info("Starting HTTP server on %s" % server_floating_ip)
    if not test_utils.start_http_server(server_floating_ip):
        logger.error('\033[91mFailed to start HTTP server on %s\033[0m'
                     % server_floating_ip)
        sys.exit(1)

    logger.info("Starting SSH firewall on %s" % sf1_floating_ip)
    test_utils.start_vxlan_tool(sf1_floating_ip, block="22")
    logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
    test_utils.start_vxlan_tool(sf2_floating_ip, block="80")

    logger.info("Wait for ODL to update the classification rules in OVS")
    t1.join()

    logger.info("Test SSH")
    if test_utils.is_ssh_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "SSH Blocked")
    else:
        error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "SSH Blocked")

    logger.info("Test HTTP")
    if not test_utils.is_http_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "HTTP works")
    else:
        error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "HTTP works")

    logger.info("Changing the classification")
    test_utils.delete_classifier_and_acl(
        tacker_client, 'red_http', odl_ip, odl_port)
    test_utils.delete_classifier_and_acl(
        tacker_client, 'red_ssh', odl_ip, odl_port)

    os_tacker.create_sfc_classifier(
        tacker_client, 'blue_http', sfc_name='blue',
        match={
            'source_port': 0,
            'dest_port': 80,
            'protocol': 6
        })

    os_tacker.create_sfc_classifier(
        tacker_client, 'blue_ssh', sfc_name='blue',
        match={
            'source_port': 0,
            'dest_port': 22,
            'protocol': 6
        })

    logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])

    # Start measuring the time it takes to implement the classification rules
    t2 = threading.Thread(target=test_utils.wait_for_classification_rules,
                          args=(ovs_logger, compute_nodes, odl_ip, odl_port,
                                testTopology,))
    try:
        t2.start()
    except Exception as e:
        logger.error("Unable to start the thread that counts time %s" % e)

    logger.info("Wait for ODL to update the classification rules in OVS")
    t2.join()

    logger.info("Test HTTP")
    if test_utils.is_http_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "HTTP Blocked")
    else:
        error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "HTTP Blocked")

    logger.info("Test SSH")
    if not test_utils.is_ssh_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "SSH works")
    else:
        error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "SSH works")

    return results.compile_summary()
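# A minimal sketch of test_utils.ping as called above, assuming it shells
# out to the system ping binary and retries until the target answers:
import subprocess
import time

def ping(remote, retries=100, retry_timeout=1):
    cmd = ['ping', '-c', '1', '-W', str(retry_timeout), str(remote)]
    while retries > 0:
        if subprocess.call(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE) == 0:
            return True
        retries -= 1
        time.sleep(retry_timeout)
    return False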
def init(self):
    start_time_ts = time.time()
    self.util = utilvnf(self.logger)
    self.ks_cresds = os_utils.get_credentials()

    self.logger.info("Prepare OpenStack platform (create tenant and user)")
    keystone = os_utils.get_keystone_client()

    user_id = os_utils.get_user_id(keystone, self.ks_cresds['username'])
    if user_id == '':
        return self.step_failure(
            "init",
            "Error : Failed to get id of " + self.ks_cresds['username'])

    tenant_id = os_utils.create_tenant(keystone, TENANT_NAME,
                                       TENANT_DESCRIPTION)
    if tenant_id == '':
        return self.step_failure(
            "init",
            "Error : Failed to create " + TENANT_NAME + " tenant")

    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = os_utils.get_role_id(keystone, role_name)
    if role_id == '':
        self.logger.error("Error : Failed to get id for %s role" % role_name)

    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        self.logger.error("Error : Failed to add %s on tenant" %
                          self.ks_cresds['username'])

    user_id = os_utils.create_user(keystone, TENANT_NAME, TENANT_NAME,
                                   None, tenant_id)
    if user_id == '':
        self.logger.error("Error : Failed to create %s user" % TENANT_NAME)

    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        self.logger.error("Failed to add %s on tenant" % TENANT_NAME)

    self.logger.info("Update OpenStack creds information")
    self.ks_cresds.update({
        "tenant_name": TENANT_NAME,
    })
    self.neutron = os_utils.get_neutron_client(self.ks_cresds)
    nova = os_utils.get_nova_client()
    self.glance = os_utils.get_glance_client(self.ks_cresds)
    self.ks_cresds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
    })

    self.load_test_env_config()

    self.logger.info("Upload the OS images if they do not exist")
    images = {}
    images.update(IMAGES)
    images.update(self.VNF_TEST_IMAGES)
    for img in images.keys():
        image_name = images[img]['image_name']
        self.logger.info("image name = " + image_name)
        image_url = images[img]['image_url']

        image_id = os_utils.get_image_id(self.glance, image_name)
        if image_id == '':
            self.logger.info(
                """%s image doesn't exist on glance repository.
                Try downloading this image and uploading it to glance!"""
                % image_name)
            image_id = self.download_and_add_image_on_glance(
                self.glance, image_name, image_url)
        if image_id == '':
            return self.step_failure(
                "init",
                "Error : Failed to find or upload required OS "
                "image for this deployment")

    self.logger.info("Update security group quota for this tenant")
    result = os_utils.update_sg_quota(self.neutron, tenant_id, 50, 100)
    if not result:
        return self.step_failure(
            "init",
            "Failed to update security group quota for tenant " +
            TENANT_NAME)

    self.credentials = {
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "auth_url": os_utils.get_endpoint('identity'),
        "tenant_name": TENANT_NAME,
        "region_name": os.environ['OS_REGION_NAME']
    }

    self.util.set_credentials(self.credentials["username"],
                              self.credentials["password"],
                              self.credentials["auth_url"],
                              self.credentials["tenant_name"],
                              self.credentials["region_name"])

    test_scenario_file = open(self.util.TEST_SCENATIO_YAML_FILE, 'r')
    self.test_scenario_yaml = yaml.safe_load(test_scenario_file)
    test_scenario_file.close()

    res = self.util.test_scenario_validation_check(self.test_scenario_yaml)
    if res["status"] is False:
        self.logger.error(res["message"])
        return self.step_failure("init",
                                 "Error : Failed to execute the test.")
    self.logger.info("Test scenario yaml validation check : " +
                     res["message"])

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    self.set_result("init", duration, "OK")
    return self.set_resultdata(self.testcase_start_time, "", "",
                               self.results)
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public=True)
    network_1_id, _, router_1_id = test_utils.create_network(
        neutron_client,
        TESTCASE_CONFIG.net_1_name,
        TESTCASE_CONFIG.subnet_1_name,
        TESTCASE_CONFIG.subnet_1_cidr,
        TESTCASE_CONFIG.router_1_name)
    network_2_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_2_name)
    test_utils.create_subnet(neutron_client,
                             TESTCASE_CONFIG.subnet_2_name,
                             TESTCASE_CONFIG.subnet_2_cidr,
                             network_2_id)

    sg_id = os_utils.create_security_group_full(
        neutron_client, TESTCASE_CONFIG.secgroup_name,
        TESTCASE_CONFIG.secgroup_descr)

    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES
    vm_2 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
        network_1_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    vm_2_ip = vm_2.networks.itervalues().next()[0]

    vm_3 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_3_name, image_id,
        network_1_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2)
    vm_3_ip = vm_3.networks.itervalues().next()[0]

    vm_5 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_5_name, image_id,
        network_2_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2)
    vm_5_ip = vm_5.networks.itervalues().next()[0]

    # We boot vm5 first because we need vm5_ip for vm4 userdata
    u4 = test_utils.generate_ping_userdata([vm_5_ip])
    vm_4 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_4_name, image_id,
        network_2_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1, userdata=u4)
    vm_4_ip = vm_4.networks.itervalues().next()[0]

    # We boot VM1 at the end because we need to get the IPs first to generate
    # the userdata
    u1 = test_utils.generate_ping_userdata(
        [vm_2_ip, vm_3_ip, vm_4_ip, vm_5_ip])
    vm_1 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
        network_1_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1, userdata=u1)

    msg = ("Create VPN with eRT<>iRT")
    results.record_action(msg)
    vpn_name = "sdnvpn-" + str(randint(100000, 999999))
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets1,
        "export_targets": TESTCASE_CONFIG.targets2,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
        "name": vpn_name
    }
    bgpvpn = os_utils.create_bgpvpn(neutron_client, **kwargs)
    bgpvpn_id = bgpvpn['bgpvpn']['id']
    logger.debug("VPN created details: %s" % bgpvpn)

    msg = ("Associate router '%s' to the VPN." %
           TESTCASE_CONFIG.router_1_name)
    results.record_action(msg)
    results.add_to_summary(0, "-")

    os_utils.create_router_association(neutron_client, bgpvpn_id,
                                       router_1_id)

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_1, vm_2, vm_3,
                                                    vm_4, vm_5)
    if not instances_up:
        logger.error("One or more instances are down")
        # TODO Handle appropriately

    results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
    results.get_ping_status(vm_1, vm_3, expected="PASS", timeout=30)
    results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)

    msg = ("Associate network '%s' to the VPN." %
           TESTCASE_CONFIG.net_2_name)
    results.add_to_summary(0, "-")
    results.record_action(msg)
    results.add_to_summary(0, "-")
    os_utils.create_network_association(neutron_client, bgpvpn_id,
                                        network_2_id)

    test_utils.wait_for_bgp_router_assoc(neutron_client, bgpvpn_id,
                                         router_1_id)
    test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn_id,
                                      network_2_id)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    results.get_ping_status(vm_4, vm_5, expected="PASS", timeout=30)
    results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)
    results.get_ping_status(vm_1, vm_5, expected="FAIL", timeout=30)

    msg = ("Update VPN with eRT=iRT ...")
    results.add_to_summary(0, "-")
    results.record_action(msg)
    results.add_to_summary(0, "-")
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets1,
        "export_targets": TESTCASE_CONFIG.targets1,
        "name": vpn_name
    }
    bgpvpn = os_utils.update_bgpvpn(neutron_client, bgpvpn_id, **kwargs)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    results.get_ping_status(vm_1, vm_4, expected="PASS", timeout=30)
    results.get_ping_status(vm_1, vm_5, expected="PASS", timeout=30)

    results.add_to_summary(0, "=")
    logger.info("\n%s" % results.summary)

    return results.compile_summary(TESTCASE_CONFIG.success_criteria)
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public='public')
    network_1_id, _, router_1_id = test_utils.create_network(
        neutron_client,
        TESTCASE_CONFIG.net_1_name,
        TESTCASE_CONFIG.subnet_1_name,
        TESTCASE_CONFIG.subnet_1_cidr,
        TESTCASE_CONFIG.router_1_name)
    network_2_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_2_name)
    test_utils.create_subnet(neutron_client,
                             TESTCASE_CONFIG.subnet_2_name,
                             TESTCASE_CONFIG.subnet_2_cidr,
                             network_2_id)

    sg_id = os_utils.create_security_group_full(
        neutron_client, TESTCASE_CONFIG.secgroup_name,
        TESTCASE_CONFIG.secgroup_descr)
    test_utils.open_icmp_ssh(neutron_client, sg_id)

    vm_2 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
        network_2_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name)
    vm_2_ip = test_utils.get_instance_ip(vm_2)

    u1 = test_utils.generate_ping_userdata([vm_2_ip])
    vm_1 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
        network_1_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name, userdata=u1)

    results.record_action("Create VPN with eRT==iRT")
    vpn_name = "sdnvpn-7"
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets,
        "export_targets": TESTCASE_CONFIG.targets,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
        "name": vpn_name
    }
    bgpvpn = os_utils.create_bgpvpn(neutron_client, **kwargs)
    bgpvpn_id = bgpvpn['bgpvpn']['id']
    logger.debug("VPN created details: %s" % bgpvpn)

    msg = ("Associate router '%s' and net '%s' to the VPN." %
           (TESTCASE_CONFIG.router_1_name, TESTCASE_CONFIG.net_2_name))
    results.record_action(msg)
    results.add_to_summary(0, "-")

    os_utils.create_router_association(neutron_client, bgpvpn_id,
                                       router_1_id)
    os_utils.create_network_association(neutron_client, bgpvpn_id,
                                        network_2_id)

    test_utils.wait_for_bgp_router_assoc(neutron_client, bgpvpn_id,
                                         router_1_id)
    test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn_id,
                                      network_2_id)

    instances_up = test_utils.wait_for_instances_up(vm_1, vm_2)
    if not instances_up:
        logger.error("One or more instances are down")

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
    results.add_to_summary(0, "=")

    msg = "Assign a Floating IP to %s" % vm_1.name
    results.record_action(msg)

    fip = os_utils.create_floating_ip(neutron_client)
    fip_added = os_utils.add_floating_ip(nova_client, vm_1.id,
                                         fip['fip_addr'])
    if fip_added:
        results.add_success(msg)
    else:
        results.add_failure(msg)

    results.record_action("Ping %s via Floating IP" % vm_1.name)
    results.add_to_summary(0, "-")
    results.ping_ip_test(fip['fip_addr'])

    return results.compile_summary()
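# A plausible shape for os_utils.create_floating_ip, assuming a neutron
# v2.0 client; the returned dict matches the 'fip_addr' key read above,
# and get_external_net_id is an assumed helper for the external network:
def create_floating_ip(neutron_client):
    extnet_id = get_external_net_id(neutron_client)  # assumed helper
    props = {'floating_network_id': extnet_id}
    try:
        fip = neutron_client.create_floatingip({'floatingip': props})
        return {'fip_addr': fip['floatingip']['floating_ip_address'],
                'fip_id': fip['floatingip']['id']}
    except Exception as e:
        logger.error("Error [create_floating_ip(neutron_client)]: %s" % e)
        return None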
def main():
    deploymentHandler = DeploymentFactory.get_handler(
        INSTALLER["type"],
        INSTALLER["ip"],
        INSTALLER["user"],
        installer_pwd=INSTALLER["password"])

    cluster = INSTALLER["cluster"]
    openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
                       if cluster is not None
                       else deploymentHandler.get_nodes())

    compute_nodes = [node for node in openstack_nodes
                     if node.is_compute()]

    odl_ip, odl_port = test_utils.get_odl_ip_port(openstack_nodes)

    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()
    tacker_client = os_tacker.get_tacker_client()

    ovs_logger = ovs_log.OVSLogger(
        os.path.join(DEMO_DIR, 'ovs-logs'), RESULTS_DIR)

    sg_id = os_utils.get_security_group_id(neutron_client, SECGROUP_NAME)

    vnfs = ['testVNF1', 'testVNF2']

    topo_seed = 0
    testTopology = topo_shuffler.topology(vnfs, seed=topo_seed)

    logger.info('This test is run with the topology {0}'
                .format(testTopology['id']))
    logger.info('Topology description: {0}'
                .format(testTopology['description']))

    tosca_red = os.path.join(DEMO_DIR, VNFD_DIR, TEST_VNFD)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_red)

    default_param_file = os.path.join(
        DEMO_DIR, VNFD_DIR, VNFD_DEFAULT_PARAMS_FILE)

    test_utils.create_vnf_in_av_zone(
        tacker_client, vnfs[0], 'test-vnfd1',
        default_param_file, testTopology[vnfs[0]])

    vnf_id = os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF1')
    if vnf_id is None:
        logger.error('ERROR while booting vnf')
        sys.exit(1)

    vnf_instance_id = test_utils.get_nova_id(tacker_client, 'vdu1', vnf_id)

    instances = os_utils.get_instances(nova_client)
    for instance in instances:
        if ('client' not in instance.name) and \
                ('server' not in instance.name):
            os_utils.add_secgroup_to_instance(nova_client,
                                              instance.id, sg_id)

    os_tacker.create_sfc(tacker_client, 'red',
                         chain_vnf_names=['testVNF1'],
                         symmetrical=True)

    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http', sfc_name='red',
        match={
            'source_port': 0,
            'dest_port': 80,
            'protocol': 6
        })

    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http_reverse', sfc_name='red',
        match={
            'source_port': 80,
            'dest_port': 0,
            'protocol': 6
        })

    logger.info(test_utils.run_cmd('tacker sfc-list')[1])
    logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])

    sf_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, vnf_instance_id)

    for ip in [sf_floating_ip]:
        logger.info("Checking connectivity towards floating IP [%s]" % ip)
        if not test_utils.ping(ip, retries=50, retry_timeout=1):
            logger.error("Cannot ping floating IP [%s]" % ip)
            sys.exit(1)
        logger.info("Successful ping to floating IP [%s]" % ip)

    if not test_utils.check_ssh([sf_floating_ip]):
        logger.error("Cannot establish SSH connection to the SFs")
        sys.exit(1)

    logger.info("Firewall started, blocking traffic port 80")
    test_utils.vxlan_firewall(sf_floating_ip, port=80)
    cmd = "python vxlan_tool.py --metadata -i eth0 -d forward -v off -b 80"
    cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
    test_utils.run_cmd_remote(sf_floating_ip, cmd)
    time.sleep(7)

    logger.info("Wait for ODL to update the classification rules in OVS")
    time.sleep(10)

    rsps = test_utils.get_odl_resource_list(
        odl_ip, odl_port, 'rendered-service-path', datastore='operational')
    reverse_path_id = next(
        rsp['path-id']
        for rsp in rsps['rendered-service-paths']['rendered-service-path']
        if rsp['name'].endswith('Reverse'))
    hex_path_id = hex(int(reverse_path_id))
    reverse_path_action = "load:{0}->NXM_NX_NSH_C3[]".format(hex_path_id)

    for compute_node in compute_nodes:
        compute_ssh = compute_node.ssh_client
        match_rsp = re.compile(
            r'.+tp_dst=80.+load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\].+')
        # First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
        # This is not a flow so ignore
        flows = (ovs_logger.ofctl_dump_flows(compute_ssh, 'br-int', '11')
                 .strip().split('\n')[1:])
        matching_flows = [match_rsp.match(f) for f in flows]
        if all(m is None for m in matching_flows):
            break
        uplink_flow = [f.group(0) for f in matching_flows
                       if f is not None][0]
        actions = uplink_flow.split("actions=")[1]
        actions_c3 = "{0},{1}".format(reverse_path_action, actions)
        cmd = ("ovs-ofctl -OOpenflow13 mod-flows br-int "
               "\"table=11,tcp,reg0=0x1,tp_dst=80,actions={0}\""
               .format(actions_c3))
        # compute_node.run_cmd(cmd)
        logger.info("Running: {0}".format(cmd))

        match_port = re.compile(
            r'.+table=158.+output:([0-9]+)')
        flows = (ovs_logger.ofctl_dump_flows(compute_ssh, 'br-int', '158')
                 .strip().split('\n')[1:])
        matching_flows = [match_port.match(f) for f in flows]
        sf_port = [f.group(1) for f in matching_flows if f is not None][0]
        cmd = ("ovs-ofctl -O Openflow13 add-flow br-int "
               "\"table=11,nsi=254,nsp={0} "
               "actions=load:0x1->NXM_NX_REG0[],"
               "move:NXM_NX_NSH_C2[]->NXM_NX_TUN_ID[0..31],"
               "resubmit({1},1)\"".format(reverse_path_id, sf_port))
        # compute_node.run_cmd(cmd)
        logger.info("Running: {0}".format(cmd))

        cmd = ("ovs-ofctl -O Openflow13 add-flow br-int "
               "\"table=1, priority=40000,nsi=254,nsp={0},reg0=0x1,"
               "in_port={1} actions=pop_nsh,goto_table:21\""
               .format(reverse_path_id, sf_port))
        logger.info("Running: {0}".format(cmd))

    logger.info("HTTP traffic from client to server should be blocked")
    logger.info("When HTTP traffic is sent to the server, it should reply"
                " with a TCP RESET")
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    logger.debug("Using private key %s injected to the VMs."
                 % COMMON_CONFIG.keyfile_path)
    keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
    key = keyfile.read()
    keyfile.close()
    files = {"/home/cirros/id_rsa": key}

    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public='public')
    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    test_utils.create_subnet(neutron_client,
                             TESTCASE_CONFIG.subnet_1a_name,
                             TESTCASE_CONFIG.subnet_1a_cidr,
                             network_1_id)
    test_utils.create_subnet(neutron_client,
                             TESTCASE_CONFIG.subnet_1b_name,
                             TESTCASE_CONFIG.subnet_1b_cidr,
                             network_1_id)

    network_2_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_2_name)
    test_utils.create_subnet(neutron_client,
                             TESTCASE_CONFIG.subnet_2a_name,
                             TESTCASE_CONFIG.subnet_2a_cidr,
                             network_2_id)
    test_utils.create_subnet(neutron_client,
                             TESTCASE_CONFIG.subnet_2b_name,
                             TESTCASE_CONFIG.subnet_2b_cidr,
                             network_2_id)

    sg_id = os_utils.create_security_group_full(
        neutron_client, TESTCASE_CONFIG.secgroup_name,
        TESTCASE_CONFIG.secgroup_descr)

    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES
    userdata_common = test_utils.generate_userdata_common()
    vm_2 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
        network_1_id, sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_2_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=userdata_common)

    vm_3 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_3_name, image_id,
        network_1_id, sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_3_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=userdata_common)

    vm_5 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_5_name, image_id,
        network_2_id, sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_5_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=userdata_common)

    # We boot vm5 first because we need vm5_ip for vm4 userdata
    u4 = test_utils.generate_userdata_with_ssh([
        TESTCASE_CONFIG.instance_1_ip,
        TESTCASE_CONFIG.instance_3_ip,
        TESTCASE_CONFIG.instance_5_ip
    ])
    vm_4 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_4_name, image_id,
        network_2_id, sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_4_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u4,
        files=files)

    # We boot VM1 at the end because we need to get the IPs first to generate
    # the userdata
    u1 = test_utils.generate_userdata_with_ssh([
        TESTCASE_CONFIG.instance_2_ip,
        TESTCASE_CONFIG.instance_3_ip,
        TESTCASE_CONFIG.instance_4_ip,
        TESTCASE_CONFIG.instance_5_ip
    ])
    vm_1 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
        network_1_id, sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_1_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u1,
        files=files)

    msg = ("Create VPN1 with eRT=iRT")
    results.record_action(msg)
    vpn1_name = "sdnvpn-1-" + str(randint(100000, 999999))
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets2,
        "export_targets": TESTCASE_CONFIG.targets2,
        "route_targets": TESTCASE_CONFIG.targets2,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers1,
        "name": vpn1_name
    }
    bgpvpn1 = os_utils.create_bgpvpn(neutron_client, **kwargs)
    bgpvpn1_id = bgpvpn1['bgpvpn']['id']
    logger.debug("VPN1 created details: %s" % bgpvpn1)

    msg = ("Associate network '%s' to the VPN." %
           TESTCASE_CONFIG.net_1_name)
    results.record_action(msg)
    results.add_to_summary(0, "-")

    os_utils.create_network_association(neutron_client, bgpvpn1_id,
                                        network_1_id)

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_1, vm_2, vm_3,
                                                    vm_4, vm_5)
    if not instances_up:
        logger.error("One or more instances are down")
        sys.exit(-1)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    # 10.10.10.12 should return sdnvpn-2 to sdnvpn-1
    results.check_ssh_output(vm_1, vm_2,
                             expected=TESTCASE_CONFIG.instance_2_name,
                             timeout=200)
    # 10.10.11.13 should return sdnvpn-3 to sdnvpn-1
    results.check_ssh_output(vm_1, vm_3,
                             expected=TESTCASE_CONFIG.instance_3_name,
                             timeout=30)

    results.add_to_summary(0, "-")
    msg = ("Create VPN2 with eRT=iRT")
    results.record_action(msg)
    vpn2_name = "sdnvpn-2-" + str(randint(100000, 999999))
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets1,
        "export_targets": TESTCASE_CONFIG.targets1,
        "route_targets": TESTCASE_CONFIG.targets1,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers2,
        "name": vpn2_name
    }
    bgpvpn2 = os_utils.create_bgpvpn(neutron_client, **kwargs)
    bgpvpn2_id = bgpvpn2['bgpvpn']['id']
    logger.debug("VPN created details: %s" % bgpvpn2)

    msg = ("Associate network '%s' to the VPN2." %
           TESTCASE_CONFIG.net_2_name)
    results.record_action(msg)
    results.add_to_summary(0, "-")

    os_utils.create_network_association(neutron_client, bgpvpn2_id,
                                        network_2_id)

    test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn1_id,
                                      network_1_id)
    test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn2_id,
                                      network_2_id)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    # 10.10.11.13 should return sdnvpn-5 to sdnvpn-4
    results.check_ssh_output(vm_4, vm_5,
                             expected=TESTCASE_CONFIG.instance_5_name,
                             timeout=30)
    # 10.10.10.11 should return "not reachable" to sdnvpn-4
    results.check_ssh_output(vm_4, vm_1,
                             expected="not reachable",
                             timeout=30)

    return results.compile_summary()
def main():
    GlobalVariables.nova_client = os_utils.get_nova_client()
    GlobalVariables.neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    GlobalVariables.SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client, private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(
        GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH, GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
    GlobalVariables.network_dict = \
        os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
                                            RALLY_PRIVATE_SUBNET_NAME,
                                            RALLY_ROUTER_NAME,
                                            RALLY_PRIVATE_SUBNET_CIDR)
    if not GlobalVariables.network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+\n"
              "| Module            | Duration   | nb. Test Run  | Success   |\n"
              "+===================+============+===============+===========+\n")

    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in GlobalVariables.SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({
            'module': name,
            'details': {
                'duration': s['overall_duration'],
                'nb tests': s['nb_tests'],
                'success': s['success']
            }
        })

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)

    if len(GlobalVariables.SUMMARY):
        success_rate = total_success / len(GlobalVariables.SUMMARY)
    else:
        success_rate = 100
    success_rate = "{:0.2f}".format(success_rate)
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += ("+===================+============"
               "+===============+===========+")
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += ("+===================+============"
               "+===============+===========+")
    report += "\n"

    logger.info("\n" + report)
    payload.append({
        'summary': {
            'duration': total_duration,
            'nb tests': total_nb_tests,
            'nb success': success_rate
        }
    })

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest",
                                    case_name,
                                    start_time,
                                    stop_time,
                                    status,
                                    payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(GlobalVariables.nova_client,
                                            image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)
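# A hedged sketch of ft_utils.check_success_rate as used above; the real
# helper reads the pass criteria from the functest config, so the fixed
# 90% default here is an assumption:
def check_success_rate(case_name, success_rate, criteria=90):
    if float(success_rate) >= criteria:
        return "PASS"
    return "FAIL"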
def init_performance_testToplogy(self, tplgy, performance_test_config): tplgy.delete_config() vnf_list = performance_test_config["vnf_list"] target_vnf = self.util.get_vnf_info(vnf_list, "target_vnf") tester_vm = self.util.get_vnf_info(vnf_list, "tester_vm") target_vnf_image_name = "" if "image_name" in target_vnf: target_vnf_image_name = target_vnf["image_name"] target_vnf_flavor_name = "" if "flavor_name" in target_vnf: target_vnf_flavor_name = target_vnf["flavor_name"] self.logger.debug("target_vnf image name : " + target_vnf_image_name) self.logger.debug("target_vnf flavor name : " + target_vnf_flavor_name) tester_vm_image_name = "" if "image_name" in tester_vm: tester_vm_image_name = tester_vm["image_name"] tester_vm_flavor_name = "" if "flavor_name" in tester_vm: tester_vm_flavor_name = tester_vm["flavor_name"] self.logger.debug("tester vm image name : " + tester_vm_image_name) self.logger.debug("tester vm flavor name : " + tester_vm_flavor_name) nova = os_utils.get_nova_client() # Setting the flavor id for target vnf. target_vnf_flavor_id = os_utils.get_flavor_id(nova, target_vnf_flavor_name) if target_vnf_flavor_id == '': for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT: if default == 'ram_min': target_vnf_flavor_id = os_utils.get_flavor_id_by_ram_range( nova, self.PERFORMANCE_TEST_TPLGY_DEFAULT['ram_min'], VNF_MAX_RAM_SIZE) if target_vnf_flavor_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find flavor for target vnf") tplgy.set_target_vnf_flavor_id(target_vnf_flavor_id) # Setting the flavor id for tester vm. tester_vm_flavor_id = os_utils.get_flavor_id(nova, tester_vm_flavor_name) if tester_vm_flavor_id == '': for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT: if default == 'ram_min': tester_vm_flavor_id = os_utils.get_flavor_id_by_ram_range( nova, self.PERFORMANCE_TEST_TPLGY_DEFAULT['ram_min'], VNF_MAX_RAM_SIZE) if tester_vm_flavor_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find flavor for tester vm") tplgy.set_send_tester_vm_flavor_id(tester_vm_flavor_id) tplgy.set_receive_tester_vm_flavor_id(tester_vm_flavor_id) # Setting the image id for target vnf. target_vnf_image_id = os_utils.get_image_id(self.glance, target_vnf_image_name) if target_vnf_image_id == '': for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT: if default == 'vnf_os_image': target_vnf_image_id = os_utils.get_image_id( self.glance, self.PERFORMANCE_TEST_TPLGY_DEFAULT['vnf_os_image']) if target_vnf_image_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find required OS image for target vnf") tplgy.set_target_vnf_image_id(target_vnf_image_id) # Setting the image id for tester vm. 
tester_vm_image_id = os_utils.get_image_id(self.glance, tester_vm_image_name) if tester_vm_image_id == '': for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT: if default == 'tester_os_image': tester_vm_image_id = os_utils.get_image_id( self.glance, self.PERFORMANCE_TEST_TPLGY_DEFAULT['tester_os_image']) if tester_vm_image_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find required OS image for tester vm") tplgy.set_send_tester_vm_image_id(tester_vm_image_id) tplgy.set_receive_tester_vm_image_id(tester_vm_image_id) tplgy.set_region(REGION_NAME) ext_net = os_utils.get_external_net(self.neutron) if not ext_net: return self.step_failure("making_testTopology", "Failed to get external network") tplgy.set_external_network_name(ext_net) tplgy.set_credentials(username=self.ks_cresds['username'], password=self.ks_cresds['password'], tenant_name=self.ks_cresds['tenant_name'], auth_url=os_utils.get_endpoint('identity')) return self.set_resultdata(self.testcase_start_time, "", "", self.results)
def init_function_testToplogy(self, tplgy, function_test_config): tplgy.delete_config() self.logger.info("Collect flavor id for all topology vnf") vnf_list = function_test_config["vnf_list"] target_vnf = self.util.get_vnf_info(vnf_list, "target_vnf") reference_vnf = self.util.get_vnf_info(vnf_list, "reference_vnf") target_vnf_image_name = "" if "image_name" in target_vnf: target_vnf_image_name = target_vnf["image_name"] target_vnf_flavor_name = "" if "flavor_name" in target_vnf: target_vnf_flavor_name = target_vnf["flavor_name"] self.logger.debug("target_vnf image name : " + target_vnf_image_name) self.logger.debug("target_vnf flavor name : " + target_vnf_flavor_name) reference_vnf_image_name = "" if "image_name" in reference_vnf: reference_vnf_image_name = reference_vnf["image_name"] reference_vnf_flavor_name = "" if "flavor_name" in reference_vnf: reference_vnf_flavor_name = reference_vnf["flavor_name"] self.logger.debug("reference_vnf image name : " + reference_vnf_image_name) self.logger.debug("reference_vnf flavor name : " + reference_vnf_flavor_name) nova = os_utils.get_nova_client() # Setting the flavor id for target vnf. target_vnf_flavor_id = os_utils.get_flavor_id(nova, target_vnf_flavor_name) if target_vnf_flavor_id == '': for default in self.FUNCTION_TEST_TPLGY_DEFAULT: if default == 'ram_min': target_vnf_flavor_id = os_utils.get_flavor_id_by_ram_range( nova, self.FUNCTION_TEST_TPLGY_DEFAULT['ram_min'], VNF_MAX_RAM_SIZE) self.logger.info("target_vnf_flavor_id set by ram range search") if target_vnf_flavor_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find flavor for target vnf") tplgy.set_target_vnf_flavor_id(target_vnf_flavor_id) # Setting the flavor id for reference vnf. reference_vnf_flavor_id = os_utils.get_flavor_id( nova, reference_vnf_flavor_name) if reference_vnf_flavor_id == '': for default in self.FUNCTION_TEST_TPLGY_DEFAULT: if default == 'ram_min': reference_vnf_flavor_id = \ os_utils.get_flavor_id_by_ram_range( nova, self.FUNCTION_TEST_TPLGY_DEFAULT['ram_min'], VNF_MAX_RAM_SIZE) self.logger.info("reference_vnf_flavor_id set by ram range search") if reference_vnf_flavor_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find flavor for reference vnf") tplgy.set_reference_vnf_flavor_id(reference_vnf_flavor_id) # Setting the image id for target vnf. target_vnf_image_id = os_utils.get_image_id(self.glance, target_vnf_image_name) if target_vnf_image_id == '': for default in self.FUNCTION_TEST_TPLGY_DEFAULT: if default == 'os_image': target_vnf_image_id = os_utils.get_image_id( self.glance, self.FUNCTION_TEST_TPLGY_DEFAULT['os_image']) if target_vnf_image_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find required OS image for target vnf") tplgy.set_target_vnf_image_id(target_vnf_image_id) # Setting the image id for reference vnf. 
reference_vnf_image_id = os_utils.get_image_id( self.glance, reference_vnf_image_name) if reference_vnf_image_id == '': for default in self.FUNCTION_TEST_TPLGY_DEFAULT: if default == 'os_image': reference_vnf_image_id = os_utils.get_image_id( self.glance, self.FUNCTION_TEST_TPLGY_DEFAULT['os_image']) if reference_vnf_image_id == '': return self.step_failure( "making_testTopology", "Error : Failed to find required OS image for reference vnf.") tplgy.set_reference_vnf_image_id(reference_vnf_image_id) tplgy.set_region(REGION_NAME) ext_net = os_utils.get_external_net(self.neutron) if not ext_net: return self.step_failure("making_testTopology", "Failed to get external network") tplgy.set_external_network_name(ext_net) tplgy.set_credentials(username=self.ks_cresds['username'], password=self.ks_cresds['password'], tenant_name=self.ks_cresds['tenant_name'], auth_url=os_utils.get_endpoint('identity')) return self.set_resultdata(self.testcase_start_time, "", "", self.results)
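# The four flavor/image lookups in the two topology-init methods above all
# follow the same "try the configured name, then fall back to the topology
# defaults" pattern. A hypothetical helper that factors out the flavor half
# of it might look like this; resolve_flavor_id and its parameters are
# illustrative, not part of the original module, and it relies on the
# module-level os_utils import used throughout this file.
def resolve_flavor_id(nova, flavor_name, defaults, max_ram):
    """Return a flavor id by name, falling back to a RAM-range search."""
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    if flavor_id == '' and 'ram_min' in defaults:
        flavor_id = os_utils.get_flavor_id_by_ram_range(
            nova, defaults['ram_min'], max_ram)
    return flavor_id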
def deploy_cloudify(self, cfy): username = self.ks_cresds['username'] password = self.ks_cresds['password'] tenant_name = self.ks_cresds['tenant_name'] auth_url = os_utils.get_endpoint('identity') self.logger.debug("auth_url = %s" % auth_url) cfy.set_credentials(username, password, tenant_name, auth_url) self.logger.info("Collect flavor id for cloudify manager server") nova = os_utils.get_nova_client() flavor_name = "m1.large" flavor_id = os_utils.get_flavor_id(nova, flavor_name) for requirement in CFY_MANAGER_REQUIERMENTS: if requirement == 'ram_min': flavor_id = os_utils.get_flavor_id_by_ram_range( nova, CFY_MANAGER_REQUIERMENTS['ram_min'], CFY_MANAGER_MAX_RAM_SIZE) if flavor_id == '': self.logger.error("Failed to find %s flavor. " "Falling back to the default ram range " "requirement!" % flavor_name) flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196) if flavor_id == '': return self.step_failure( "making_orchestrator", "Failed to find required flavor for this deployment") cfy.set_flavor_id(flavor_id) image_name = "centos_7" image_id = os_utils.get_image_id(self.glance, image_name) for requirement in CFY_MANAGER_REQUIERMENTS: if requirement == 'os_image': image_id = os_utils.get_image_id( self.glance, CFY_MANAGER_REQUIERMENTS['os_image']) if image_id == '': return self.step_failure( "making_orchestrator", "Error : Failed to find required OS image for cloudify manager" ) cfy.set_image_id(image_id) ext_net = os_utils.get_external_net(self.neutron) if not ext_net: return self.step_failure("making_orchestrator", "Failed to get external network") cfy.set_external_network_name(ext_net) ns = functest_utils.get_resolvconf_ns() if ns: cfy.set_nameservers(ns) self.logger.info("Prepare virtualenv for cloudify-cli") cmd = "chmod +x " + VNF_DIR + "create_venv.sh" functest_utils.execute_command(cmd, self.logger) time.sleep(3) cmd = VNF_DIR + "create_venv.sh " + self.util.VNF_DATA_DIR functest_utils.execute_command(cmd, self.logger) cfy.download_manager_blueprint(CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch']) # ############### CLOUDIFY DEPLOYMENT ################ start_time_ts = time.time() self.logger.info("Cloudify deployment Start Time:'%s'" % (datetime.datetime.fromtimestamp( start_time_ts).strftime('%Y-%m-%d %H:%M:%S'))) error = cfy.deploy_manager() if error: return self.step_failure("making_orchestrator", error) end_time_ts = time.time() duration = round(end_time_ts - start_time_ts, 1) self.logger.info("Cloudify deployment duration:'%s'" % duration) self.set_result("making_orchestrator", duration, "OK") return self.set_resultdata(self.testcase_start_time, "", "", self.results)
def deploy_orchestrator(self): self.logger.info("Additional pre-configuration steps") nova_client = os_utils.get_nova_client() neutron_client = os_utils.get_neutron_client() glance_client = os_utils.get_glance_client() # Import images if needed # needs some images self.logger.info("Upload some OS images if they don't exist") temp_dir = os.path.join(self.data_dir, "tmp/") for image_name, image_url in self.images.iteritems(): self.logger.info("image: %s, url: %s", image_name, image_url) try: image_id = os_utils.get_image_id(glance_client, image_name) self.logger.info("image_id: %s", image_id) except BaseException: self.logger.error("Unexpected error: %s", sys.exc_info()[0]) if image_id == '': self.logger.info("""%s image doesn't exist on glance repository. Try downloading this image and upload on glance !""" % image_name) image_id = os_utils.download_and_add_image_on_glance( glance_client, image_name, image_url, temp_dir) if image_id == '': self.logger.error("Failed to find or upload required OS " "image for this deployment") return False network_dic = os_utils.create_network_full(neutron_client, "openbaton_mgmt", "openbaton_mgmt_subnet", "openbaton_router", "192.168.100.0/24") # orchestrator VM flavor self.logger.info( "Check if orchestra Flavor is available, if not create one") flavor_exist, flavor_id = os_utils.get_or_create_flavor("orchestra", "4096", '20', '2', public=True) self.logger.debug("Flavor id: %s" % flavor_id) if not network_dic: self.logger.error("There has been a problem when creating the " "neutron network") return False network_id = network_dic["net_id"] self.logger.info("Creating floating IP for VM in advance...") floatip_dic = os_utils.create_floating_ip(neutron_client) floatip = floatip_dic['fip_addr'] if floatip is None: self.logger.error("Cannot create floating IP.") return False userdata = "#!/bin/bash\n" userdata += "echo \"Executing userdata...\"\n" userdata += "set -x\n" userdata += "set -e\n" userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n" userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n" userdata += "echo \"Install curl...\"\n" userdata += "apt-get install -y curl\n" userdata += "echo \"Inject public key...\"\n" userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3" "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc" "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv" "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S" "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O" "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc" "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya" "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut" "horized_keys\n") userdata += "echo \"Download bootstrap...\"\n" userdata += ("curl -s %s " "> ./bootstrap\n" % self.bootstrap_link) userdata += ("curl -s %s" "> ./config_file\n" % self.bootstrap_config_link) userdata += ("echo \"Disable usage of mysql...\"\n") userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n" userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n" % floatip) userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip" "=%s/g /config_file\n" % floatip) userdata += "echo \"Set autostart of components to 'false'\"\n" userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n" userdata += "echo \"Execute bootstrap...\"\n" bootstrap = "sh ./bootstrap release -configFile=./config_file" userdata += bootstrap + "\n" userdata += "echo \"Setting 'nfvo.plugin.timeout' to '600000'\"\n" userdata += ("echo \"nfvo.plugin.timeout=600000\" >> " "/etc/openbaton/openbaton-nfvo.properties\n") userdata += ( "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" % self.userdata_file) userdata += "sed -i '113i\ \ \ \ sleep 60' " \ "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" userdata += "echo \"Starting NFVO\"\n" userdata += "service openbaton-nfvo restart\n" userdata += "echo \"Starting Generic VNFM\"\n" userdata += "service openbaton-vnfm-generic restart\n" userdata += "echo \"...end of userdata...\"\n" sg_id = os_utils.create_security_group_full(neutron_client, "orchestra-sec-group", "allowall") os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", "icmp", 0, 255) os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", "icmp", 0, 255) os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", "tcp", 1, 65535) os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", "udp", 1, 65535) os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", "tcp", 1, 65535) os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", "udp", 1, 65535) self.logger.info("Security group set") self.logger.info("Create instance....") self.logger.info( "flavor: orchestra\n" "image: %s\n" "network_id: %s\n" "userdata: %s\n", self.imagename, network_id, userdata) instance = os_utils.create_instance_and_wait_for_active( "orchestra", os_utils.get_image_id(glance_client, self.imagename), network_id, "orchestra-openbaton", config_drive=False, userdata=userdata) self.ob_instance_id = instance.id self.logger.info("Adding sec group to orchestra instance") os_utils.add_secgroup_to_instance(nova_client, self.ob_instance_id, sg_id) self.logger.info("Associating floating ip: '%s' to VM '%s' ", floatip, "orchestra-openbaton") if not os_utils.add_floating_ip(nova_client, instance.id, floatip): self.logger.error("Cannot associate floating IP to VM.") return False self.logger.info("Waiting for Open Baton NFVO to be up and running...") x = 0 while x < 200: if servertest(floatip, "8080"): break else: self.logger.debug("Open Baton NFVO is not started yet (%ss)" % (x * 5)) time.sleep(5) x += 1 if x == 200: self.logger.error("Open Baton is not started correctly") self.ob_ip = floatip self.ob_password = "******" self.ob_username = "******" self.ob_https = False self.ob_port = "8080" self.logger.info("Waiting for all components to be up and running...") time.sleep(60) self.details["orchestrator"] = { 'status': "PASS", 'result': "Deploy Open Baton NFVO: OK" } self.logger.info("Deploy Open Baton NFVO: OK") return True
def main(): nova_client = os_utils.get_nova_client() neutron_client = os_utils.get_neutron_client() glance_client = os_utils.get_glance_client() # Download the image if not os.path.isfile(IMAGE_PATH): logger.info("Downloading image") ft_utils.download_url( "http://artifacts.opnfv.org/sfc/demo/sf_summit2016.qcow2", "/home/opnfv/functest/data/") else: logger.info("Using existing image") # Allow any port so that tacker commands reach the server. # CHECK IF THIS STILL MAKES SENSE WHEN TACKER IS INCLUDED IN OPNFV INSTALLATION controller_command = ("sshpass -p r00tme ssh [email protected]" " 'fuel node'|grep controller|awk '{print $10}'") logger.info("Executing command: '%s'" % controller_command) process = subprocess.Popen(controller_command, shell=True, stdout=subprocess.PIPE) ip = process.stdout.readline().strip() iptable_command1 = ("sshpass -p r00tme ssh [email protected] ssh" " " + ip + " iptables -P INPUT ACCEPT ") iptable_command2 = ("sshpass -p r00tme ssh [email protected] ssh" " " + ip + " iptables -t nat -P INPUT ACCEPT ") subprocess.call(iptable_command1, shell=True) subprocess.call(iptable_command2, shell=True) # Create glance image and the neutron network image_id = os_utils.create_glance_image(glance_client, IMAGE_NAME, IMAGE_PATH, disk=IMAGE_FORMAT, container="bare", public=True, logger=logger) network_dic = os_utils.create_network_full(logger, neutron_client, NET_NAME, SUBNET_NAME, ROUTER_NAME, SUBNET_CIDR) if not network_dic: logger.error( "There has been a problem when creating the neutron network") sys.exit(-1) network_id = network_dic["net_id"] sg_id = os_utils.create_security_group_full(logger, neutron_client, SECGROUP_NAME, SECGROUP_DESCR) # boot INSTANCE logger.info("Creating instance '%s'..." % INSTANCE_NAME) logger.debug( "Configuration:\n name=%s \n flavor=%s \n image=%s \n " "network=%s \n" % (INSTANCE_NAME, FLAVOR, image_id, network_id)) instance = os_utils.create_instance_and_wait_for_active(FLAVOR, image_id, network_id, INSTANCE_NAME) if instance is None: logger.error("Error while booting instance.") sys.exit(-1) # Retrieve IP of INSTANCE instance_ip = instance.networks.get(NET_NAME)[0] logger.debug("Instance '%s' got private ip '%s'." % (INSTANCE_NAME, instance_ip)) logger.info("Adding '%s' to security group '%s'..." % (INSTANCE_NAME, SECGROUP_NAME)) os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id) logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME) floatip_dic = os_utils.create_floating_ip(neutron_client) floatip_client = floatip_dic['fip_addr'] # floatip_id = floatip_dic['fip_id'] if floatip_client is None: logger.error("Cannot create floating IP.") sys.exit(-1) logger.info("Floating IP created: '%s'" % floatip_client) logger.info("Associating floating ip: '%s' to VM '%s' " % (floatip_client, INSTANCE_NAME)) if not os_utils.add_floating_ip(nova_client, instance.id, floatip_client): logger.error("Cannot associate floating IP to VM.") sys.exit(-1) # STARTING SECOND VM (server) ### # boot INSTANCE logger.info("Creating instance '%s'..." % INSTANCE_NAME) logger.debug( "Configuration:\n name=%s \n flavor=%s \n image=%s \n " "network=%s \n" % (INSTANCE_NAME, FLAVOR, image_id, network_id)) instance_2 = os_utils.create_instance_and_wait_for_active(FLAVOR, image_id, network_id, INSTANCE_NAME_2) if instance_2 is None: logger.error("Error while booting instance.") sys.exit(-1) # Retrieve IP of INSTANCE instance_ip_2 = instance_2.networks.get(NET_NAME)[0] logger.debug("Instance '%s' got private ip '%s'." 
% (INSTANCE_NAME_2, instance_ip_2)) logger.info("Adding '%s' to security group '%s'..." % (INSTANCE_NAME_2, SECGROUP_NAME)) os_utils.add_secgroup_to_instance(nova_client, instance_2.id, sg_id) logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME_2) floatip_dic = os_utils.create_floating_ip(neutron_client) floatip_server = floatip_dic['fip_addr'] # floatip_id = floatip_dic['fip_id'] if floatip_server is None: logger.error("Cannot create floating IP.") sys.exit(-1) logger.info("Floating IP created: '%s'" % floatip_server) logger.info("Associating floating ip: '%s' to VM '%s' " % (floatip_server, INSTANCE_NAME_2)) if not os_utils.add_floating_ip(nova_client, instance_2.id, floatip_server): logger.error("Cannot associate floating IP to VM.") sys.exit(-1) # CREATION OF THE 2 SF #### tacker_script = "/home/opnfv/repos/functest/testcases/features/sfc/" + \ TACKER_SCRIPT logger.info("Executing tacker script: '%s'" % tacker_script) subprocess.call(tacker_script, shell=True) # SSH CALL TO START HTTP SERVER ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: ssh.connect(floatip_server, username="******", password="******", timeout=2) command = "python -m SimpleHTTPServer 80 > /dev/null 2>&1 &" logger.info("Starting HTTP server") (stdin, stdout, stderr) = ssh.exec_command(command) except Exception: logger.debug("Waiting for %s..." % floatip_server) time.sleep(6) # timeout -= 1 instances = nova_client.servers.list(search_opts={'all_tenants': 1}) ips = [] try: for instance in instances: if "server" not in instance.name and "client" not in instance.name: logger.debug( "This is the instance name: %s " % instance.name) floatip_dic = os_utils.create_floating_ip(neutron_client) floatip = floatip_dic['fip_addr'] ips.append(floatip) instance.add_floating_ip(floatip) except Exception: logger.debug("Problems assigning floating IP to SFs") logger.debug("Floating IPs for SFs: %s..." % ips) # SSH TO START THE VXLAN_TOOL ON SF1 logger.info("Configuring the SFs") try: ssh.connect(ips[0], username="******", password="******", timeout=2) command = ("nohup python vxlan_tool.py -i eth0 " "-d forward -v off -f -b 80 &") (stdin, stdout, stderr) = ssh.exec_command(command) except Exception: logger.debug("Waiting for %s..." % ips[0]) time.sleep(6) # timeout -= 1 # SSH TO START THE VXLAN_TOOL ON SF2 try: ssh.connect(ips[1], username="******", password="******", timeout=2) command = ("nohup python vxlan_tool.py -i eth0 " "-d forward -v off -f -b 22 &") (stdin, stdout, stderr) = ssh.exec_command(command) except Exception: logger.debug("Waiting for %s..." % ips[1]) time.sleep(6) # timeout -= 1 # SSH TO EXECUTE cmd_client logger.info("TEST STARTED") try: ssh.connect(floatip_client, username="******", password="******", timeout=2) command = "nc -w 5 -zv " + floatip_server + " 22 2>&1" (stdin, stdout, stderr) = ssh.exec_command(command) except Exception: logger.debug("Waiting for %s..." % floatip_client) time.sleep(6) # timeout -= 1 i = 0 output = stdout.readlines() logger.info("First output: %s" % output) if "timed out" in output[0]: logger.info('\033[92m' + "TEST 1 [PASSED] " "==> SSH BLOCKED" + '\033[0m') i = i + 1 else: logger.debug('\033[91m' + "TEST 1 [FAILED] " "==> SSH NOT BLOCKED" + '\033[0m') return # SSH TO EXECUTE cmd_client try: ssh.connect(floatip_client, username="******", password="******", timeout=2) command = "nc -w 5 -zv " + floatip_server + " 80 2>&1" (stdin, stdout, stderr) = ssh.exec_command(command) except Exception: logger.debug("Waiting for %s..." 
% floatip_client) time.sleep(6) # timeout -= 1 if "succeeded" in stdout.readlines()[0]: logger.info('\033[92m' + "TEST 2 [PASSED] " "==> HTTP WORKS" + '\033[0m') i = i + 1 else: logger.debug('\033[91m' + "TEST 2 [FAILED] " "==> HTTP BLOCKED" + '\033[0m') return # CHANGE OF CLASSIFICATION # logger.info("Changing the classification") tacker_classi = "/home/opnfv/repos/functest/testcases/features/sfc/" + \ TACKER_CHANGECLASSI subprocess.call(tacker_classi, shell=True) # SSH TO EXECUTE cmd_client try: ssh.connect(floatip_client, username="******", password="******", timeout=2) command = "nc -w 5 -zv " + floatip_server + " 80 2>&1" (stdin, stdout, stderr) = ssh.exec_command(command) except Exception: logger.debug("Waiting for %s..." % floatip_client) time.sleep(6) # timeout -= 1 if "timed out" in stdout.readlines()[0]: logger.info('\033[92m' + "TEST 3 [PASSED] " "==> HTTP BLOCKED" + '\033[0m') i = i + 1 else: logger.debug('\033[91m' + "TEST 3 [FAILED] " "==> HTTP NOT BLOCKED" + '\033[0m') return # SSH TO EXECUTE cmd_client try: ssh.connect(floatip_client, username="******", password="******", timeout=2) command = "nc -w 5 -zv " + floatip_server + " 22 2>&1" (stdin, stdout, stderr) = ssh.exec_command(command) except Exception: logger.debug("Waiting for %s..." % floatip_client) time.sleep(6) # timeout -= 1 if "succeeded" in stdout.readlines()[0]: logger.info('\033[92m' + "TEST 4 [PASSED] " "==> SSH WORKS" + '\033[0m') i = i + 1 else: logger.debug('\033[91m' + "TEST 4 [FAILED] " "==> SSH BLOCKED" + '\033[0m') return if i == 4: for x in range(0, 5): logger.info('\033[92m' + "SFC TEST WORKED" " :) \n" + '\033[0m') sys.exit(0)
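# The test above repeats the same connect/exec/except/sleep block for every
# remote command. A hypothetical helper that retries the connection and
# returns the command output would remove that duplication; the function
# name, retry count, and delay are illustrative, not part of the original.
import time
import paramiko

def ssh_exec_with_retry(host, username, password, command,
                        retries=10, delay=6):
    """Open an SSH session with retries and return stdout lines."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    for _ in range(retries):
        try:
            ssh.connect(host, username=username, password=password,
                        timeout=2)
            _, stdout, _ = ssh.exec_command(command)
            return stdout.readlines()
        except Exception:
            time.sleep(delay)
    return []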
def main(): installer_type = os.environ.get("INSTALLER_TYPE") if installer_type != "fuel": logger.error( '\033[91mCurrently supported only Fuel Installer type\033[0m') sys.exit(1) installer_ip = os.environ.get("INSTALLER_IP") if not installer_ip: logger.error( '\033[91minstaller ip is not set\033[0m') logger.error( '\033[91mexport INSTALLER_IP=<ip>\033[0m') sys.exit(1) start_time = time.time() status = "PASS" configure_iptables() download_image() _, custom_flv_id = os_utils.get_or_create_flavor( FLAVOR, 1500, 10, 1, public=True) if not custom_flv_id: logger.error("Failed to create custom flavor") sys.exit(1) glance_client = os_utils.get_glance_client() neutron_client = os_utils.get_neutron_client() nova_client = os_utils.get_nova_client() controller_clients = get_ssh_clients("controller") compute_clients = get_ssh_clients("compute") ovs_logger = ovs_utils.OVSLogger( os.path.join(os.getcwd(), 'ovs-logs'), FUNCTEST_RESULTS_DIR) image_id = setup_glance(glance_client) network_id = setup_neutron(neutron_client) sg_id = setup_security_groups(neutron_client) boot_instance( nova_client, CLIENT, "custom-3", "97a399a4-a736-449d-9d20-0cc92cf2cbe4", network_id, sg_id) srv_prv_ip = boot_instance( nova_client, SERVER, FLAVOR, image_id, network_id, sg_id) subprocess.call(TACKER_SCRIPT, shell=True) # Start measuring the time it takes to implement the classification rules # try: # thread.start_new_thread(capture_time_log, # (ovs_logger, compute_clients,)) # except Exception, e: # logger.error("Unable to start the thread that counts time %s" % e) server_ip, client_ip, sf1, sf2 = get_floating_ips( nova_client, neutron_client) if not check_ssh([sf1, sf2]): logger.error("Cannot establish SSH connection to the SFs") sys.exit(1) logger.info("Starting HTTP server on %s" % server_ip) if not start_http_server(server_ip): logger.error( '\033[91mFailed to start HTTP server on %s\033[0m' % server_ip) sys.exit(1) logger.info("Starting HTTP firewall on %s" % sf2) vxlan_firewall(sf2, port="80") logger.info("Starting SSH firewall on %s" % sf1) vxlan_firewall(sf1, port="22") logger.info("Wait for ODL to update the classification rules in OVS") time.sleep(120) logger.info("Test SSH") if is_ssh_blocked(srv_prv_ip, client_ip): logger.info('\033[92mTEST 1 [PASSED] ==> SSH BLOCKED\033[0m') update_json_results("Test 1: SSH Blocked", "Passed") else: error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m') logger.error(error) capture_err_logs( ovs_logger, controller_clients, compute_clients, error) update_json_results("Test 1: SSH Blocked", "Failed") logger.info("Test HTTP") if not is_http_blocked(srv_prv_ip, client_ip): logger.info('\033[92mTEST 2 [PASSED] ==> HTTP WORKS\033[0m') update_json_results("Test 2: HTTP works", "Passed") else: error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m') logger.error(error) capture_err_logs( ovs_logger, controller_clients, compute_clients, error) update_json_results("Test 2: HTTP works", "Failed") sys.exit(-1) logger.info("Changing the classification") subprocess.call(TACKER_CHANGECLASSI, shell=True) # Start measuring the time it takes to implement the classification rules # try: # thread.start_new_thread(capture_time_log, # (ovs_logger, compute_clients,)) # except Exception, e: # logger.error("Unable to start the thread that counts time %s" % e) logger.info("Wait for ODL to update the classification rules in OVS") time.sleep(100) logger.info("Test HTTP") if is_http_blocked(srv_prv_ip, client_ip): logger.info('\033[92mTEST 3 [PASSED] ==> HTTP Blocked\033[0m') 
update_json_results("Test 3: HTTP Blocked", "Passed") else: error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m') logger.error(error) capture_err_logs( ovs_logger, controller_clients, compute_clients, error) update_json_results("Test 3: HTTP Blocked", "Failed") logger.info("Test SSH") if not is_ssh_blocked(srv_prv_ip, client_ip): logger.info('\033[92mTEST 4 [PASSED] ==> SSH Works\033[0m') update_json_results("Test 4: SSH Works", "Passed") else: error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m') logger.error(error) capture_err_logs( ovs_logger, controller_clients, compute_clients, error) update_json_results("Test 4: SSH Works", "Failed") if json_results["failures"]: status = "FAIL" logger.error('\033[91mSFC TESTS: %s :( FOUND %s FAILURES\033[0m' % ( status, json_results["failures"])) ovs_logger.create_artifact_archive() if args.report: stop_time = time.time() logger.debug("SFC Results json: " + str(json_results)) ft_utils.push_results_to_db("sfc", "functest-odl-sfc", start_time, stop_time, status, json_results) if status == "PASS": logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status) sys.exit(0) sys.exit(1)
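# update_json_results() and json_results are used above but defined elsewhere
# in the module; a minimal sketch consistent with the json_results["failures"]
# check could be the following. The internal structure is an assumption.
json_results = {'tests': {}, 'failures': 0}

def update_json_results(name, result):
    """Record a subtest result and count failures for the final status."""
    json_results['tests'][name] = result
    if result == "Failed":
        json_results['failures'] += 1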
def deploy_orchestrator(self, **kwargs): self.logger.info("Additional pre-configuration steps") self.neutron_client = os_utils.get_neutron_client(self.admin_creds) self.glance_client = os_utils.get_glance_client(self.admin_creds) self.keystone_client = os_utils.get_keystone_client(self.admin_creds) self.nova_client = os_utils.get_nova_client(self.admin_creds) # needs some images self.logger.info("Upload some OS images if they don't exist") temp_dir = os.path.join(self.data_dir, "tmp/") for image_name, image_url in self.images.iteritems(): self.logger.info("image: %s, url: %s" % (image_name, image_url)) try: image_id = os_utils.get_image_id(self.glance_client, image_name) self.logger.debug("image_id: %s" % image_id) except Exception: self.logger.error("Unexpected error: %s" % sys.exc_info()[0]) if image_id == '': self.logger.info( """%s image doesn't exist on glance repository. Try downloading this image and upload on glance !""" % image_name) image_id = download_and_add_image_on_glance( self.glance_client, image_name, image_url, temp_dir) if image_id == '': self.step_failure("Failed to find or upload required OS " "image for this deployment") # Need to extend quota self.logger.info("Update security group quota for this tenant") tenant_id = os_utils.get_tenant_id(self.keystone_client, self.tenant_name) self.logger.debug("Tenant id found %s" % tenant_id) if not os_utils.update_sg_quota(self.neutron_client, tenant_id, 50, 100): self.step_failure("Failed to update security group quota" + " for tenant " + self.tenant_name) self.logger.debug("Security group quota extended") # start the deployment of cloudify public_auth_url = os_utils.get_endpoint('identity') self.logger.debug("CFY inputs: %s" % self.orchestrator['inputs']) cfy = Orchestrator(self.data_dir, self.orchestrator['inputs']) self.orchestrator['object'] = cfy self.logger.debug("Orchestrator object created") self.logger.debug("Tenant name: %s" % self.tenant_name) cfy.set_credentials(username=self.tenant_name, password=self.tenant_name, tenant_name=self.tenant_name, auth_url=public_auth_url) self.logger.info("Credentials set in CFY") # orchestrator VM flavor self.logger.info("Check if the flavor is available, if not create one") self.logger.debug("Flavor details %s " % self.orchestrator['requirements']['ram_min']) flavor_exist, flavor_id = os_utils.get_or_create_flavor( "m1.large", self.orchestrator['requirements']['ram_min'], '1', '1', public=True) self.logger.debug("Flavor id: %s" % flavor_id) if not flavor_id: self.logger.info("Available flavors are: ") self.logger.info(self.nova_client.flavors.list()) self.step_failure("Failed to find required flavor " "for this deployment") cfy.set_flavor_id(flavor_id) self.logger.debug("Flavor OK") # orchestrator VM image self.logger.debug("Orchestrator image") if 'os_image' in self.orchestrator['requirements'].keys(): image_id = os_utils.get_image_id( self.glance_client, self.orchestrator['requirements']['os_image']) self.logger.debug("Orchestrator image id: %s" % image_id) if image_id == '': self.logger.error("CFY image not found") self.step_failure("Failed to find required OS image" " for cloudify manager") else: self.step_failure("Failed to find required OS image" " for cloudify manager") cfy.set_image_id(image_id) self.logger.debug("Orchestrator image set") self.logger.debug("Get External network") ext_net = os_utils.get_external_net(self.neutron_client) self.logger.debug("External network: %s" % ext_net) if not ext_net: self.step_failure("Failed to get external network") cfy.set_external_network_name(ext_net) 
self.logger.debug("CFY External network set") self.logger.debug("get resolvconf") ns = ft_utils.get_resolvconf_ns() if ns: cfy.set_nameservers(ns) self.logger.debug("Resolvconf set") self.logger.info("Prepare virtualenv for cloudify-cli") cmd = "chmod +x " + self.case_dir + "create_venv.sh" ft_utils.execute_command(cmd) time.sleep(3) cmd = self.case_dir + "create_venv.sh " + self.data_dir ft_utils.execute_command(cmd) cfy.download_manager_blueprint( self.orchestrator['blueprint']['url'], self.orchestrator['blueprint']['branch']) cfy.deploy_manager() return {'status': 'PASS', 'result': ''}
def main(): nova_client = os_utils.get_nova_client() neutron_client = os_utils.get_neutron_client() glance_client = os_utils.get_glance_client() image_id = os_utils.create_glance_image(glance_client, IMAGE_NAME, IMAGE_PATH, disk=IMAGE_FORMAT, container="bare", public=True, logger=logger) network_dic = os_utils.create_network_full(logger, neutron_client, NET_NAME, SUBNET_NAME, ROUTER_NAME, SUBNET_CIDR) if not network_dic: logger.error( "There has been a problem when creating the neutron network") sys.exit(-1) network_id = network_dic["net_id"] sg_id = os_utils.create_security_group_full(logger, neutron_client, SECGROUP_NAME, SECGROUP_DESCR) # boot INSTANCE logger.info("Creating instance '%s'..." % INSTANCE_NAME) logger.debug( "Configuration:\n name=%s \n flavor=%s \n image=%s \n " "network=%s \n" % (INSTANCE_NAME, FLAVOR, image_id, network_id)) instance = os_utils.create_instance_and_wait_for_active(FLAVOR, image_id, network_id, INSTANCE_NAME) if instance is None: logger.error("Error while booting instance.") sys.exit(-1) # Retrieve IP of INSTANCE instance_ip = instance.networks.get(NET_NAME)[0] logger.debug("Instance '%s' got private ip '%s'." % (INSTANCE_NAME, instance_ip)) logger.info("Adding '%s' to security group '%s'..." % (INSTANCE_NAME, SECGROUP_NAME)) os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id) logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME) floatip_dic = os_utils.create_floating_ip(neutron_client) floatip = floatip_dic['fip_addr'] # floatip_id = floatip_dic['fip_id'] if floatip is None: logger.error("Cannot create floating IP.") sys.exit(-1) logger.info("Floating IP created: '%s'" % floatip) logger.info("Associating floating ip: '%s' to VM '%s' " % (floatip, INSTANCE_NAME)) if not os_utils.add_floating_ip(nova_client, instance.id, floatip): logger.error("Cannot associate floating IP to VM.") sys.exit(-1) sys.exit(0)
def main(): results = Results(COMMON_CONFIG.line_length) results.add_to_summary(0, "=") results.add_to_summary(2, "STATUS", "SUBTEST") results.add_to_summary(0, "=") nova_client = os_utils.get_nova_client() neutron_client = os_utils.get_neutron_client() glance_client = os_utils.get_glance_client() (floatingip_ids, instance_ids, router_ids, network_ids, image_ids, subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8)) image_id = os_utils.create_glance_image(glance_client, TESTCASE_CONFIG.image_name, COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format, container="bare", public='public') image_ids.append(image_id) network_1_id = test_utils.create_net(neutron_client, TESTCASE_CONFIG.net_1_name) subnet_1_id = test_utils.create_subnet(neutron_client, TESTCASE_CONFIG.subnet_1_name, TESTCASE_CONFIG.subnet_1_cidr, network_1_id) network_ids.append(network_1_id) subnet_ids.append(subnet_1_id) sg_id = os_utils.create_security_group_full(neutron_client, TESTCASE_CONFIG.secgroup_name, TESTCASE_CONFIG.secgroup_descr) compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client) av_zone_1 = "nova:" + compute_nodes[0] av_zone_2 = "nova:" + compute_nodes[1] # boot INSTANCES vm_2 = test_utils.create_instance( nova_client, TESTCASE_CONFIG.instance_2_name, image_id, network_1_id, sg_id, secgroup_name=TESTCASE_CONFIG.secgroup_name, compute_node=av_zone_1) vm2_ip = test_utils.get_instance_ip(vm_2) u1 = test_utils.generate_ping_userdata([vm2_ip], 1) vm_1 = test_utils.create_instance( nova_client, TESTCASE_CONFIG.instance_1_name, image_id, network_1_id, sg_id, secgroup_name=TESTCASE_CONFIG.secgroup_name, compute_node=av_zone_1, userdata=u1) vm1_ip = test_utils.get_instance_ip(vm_1) u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip], 1) vm_3 = test_utils.create_instance( nova_client, TESTCASE_CONFIG.instance_3_name, image_id, network_1_id, sg_id, secgroup_name=TESTCASE_CONFIG.secgroup_name, compute_node=av_zone_2, userdata=u3) vm3_ip = test_utils.get_instance_ip(vm_3) # We do not put vm_2 id in instance_ids table because we will # delete the current instance during the testing process instance_ids.extend([vm_1.id, vm_3.id]) # Wait for VMs to get ips. instances_up = test_utils.wait_for_instances_up(vm_2) instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3) if (not instances_up or not instances_dhcp_up): logger.error("One or more instances are down") # TODO: Handle this appropriately # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3 m = Manager() monitor_input1 = m.dict() monitor_output1 = m.dict() monitor_input1["stop_thread"] = False monitor_output1["error_msg"] = "" monitor_thread1 = Process(target=monitor, args=(monitor_input1, monitor_output1, vm_1,)) monitor_input2 = m.dict() monitor_output2 = m.dict() monitor_input2["stop_thread"] = False monitor_output2["error_msg"] = "" monitor_thread2 = Process(target=monitor, args=(monitor_input2, monitor_output2, vm_2,)) monitor_input3 = m.dict() monitor_output3 = m.dict() monitor_input3["stop_thread"] = False monitor_output3["error_msg"] = "" monitor_thread3 = Process(target=monitor, args=(monitor_input3, monitor_output3, vm_3,)) # Lists of all monitor threads and their inputs and outputs. 
threads = [monitor_thread1, monitor_thread2, monitor_thread3] thread_inputs = [monitor_input1, monitor_input2, monitor_input3] thread_outputs = [monitor_output1, monitor_output2, monitor_output3] try: logging.info("Starting all monitor threads") # Start all monitor threads for thread in threads: thread.start() logging.info("Wait before subtest") test_utils.wait_before_subtest() monitor_err_msg = "" for thread_output in thread_outputs: if thread_output["error_msg"] != "": monitor_err_msg += " ,{}".format(thread_output["error_msg"]) thread_output["error_msg"] = "" results.record_action("Check ping status of vm_1, vm_2, and vm_3") results.add_to_summary(0, "-") if len(monitor_err_msg) == 0: results.add_success("Ping succeeds") else: results.add_failure(monitor_err_msg) # Stop monitor thread 2 and delete instance vm_2 thread_inputs[1]["stop_thread"] = True if not os_utils.delete_instance(nova_client, vm_2.id): logging.error("Fail to delete vm_2 instance during " "testing process") raise Exception("Fail to delete instance vm_2.") # Create a new vm (vm_4) on compute 1 node u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip], 1) vm_4 = test_utils.create_instance( nova_client, TESTCASE_CONFIG.instance_4_name, image_id, network_1_id, sg_id, secgroup_name=TESTCASE_CONFIG.secgroup_name, compute_node=av_zone_1, userdata=u4) instance_ids.append(vm_4.id) # Wait for VMs to get ips. instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4) if not instances_dhcp_up: logger.error("Instance vm_4 failed to start.") # TODO: Handle this appropriately # Create and start a new monitor thread for vm_4 monitor_input4 = m.dict() monitor_output4 = m.dict() monitor_input4["stop_thread"] = False monitor_output4["error_msg"] = "" monitor_thread4 = Process(target=monitor, args=(monitor_input4, monitor_output4, vm_4,)) threads.append(monitor_thread4) thread_inputs.append(monitor_input4) thread_outputs.append(monitor_output4) logging.info("Starting monitor thread of vm_4") threads[3].start() test_utils.wait_before_subtest() monitor_err_msg = "" for thread_output in thread_outputs: if thread_output["error_msg"] != "": monitor_err_msg += " ,{}".format(thread_output["error_msg"]) thread_output["error_msg"] = "" results.record_action("Check ping status of vm_1, vm_3 and vm_4. " "Instance vm_2 is deleted") results.add_to_summary(0, "-") if len(monitor_err_msg) == 0: results.add_success("Ping succeeds") else: results.add_failure(monitor_err_msg) except Exception as e: logger.error("exception occurred while executing testcase_10: %s", e) raise finally: # Give a stop signal to all threads logging.info("Sending stop signal to monitor thread") for thread_input in thread_inputs: thread_input["stop_thread"] = True # Wait for all threads to stop and return to the main process for thread in threads: thread.join() test_utils.cleanup_nova(nova_client, instance_ids) test_utils.cleanup_glance(glance_client, image_ids) test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces, subnet_ids, router_ids, network_ids) return results.compile_summary()
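# monitor() is the target of the Process objects above but its body is not
# part of this excerpt. Judging from the dict keys used here ("stop_thread"
# on the input side, "error_msg" on the output side), a minimal polling
# worker could look like this; is_vm_reachable() is a hypothetical stub, not
# a real test_utils function.
import time

def is_vm_reachable(vm):
    return True  # placeholder for the real connectivity probe

def monitor(monitor_input, monitor_output, vm):
    """Poll vm until asked to stop, reporting errors via the output dict."""
    while not monitor_input["stop_thread"]:
        if not is_vm_reachable(vm):
            monitor_output["error_msg"] = "vm %s lost connectivity" % vm.name
        time.sleep(1)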
def main(): results = Results(COMMON_CONFIG.line_length) results.add_to_summary(0, "=") results.add_to_summary(2, "STATUS", "SUBTEST") results.add_to_summary(0, "=") nova_client = os_utils.get_nova_client() neutron_client = os_utils.get_neutron_client() glance_client = os_utils.get_glance_client() (floatingip_ids, instance_ids, router_ids, network_ids, image_ids, subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9)) try: image_id = os_utils.create_glance_image( glance_client, COMMON_CONFIG.ubuntu_image_name, COMMON_CONFIG.ubuntu_image_path, disk="qcow2", container="bare", public="public") image_ids.append(image_id) _, flavor_id = test_utils.create_custom_flavor() flavor_ids.append(flavor_id) network_1_id, subnet_1_id, router_1_id = test_utils.create_network( neutron_client, TESTCASE_CONFIG.net_1_name, TESTCASE_CONFIG.subnet_1_name, TESTCASE_CONFIG.subnet_1_cidr, TESTCASE_CONFIG.router_1_name) interfaces.append(tuple((router_1_id, subnet_1_id))) network_ids.extend([network_1_id]) subnet_ids.extend([subnet_1_id]) router_ids.extend([router_1_id]) sg_id = os_utils.create_security_group_full( neutron_client, TESTCASE_CONFIG.secgroup_name, TESTCASE_CONFIG.secgroup_descr) compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client) av_zone_1 = "nova:" + compute_nodes[0] av_zone_2 = "nova:" + compute_nodes[1] u1 = test_utils.generate_userdata_interface_create( TESTCASE_CONFIG.interface_name, TESTCASE_CONFIG.interface_number, TESTCASE_CONFIG.extra_route_ip, TESTCASE_CONFIG.extra_route_subnet_mask) # boot INSTANCES vm_1 = test_utils.create_instance( nova_client, TESTCASE_CONFIG.instance_1_name, image_id, network_1_id, sg_id, flavor=COMMON_CONFIG.custom_flavor_name, secgroup_name=TESTCASE_CONFIG.secgroup_name, compute_node=av_zone_1, userdata=u1) vm_1_ip = test_utils.get_instance_ip(vm_1) vm1_port = test_utils.get_port(neutron_client, vm_1.id) test_utils.update_port_allowed_address_pairs( neutron_client, vm1_port['id'], [ test_utils.AllowedAddressPair(TESTCASE_CONFIG.extra_route_cidr, vm1_port['mac_address']) ]) vm_2 = test_utils.create_instance( nova_client, TESTCASE_CONFIG.instance_2_name, image_id, network_1_id, sg_id, flavor=COMMON_CONFIG.custom_flavor_name, secgroup_name=TESTCASE_CONFIG.secgroup_name, compute_node=av_zone_1, userdata=u1) vm_2_ip = test_utils.get_instance_ip(vm_2) vm2_port = test_utils.get_port(neutron_client, vm_2.id) test_utils.update_port_allowed_address_pairs( neutron_client, vm2_port['id'], [ test_utils.AllowedAddressPair(TESTCASE_CONFIG.extra_route_cidr, vm2_port['mac_address']) ]) test_utils.async_Wait_for_instances([vm_1, vm_2]) msg = "Create VPN with multiple RDs" results.record_action(msg) vpn_name = "sdnvpn-" + str(randint(100000, 999999)) kwargs = { "import_targets": TESTCASE_CONFIG.targets1, "export_targets": TESTCASE_CONFIG.targets2, "route_distinguishers": TESTCASE_CONFIG.route_distinguishers, "name": vpn_name } bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs) bgpvpn_id = bgpvpn['bgpvpn']['id'] logger.debug("VPN created details: %s" % bgpvpn) bgpvpn_ids.append(bgpvpn_id) msg = ("Associate router '%s' to the VPN." 
% TESTCASE_CONFIG.router_1_name) results.record_action(msg) results.add_to_summary(0, "-") test_utils.create_router_association(neutron_client, bgpvpn_id, router_1_id) test_utils.update_router_extra_route(neutron_client, router_1_id, [ test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr, vm_1_ip), test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr, vm_2_ip) ]) image_2_id = os_utils.create_glance_image( glance_client, TESTCASE_CONFIG.image_name, COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format, container="bare", public='public') image_ids.append(image_2_id) logger.info("Waiting for the VMs to connect to each other using the" " updated network configuration") test_utils.wait_before_subtest() u3 = test_utils.generate_ping_userdata( [TESTCASE_CONFIG.extra_route_ip]) vm_3 = test_utils.create_instance( nova_client, TESTCASE_CONFIG.instance_3_name, image_2_id, network_1_id, sg_id, flavor=COMMON_CONFIG.custom_flavor_name, secgroup_name=TESTCASE_CONFIG.secgroup_name, compute_node=av_zone_2, userdata=u3) instance_ids.extend([vm_1.id, vm_2.id, vm_3.id]) instance_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_3) if (not instance_dhcp_up): logger.error("vm_3 instance is down") results.get_ping_status_target_ip(vm_3, TESTCASE_CONFIG.extra_route_name, TESTCASE_CONFIG.extra_route_ip, expected="PASS", timeout=300) results.add_to_summary(0, "=") logger.info("\n%s" % results.summary) except Exception as e: logger.error("exception occurred while executing testcase_13: %s", e) raise finally: test_utils.update_router_no_extra_route(neutron_client, router_ids) test_utils.cleanup_nova(nova_client, instance_ids) test_utils.cleanup_glance(glance_client, image_ids) test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces, subnet_ids, router_ids, network_ids) return results.compile_summary()
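# AllowedAddressPair and ExtraRoute are consumed above as two-field records;
# in test_utils they are presumably simple namedtuples along these lines.
# The field names are assumptions inferred from the call sites (CIDR plus
# MAC address, and destination CIDR plus next hop).
from collections import namedtuple

AllowedAddressPair = namedtuple('AllowedAddressPair',
                                ['ipaddress', 'macaddress'])
ExtraRoute = namedtuple('ExtraRoute', ['destination', 'nexthop'])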
def execute(self): nova_client = os_utils.get_nova_client() neutron_client = os_utils.get_neutron_client() tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(), os.environ['OS_PROJECT_NAME']) neutron_quota = test_utils.get_neutron_quota(neutron_client, tenant_id) (neutron_nw_quota, neutron_subnet_quota, neutron_port_quota) = (neutron_quota['network'], neutron_quota['subnet'], neutron_quota['port']) instances_quota = test_utils.get_nova_instances_quota(nova_client) self.__logger.info("Setting net/subnet/port quota to unlimited") test_utils.update_nw_subnet_port_quota( neutron_client, tenant_id, COMMON_CONFIG.neutron_nw_quota, COMMON_CONFIG.neutron_subnet_quota, COMMON_CONFIG.neutron_port_quota) # Workaround for # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-115 self.__logger.info("Setting instances quota class to unlimited") test_utils.update_instance_quota_class( nova_client, COMMON_CONFIG.nova_instances_quota_class) with open(COMMON_CONFIG.config_file) as f: config_yaml = yaml.safe_load(f) testcases = config_yaml.get("testcases") overall_status = "PASS" for tc in testcases: if testcases[tc]['enabled']: test_name = tc test_descr = testcases[tc]['description'] title = ("Running '%s - %s'" % (test_name, test_descr)) self.__logger.info(title) self.__logger.info("%s\n" % ("=" * len(title))) t = importlib.import_module(test_name, package=None) try: result = t.main() except Exception as ex: result = -1 self.__logger.info("Caught Exception in %s: %s Trace: %s" % (test_name, ex, traceback.format_exc())) if result < 0: status = "FAIL" overall_status = "FAIL" self.__logger.info("Testcase %s failed" % test_name) else: status = result.get("status") self.details.update({ test_name: { 'status': status, 'details': result.get("details") } }) self.__logger.info( "Results of test case '%s - %s':\n%s\n" % (test_name, test_descr, result)) if status == "FAIL": overall_status = "FAIL" self.__logger.info("Resetting subnet/net/port quota") test_utils.update_nw_subnet_port_quota(neutron_client, tenant_id, neutron_nw_quota, neutron_subnet_quota, neutron_port_quota) self.__logger.info("Resetting instances quota class") test_utils.update_instance_quota_class(nova_client, instances_quota) try: installer_type = str(os.environ['INSTALLER_TYPE'].lower()) if installer_type in ["fuel", "apex"]: gather_logs('overall') else: self.__logger.info("Skipping log gathering because installer " "type %s is neither fuel nor apex" % installer_type) except Exception as ex: self.__logger.error( ('Something went wrong in the log gathering. ' 'Ex: %s, Trace: %s') % (ex, traceback.format_exc())) if overall_status == "PASS": self.result = 100 return base.Feature.EX_OK return base.Feature.EX_RUN_ERROR
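# execute() above saves the tenant quotas, raises them, and restores them in
# straight-line code. A hypothetical context manager expressing the same
# save/raise/restore pattern is sketched below; the name unlimited_quota is
# illustrative and it relies on the module-level test_utils import.
from contextlib import contextmanager

@contextmanager
def unlimited_quota(neutron_client, tenant_id, nw, subnet, port):
    """Temporarily raise net/subnet/port quotas, restoring them on exit."""
    saved = test_utils.get_neutron_quota(neutron_client, tenant_id)
    test_utils.update_nw_subnet_port_quota(
        neutron_client, tenant_id, nw, subnet, port)
    try:
        yield
    finally:
        # Restore the original quota values even if the tests raised.
        test_utils.update_nw_subnet_port_quota(
            neutron_client, tenant_id, saved['network'],
            saved['subnet'], saved['port'])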