def remove_instances(nova_client, default_instances):
    """Delete every Nova instance that is not a default one, then wait.

    An instance is preserved when its id is a key of *default_instances*
    or its name is one of that dict's values; everything else gets a
    delete request, after which we poll until the non-default instances
    are gone or ~50 seconds have elapsed.

    :param nova_client: authenticated Nova client handle
    :param default_instances: dict of instance id -> instance name for
        the instances that must NOT be removed
    """
    logger.debug("Removing Nova instances...")
    instances = os_utils.get_instances(nova_client)
    if instances is None or len(instances) == 0:
        logger.debug("No instances found.")
        return

    for instance in instances:
        instance_name = getattr(instance, 'name')
        instance_id = getattr(instance, 'id')
        logger.debug("'%s', ID=%s " % (instance_name, instance_id))
        # Protected when either the id (dict key) or the name (dict value)
        # matches a default entry.
        if (instance_id not in default_instances and
                instance_name not in default_instances.values()):
            logger.debug("Removing instance '%s' ..." % instance_id)
            if os_utils.delete_instance(nova_client, instance_id):
                logger.debug(" > Request sent.")
            else:
                logger.error("There has been a problem removing the "
                             "instance %s..." % instance_id)
        else:
            logger.debug(" > this is a default instance and will "
                         "NOT be deleted.")

    # Poll until only default instances remain, or give up after ~50 s.
    # BUGFIX: the previous loop broke out of the while after a single pass
    # over the instance list (the trailing `break` was unconditional once
    # the inner `for` finished), so deletions were never actually awaited.
    # Also guard against get_instances() returning None mid-poll.
    timeout = 50
    while timeout > 0:
        leftovers = [
            getattr(instance, 'id')
            for instance in (os_utils.get_instances(nova_client) or [])
            if getattr(instance, 'id') not in default_instances]
        if not leftovers:
            break
        logger.debug("Waiting for instances to be terminated...")
        timeout -= 1
        time.sleep(1)
def remove_instances(nova_client, default_instances):
    """Delete every Nova instance whose id is not a default one, then wait.

    Unlike the sibling variant, only the instance *id* is checked against
    *default_instances*; names are ignored. After issuing the delete
    requests, poll until the non-default instances are gone or ~50 seconds
    have elapsed.

    :param nova_client: authenticated Nova client handle
    :param default_instances: dict keyed by the instance ids that must
        NOT be removed
    """
    logger.info("Removing Nova instances...")
    instances = os_utils.get_instances(nova_client)
    if instances is None or len(instances) == 0:
        logger.debug("No instances found.")
        return

    for instance in instances:
        instance_name = getattr(instance, 'name')
        instance_id = getattr(instance, 'id')
        logger.debug("'%s', ID=%s " % (instance_name, instance_id))
        if instance_id not in default_instances:
            logger.debug("Removing instance '%s' ..." % instance_id)
            if os_utils.delete_instance(nova_client, instance_id):
                logger.debug(" > Request sent.")
            else:
                logger.error("There has been a problem removing the "
                             "instance %s..." % instance_id)
        else:
            logger.debug(" > this is a default instance and will "
                         "NOT be deleted.")

    # Poll until only default instances remain, or give up after ~50 s.
    # BUGFIX: the previous loop exited the while unconditionally after one
    # pass over the instance list (the `continue` only advanced the inner
    # for; the trailing `break` then always fired), so termination was
    # never really awaited. Also tolerate a None result while polling.
    timeout = 50
    while timeout > 0:
        leftovers = [
            getattr(instance, 'id')
            for instance in (os_utils.get_instances(nova_client) or [])
            if getattr(instance, 'id') not in default_instances]
        if not leftovers:
            break
        logger.debug("Waiting for instances to be terminated...")
        timeout -= 1
        time.sleep(1)
def delete_instances():
    """Request deletion of every Nova instance visible to the client.

    Best-effort: each delete is issued independently and the result is
    not checked. Nothing happens when the instance query returns None.
    """
    nova_client = os_utils.get_nova_client()
    servers = os_utils.get_instances(nova_client)
    if servers is None:
        return
    for server in servers:
        logger.info("Removing instance: {0}".format(server.id))
        os_utils.delete_instance(nova_client, server.id)
def get_instances(nova_client):
    """Return {'instances': {id: name, ...}} for every visible server.

    An empty mapping is returned when the query yields None or no servers.
    """
    logger.debug("Getting instances...")
    servers = os_utils.get_instances(nova_client)
    if servers is None or len(servers) == 0:
        return {'instances': {}}
    # Build the id -> name map in one pass.
    id_to_name = {getattr(server, 'id'): getattr(server, 'name')
                  for server in servers}
    return {'instances': id_to_name}
def get_instances(nova_client):
    """Collect all Nova servers into a {'instances': {id: name}} dict.

    Yields an empty inner mapping when no servers are found (None or
    empty result from the query).
    """
    logger.debug("Getting instances...")
    found = os_utils.get_instances(nova_client)
    result = {}
    if found:
        for server in found:
            result[getattr(server, 'id')] = getattr(server, 'name')
    return {'instances': result}
def deploy_vnf(self):
    """Deploy ABOT-OAI-EPC.

    Creates (or reuses) the flavor required by the Abot-EPC VNF, deploys
    the bundle through juju, waits for the deployment to settle, opens
    the juju-managed security group, pushes the feature files to the
    Abot node, and finally waits for the EPC service to report running.

    Returns True when the juju deployment converged (juju-wait exit
    status 0), False otherwise.
    """
    self.__logger.info("Upload VNFD")
    descriptor = self.vnf['descriptor']
    self.__logger.info("Get or create flavor for all Abot-EPC")
    # Flavor sized from the VNF requirements; disk/vcpus are fixed here.
    flavor_settings = FlavorConfig(
        name=self.vnf['requirements']['flavor']['name'],
        ram=self.vnf['requirements']['flavor']['ram_min'],
        disk=10,
        vcpus=1)
    flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
    flavor_creator.create()
    # Track the creator so it can be cleaned up later.
    self.created_object.append(flavor_creator)
    self.__logger.info("Deploying Abot-epc bundle file ...")
    # NOTE(review): file_name is joined with a bare '/', so the bundle is
    # expected at an absolute path — confirm against the descriptor.
    os.system('juju deploy {}'.format('/' + descriptor.get('file_name')))
    self.__logger.info("Waiting for instances .....")
    # juju-wait blocks until all units are idle; 0 means success.
    status = os.system('juju-wait')
    self.__logger.info("juju wait completed: %s", status)
    self.__logger.info("Deployed Abot-epc on Openstack")
    nova_client = nova_utils.nova_client(self.snaps_creds)
    neutron_client = neutron_utils.neutron_client(self.snaps_creds)
    if status == 0:
        # Find the security group juju created for this model by scanning
        # instance metadata for the juju controller/model UUIDs.
        instances = os_utils.get_instances(nova_client)
        for items in instances:
            metadata = get_instance_metadata(nova_client, items)
            if 'juju-units-deployed' in metadata:
                sec_group = ('juju-' + metadata['juju-controller-uuid'] +
                             '-' + metadata['juju-model-uuid'])
                self.sec_group_id = os_utils.get_security_group_id(
                    neutron_client, sec_group)
                break
        self.__logger.info("Adding Security group rule....")
        # NOTE(review): 132 presumably selects SCTP (IP protocol 132) for
        # the ingress rule — confirm against os_utils.create_secgroup_rule.
        os_utils.create_secgroup_rule(
            neutron_client, self.sec_group_id, 'ingress', 132)
        self.__logger.info("Copying the feature files to Abot_node ")
        os.system('juju scp -- -r {}/featureFiles abot-'
                  'epc-basic/0:~/'.format(self.case_dir))
        self.__logger.info("Copying the feature files in Abot_node ")
        os.system("juju ssh abot-epc-basic/0 'sudo rsync -azvv "
                  "~/featureFiles /etc/rebaca-test-suite"
                  "/featureFiles'")
        # Poll up to 10 times (60 s apart) for the EPC service to report
        # running in the juju status output.
        count = 0
        while count < 10:
            epcstatus = os.system('juju status oai-epc | '
                                  'grep {} | grep {} | grep {}'
                                  .format('EPC', 'is', 'running'))
            if epcstatus == 0:
                break
            else:
                time.sleep(60)
                count = count + 1
        os.system('juju-wait')
        return True
    return False
def main():
    """Run the symmetric SFC demo: one red chain, HTTP classified both ways.

    Deploys a single VNF through Tacker, builds a symmetric 'red' service
    chain with forward and reverse HTTP classifiers, blocks port 80 on the
    service function, then patches OVS flows on the compute nodes so the
    reverse rendered service path carries the NSH C3 marking. Exits the
    process (sys.exit(1)) on boot/ping/SSH failures.
    """
    deploymentHandler = DeploymentFactory.get_handler(
        INSTALLER["type"],
        INSTALLER["ip"],
        INSTALLER["user"],
        installer_pwd=INSTALLER["password"])
    cluster = INSTALLER["cluster"]
    # A cluster id restricts node discovery to that cluster.
    openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
                       if cluster is not None
                       else deploymentHandler.get_nodes())
    compute_nodes = [node for node in openstack_nodes if node.is_compute()]
    odl_ip, odl_port = test_utils.get_odl_ip_port(openstack_nodes)
    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()
    tacker_client = os_tacker.get_tacker_client()
    ovs_logger = ovs_log.OVSLogger(
        os.path.join(DEMO_DIR, 'ovs-logs'), RESULTS_DIR)
    sg_id = os_utils.get_security_group_id(neutron_client, SECGROUP_NAME)
    vnfs = ['testVNF1', 'testVNF2']
    # Fixed seed keeps the VNF placement deterministic across runs.
    topo_seed = 0
    testTopology = topo_shuffler.topology(vnfs, seed=topo_seed)
    logger.info('This test is run with the topology {0}'
                .format(testTopology['id']))
    logger.info('Topology description: {0}'
                .format(testTopology['description']))
    tosca_red = os.path.join(DEMO_DIR, VNFD_DIR, TEST_VNFD)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_red)
    default_param_file = os.path.join(
        DEMO_DIR, VNFD_DIR, VNFD_DEFAULT_PARAMS_FILE)
    test_utils.create_vnf_in_av_zone(
        tacker_client, vnfs[0], 'test-vnfd1', default_param_file,
        testTopology[vnfs[0]])
    vnf_id = os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF1')
    if vnf_id is None:
        logger.error('ERROR while booting vnf')
        sys.exit(1)
    vnf_instance_id = test_utils.get_nova_id(tacker_client, 'vdu1', vnf_id)
    # Attach the test security group to every instance that is neither the
    # traffic client nor the server.
    instances = os_utils.get_instances(nova_client)
    for instance in instances:
        if ('client' not in instance.name) and ('server' not in instance.name):
            os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
    # Symmetric chain: both forward (dst 80) and reverse (src 80) HTTP
    # traffic is steered through testVNF1.
    os_tacker.create_sfc(tacker_client, 'red', chain_vnf_names=['testVNF1'],
                         symmetrical=True)
    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http', sfc_name='red',
        match={
            'source_port': 0,
            'dest_port': 80,
            'protocol': 6
        })
    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http_reverse', sfc_name='red',
        match={
            'source_port': 80,
            'dest_port': 0,
            'protocol': 6
        })
    logger.info(test_utils.run_cmd('tacker sfc-list')[1])
    logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])
    sf_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, vnf_instance_id)
    # Sanity-check reachability of the service function before touching it.
    for ip in [sf_floating_ip]:
        logger.info("Checking connectivity towards floating IP [%s]" % ip)
        if not test_utils.ping(ip, retries=50, retry_timeout=1):
            logger.error("Cannot ping floating IP [%s]" % ip)
            sys.exit(1)
        logger.info("Successful ping to floating IP [%s]" % ip)
    if not test_utils.check_ssh([sf_floating_ip]):
        logger.error("Cannot establish SSH connection to the SFs")
        sys.exit(1)
    logger.info("Firewall started, blocking traffic port 80")
    test_utils.vxlan_firewall(sf_floating_ip, port=80)
    # Re-launch the vxlan tool on the SF in the background, blocking port 80.
    cmd = "python vxlan_tool.py --metadata -i eth0 -d forward -v off -b 80"
    cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
    test_utils.run_cmd_remote(sf_floating_ip, cmd)
    time.sleep(7)
    logger.info("Wait for ODL to update the classification rules in OVS")
    time.sleep(10)
    # Find the reverse rendered service path published by ODL.
    rsps = test_utils.get_odl_resource_list(
        odl_ip, odl_port, 'rendered-service-path', datastore='operational')
    reverse_path_id = next(
        rsp['path-id']
        for rsp in rsps['rendered-service-paths']['rendered-service-path']
        if rsp['name'].endswith('Reverse'))
    hex_path_id = hex(int(reverse_path_id))
    # Action to stamp the reverse path id into the NSH C3 context header.
    reverse_path_action = "load:{0}->NXM_NX_NSH_C3[]".format(hex_path_id)
    for compute_node in compute_nodes:
        compute_ssh = compute_node.ssh_client
        match_rsp = re.compile(
            r'.+tp_dst=80.+load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\].+')
        # First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
        # This is not a flow so ignore
        flows = (ovs_logger.ofctl_dump_flows(compute_ssh, 'br-int', '11')
                 .strip().split('\n')[1:])
        matching_flows = [match_rsp.match(f) for f in flows]
        # NOTE(review): `break` here stops at the first compute node with
        # no matching flow instead of skipping it — confirm `continue`
        # was not intended.
        if all(m is None for m in matching_flows):
            break
        uplink_flow = [f.group(0) for f in matching_flows
                       if f is not None][0]
        actions = uplink_flow.split("actions=")[1]
        # Prepend the C3 load to the existing action list.
        actions_c3 = "{0},{1}".format(reverse_path_action, actions)
        cmd = ("ovs-ofctl -OOpenflow13 mod-flows br-int "
               "\"table=11,tcp,reg0=0x1,tp_dst=80,actions={0}\""
               .format(actions_c3))
        # The mod-flows/add-flow commands below are only logged, not
        # executed (compute_node.run_cmd calls are commented out).
        #compute_node.run_cmd(cmd)
        logger.info("Running: {0}".format(cmd))
        # Locate the output port of the SF from the table-158 flows.
        match_port = re.compile(
            r'.+table=158.+output:([0-9]+)')
        flows = (ovs_logger.ofctl_dump_flows(compute_ssh, 'br-int', '158')
                 .strip().split('\n')[1:])
        matching_flows = [match_port.match(f) for f in flows]
        sf_port = [f.group(1) for f in matching_flows if f is not None][0]
        cmd = ("ovs-ofctl -O Openflow13 add-flow br-int "
               "\"table=11,nsi=254,nsp={0} actions=load:0x1->NXM_NX_REG0[],"
               "move:NXM_NX_NSH_C2[]->NXM_NX_TUN_ID[0..31],resubmit({1},1)\""
               .format(reverse_path_id, sf_port))
        #compute_node.run_cmd(cmd)
        logger.info("Running: {0}".format(cmd))
        cmd = ("ovs-ofctl -O Openflow13 add-flow br-int "
               "\"table=1, priority=40000,nsi=254,nsp={0},reg0=0x1,"
               "in_port={1} actions=pop_nsh,goto_table:21\""
               .format(reverse_path_id, sf_port))
        logger.info("Running: {0}".format(cmd))
    logger.info("HTTP traffic from client to server should be blocked")
    logger.info("When trying to send HTTP traffic to server it should "
                "respond with TCP RESET")
def main():
    """Set up the two-chain SFC scenario (red: HTTP/SSH, blue: second VNF).

    Deploys two VNFs through Tacker, wires them into two service chains,
    attaches HTTP and SSH classifiers to the red chain, and starts a
    background thread that waits for the classification rules to land in
    OVS. Exits the process (sys.exit(1)) if either VNF fails to boot.
    NOTE(review): the function appears to continue beyond this chunk —
    the started thread is presumably joined further down.
    """
    deploymentHandler = DeploymentFactory.get_handler(
        INSTALLER["type"],
        INSTALLER["ip"],
        INSTALLER["user"],
        installer_pwd=INSTALLER["password"])
    cluster = INSTALLER["cluster"]
    # A cluster id restricts node discovery to that cluster.
    openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
                       if cluster is not None
                       else deploymentHandler.get_nodes())
    compute_nodes = [node for node in openstack_nodes if node.is_compute()]
    odl_ip, odl_port = test_utils.get_odl_ip_port(openstack_nodes)
    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()
    tacker_client = os_tacker.get_tacker_client()
    compute_clients = test_utils.get_ssh_clients(compute_nodes)
    ovs_logger = ovs_log.OVSLogger(os.path.join(DEMO_DIR, 'ovs-logs'),
                                   RESULTS_DIR)
    sg_id = os_utils.get_security_group_id(neutron_client, SECGROUP_NAME)
    vnfs = ['testVNF1', 'testVNF2']
    # Fixed seed keeps the VNF placement deterministic across runs.
    topo_seed = 0
    testTopology = topo_shuffler.topology(vnfs, seed=topo_seed)
    logger.info('This test is run with the topology {0}'.format(
        testTopology['id']))
    logger.info('Topology description: {0}'.format(
        testTopology['description']))
    # Register both VNF descriptors with Tacker.
    tosca_red = os.path.join(DEMO_DIR, VNFD_DIR, TEST_VNFD_RED)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_red)
    tosca_blue = os.path.join(DEMO_DIR, VNFD_DIR, TEST_VNFD_BLUE)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_blue)
    default_param_file = os.path.join(DEMO_DIR, VNFD_DIR,
                                      VNFD_DEFAULT_PARAMS_FILE)
    # Boot one VNF per descriptor in the availability zone chosen by the
    # shuffled topology.
    test_utils.create_vnf_in_av_zone(tacker_client, vnfs[0], 'test-vnfd1',
                                     default_param_file,
                                     testTopology[vnfs[0]])
    test_utils.create_vnf_in_av_zone(tacker_client, vnfs[1], 'test-vnfd2',
                                     default_param_file,
                                     testTopology[vnfs[1]])
    vnf1_id = os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF1')
    vnf2_id = os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF2')
    if vnf1_id is None or vnf2_id is None:
        logger.error('ERROR while booting vnfs')
        sys.exit(1)
    # Attach the test security group to every instance that is neither the
    # traffic client nor the server.
    instances = os_utils.get_instances(nova_client)
    for instance in instances:
        if ('client' not in instance.name) and ('server' not in instance.name):
            os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
    # One chain per VNF; red gets both the HTTP and SSH classifiers.
    os_tacker.create_sfc(tacker_client, 'red', chain_vnf_names=['testVNF1'])
    os_tacker.create_sfc(tacker_client, 'blue', chain_vnf_names=['testVNF2'])
    os_tacker.create_sfc_classifier(tacker_client, 'red_http',
                                    sfc_name='red',
                                    match={
                                        'source_port': 0,
                                        'dest_port': 80,
                                        'protocol': 6
                                    })
    os_tacker.create_sfc_classifier(tacker_client, 'red_ssh',
                                    sfc_name='red',
                                    match={
                                        'source_port': 0,
                                        'dest_port': 22,
                                        'protocol': 6
                                    })
    logger.info(test_utils.run_cmd('tacker sfc-list')[1])
    logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])
    # NOTE(review): num_chains is unused in the visible span — presumably
    # consumed later in the function; confirm before removing.
    num_chains = 2

    # Start measuring the time it takes to implement the classification rules
    t1 = threading.Thread(target=test_utils.wait_for_classification_rules,
                          args=(ovs_logger, compute_nodes, odl_ip, odl_port,
                                testTopology,))
    try:
        t1.start()
    # NOTE(review): `except Exception, e` is Python-2-only syntax; the file
    # will not parse under Python 3 until this becomes `except Exception as e`.
    except Exception, e:
        logger.error("Unable to start the thread that counts time %s" % e)