def main():
    conn = os_utils.get_os_connection()
    results = Results(COMMON_CONFIG.line_length, conn)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    neutron_client = os_utils.get_neutron_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            conn, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_1_name,
                                               TESTCASE_CONFIG.subnet_1_cidr,
                                               network_1_id)

        network_2_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_2_name)
        subnet_2_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_2_name,
                                               TESTCASE_CONFIG.subnet_2_cidr,
                                               network_2_id)

        network_ids.extend([network_1_id, network_2_id])
        subnet_ids.extend([subnet_1_id, subnet_2_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
        av_zone_1 = "nova:" + compute_nodes[0]
        av_zone_2 = "nova:" + compute_nodes[1]

        # boot INSTANCES
        vm_2 = test_utils.create_instance(
            conn,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        vm_2_ip = test_utils.get_instance_ip(conn, vm_2)

        vm_3 = test_utils.create_instance(
            conn,
            TESTCASE_CONFIG.instance_3_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2)
        vm_3_ip = test_utils.get_instance_ip(conn, vm_3)

        vm_5 = test_utils.create_instance(
            conn,
            TESTCASE_CONFIG.instance_5_name,
            image_id,
            network_2_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2)
        vm_5_ip = test_utils.get_instance_ip(conn, vm_5)

        # We boot vm5 first because we need vm5_ip for vm4 userdata
        u4 = test_utils.generate_ping_userdata([vm_5_ip])
        vm_4 = test_utils.create_instance(
            conn,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_2_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4)
        vm_4_ip = test_utils.get_instance_ip(conn, vm_4)

        # We boot VM1 at the end because we need to get the IPs first
        # to generate the userdata
        u1 = test_utils.generate_ping_userdata([vm_2_ip,
                                                vm_3_ip,
                                                vm_4_ip,
                                                vm_5_ip])
        vm_1 = test_utils.create_instance(
            conn,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1)
        instance_ids.extend([vm_1.id, vm_2.id, vm_3.id, vm_4.id, vm_5.id])

        msg = ("Create VPN with eRT<>iRT")
        results.record_action(msg)
        vpn_name = "sdnvpn-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets2,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }
        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate network '%s' to the VPN."
               % TESTCASE_CONFIG.net_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(
            neutron_client, bgpvpn_id, network_1_id)

        # Wait for VMs to be ready.
        instances_up = test_utils.wait_for_instances_up(vm_2, vm_3, vm_5)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_4)
        if (not instances_up or not instances_dhcp_up):
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
        results.get_ping_status(vm_1, vm_3, expected="PASS", timeout=30)
        results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)

        msg = ("Associate network '%s' to the VPN."
               % TESTCASE_CONFIG.net_2_name)
        results.add_to_summary(0, "-")
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(
            neutron_client, bgpvpn_id, network_2_id)

        test_utils.wait_for_bgp_net_assocs(neutron_client,
                                           bgpvpn_id,
                                           network_1_id,
                                           network_2_id)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status(vm_4, vm_5, expected="PASS", timeout=30)
        # TODO enable again when isolation in VPN with iRT != eRT works
        # results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)
        # results.get_ping_status(vm_1, vm_5, expected="FAIL", timeout=30)

        msg = ("Update VPN with eRT=iRT ...")
        results.add_to_summary(0, "-")
        results.record_action(msg)
        results.add_to_summary(0, "-")

        # use bgpvpn-create instead of update until the NETVIRT-1067 bug
        # is fixed
        # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
        #           "export_targets": TESTCASE_CONFIG.targets1,
        #           "name": vpn_name}
        # bgpvpn = test_utils.update_bgpvpn(neutron_client,
        #                                   bgpvpn_id, **kwargs)

        test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
        bgpvpn_ids.remove(bgpvpn_id)
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets1,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }

        test_utils.wait_before_subtest()

        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN re-created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate network '%s' to the VPN."
               % TESTCASE_CONFIG.net_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(
            neutron_client, bgpvpn_id, network_1_id)
        test_utils.create_network_association(
            neutron_client, bgpvpn_id, network_2_id)

        test_utils.wait_for_bgp_net_assocs(neutron_client,
                                           bgpvpn_id,
                                           network_1_id,
                                           network_2_id)
        # The above code has to be removed after re-enabling bgpvpn-update

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status(vm_1, vm_4, expected="PASS", timeout=30)
        results.get_ping_status(vm_1, vm_5, expected="PASS", timeout=30)

    except Exception as e:
        logger.error("exception occurred while executing testcase_1: %s", e)
        raise
    finally:
        test_utils.cleanup_nova(conn, instance_ids)
        test_utils.cleanup_glance(conn, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
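# `test_utils.generate_ping_userdata` is used throughout these testcases to
# build a cloud-init script that continuously pings the given IPs; the
# Results.get_ping_status checks later inspect that output. The real helper
# lives in the sdnvpn test utilities, so the script body below is only a
# plausible sketch of what such a generator could return, not the canonical
# implementation.
def generate_ping_userdata_sketch(ips_array):
    # One "ping -c 1 <ip>" probe per target, repeated in an endless loop.
    probes = "; ".join("ping -c 1 %s" % ip for ip in ips_array)
    return ("#!/bin/sh\n"
            "while true; do\n"
            "    %s\n"
            "    sleep 1\n"
            "done\n" % probes)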
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()
    openstack_nodes = test_utils.get_nodes()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_1_name,
                                               TESTCASE_CONFIG.subnet_1_cidr,
                                               network_1_id)
        network_ids.append(network_1_id)
        subnet_ids.append(subnet_1_id)

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        # Check required number of compute nodes
        compute_hostname = (
            nova_client.hypervisors.list()[0].hypervisor_hostname)
        compute_nodes = [node for node in openstack_nodes
                         if node.is_compute()]

        av_zone_1 = "nova:" + compute_hostname
        # List of OVS bridges to get groups
        ovs_br = "br-int"
        # Get the flow and group counts before the topology is created
        initial_ovs_flows = len(test_utils.get_ovs_flows(compute_nodes,
                                                         [ovs_br]))
        initial_ovs_groups = len(test_utils.get_ovs_groups(compute_nodes,
                                                           [ovs_br]))

        # boot INSTANCES
        vm_2 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        vm_1 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        instance_ids.extend([vm_1.id, vm_2.id])

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_1, vm_2)
        if not instances_up:
            logger.error("One or more instances are down")

        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()

        # Get the flow and group counts after the topology is up
        added_ovs_flows = len(test_utils.get_ovs_flows(compute_nodes,
                                                       [ovs_br]))
        added_ovs_groups = len(test_utils.get_ovs_groups(compute_nodes,
                                                         [ovs_br]))

        # Check if flows and groups were added successfully
        results.record_action("Check if new flows and groups were added "
                              "to OVS")

        msg = "New OVS flows added"
        results.add_to_summary(0, "-")
        if added_ovs_flows - initial_ovs_flows > 0:
            results.add_success(msg)
        else:
            results.add_failure(msg)
        results.add_to_summary(0, "=")

        msg = "New OVS groups added"
        results.add_to_summary(0, "-")
        if added_ovs_groups - initial_ovs_groups > 0:
            results.add_success(msg)
        else:
            results.add_failure(msg)
        results.add_to_summary(0, "=")

        get_ext_ip_cmd = "sudo ovs-vsctl get-controller {}".format(ovs_br)
        ovs_controller_conn = (compute_nodes[0].run_cmd(get_ext_ip_cmd)
                               .strip().split('\n')[0])

        for compute_node in compute_nodes:
            # Disconnect OVS from the controller
            compute_node.run_cmd("sudo ovs-vsctl del-controller {}"
                                 .format(ovs_br))
            test_utils.wait_before_subtest()
            # Reconnect OVS to the controller
            compute_node.run_cmd("sudo ovs-vsctl set-controller {} {}"
                                 .format(ovs_br, ovs_controller_conn))

        logger.info("Wait before subtest resync type 1")
        test_utils.wait_before_subtest()

        # Get the OVS flow count after the reconnection
        resynced_ovs_flows = len(test_utils.get_ovs_flows(
            compute_nodes, [ovs_br]))
        # Get the OVS group count after the reconnection
        resynced_ovs_groups = len(test_utils.get_ovs_groups(
            compute_nodes, [ovs_br]))

        record_action_msg = ("Check if flows/groups are reprogrammed in OVS "
                             "after its reconnection by del/set controller.")
        record_test_result(added_ovs_flows, resynced_ovs_flows,
                           added_ovs_groups, resynced_ovs_groups,
                           record_action_msg, results)

        for compute_node in compute_nodes:
            # Block the OpenFlow port to disconnect OVS from the controller
            compute_node.run_cmd("sudo iptables -A OUTPUT -p tcp --dport 6653"
                                 " -j DROP")
            test_utils.wait_before_subtest()
            # Unblock the OpenFlow port to reconnect OVS to the controller
            compute_node.run_cmd("sudo iptables -D OUTPUT -p tcp --dport 6653"
                                 " -j DROP")

        logger.info("Wait before subtest resync type 2")
        test_utils.wait_before_subtest()

        # Get the OVS flow count after the reconnection
        resynced_ovs_flows = len(test_utils.get_ovs_flows(
            compute_nodes, [ovs_br]))
        # Get the OVS group count after the reconnection
        resynced_ovs_groups = len(test_utils.get_ovs_groups(
            compute_nodes, [ovs_br]))

        record_action_msg = ("Check if flows/groups are reprogrammed in OVS "
                             "after its reconnection by firewall rule for "
                             "OF port block/unblock")
        record_test_result(added_ovs_flows, resynced_ovs_flows,
                           added_ovs_groups, resynced_ovs_groups,
                           record_action_msg, results)

    except Exception as e:
        logger.error("exception occurred while executing testcase_12: %s", e)
        raise
    finally:
        # Cleanup topology
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
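# `record_test_result`, called twice above, is a reporting helper that is not
# defined in this section. Below is a minimal sketch, assuming the Results
# API used throughout these testcases (record_action, add_success,
# add_failure, add_to_summary); the comparison logic is an assumption
# inferred from how the call sites pass the flow/group counts.
def record_test_result(added_ovs_flows, resynced_ovs_flows,
                       added_ovs_groups, resynced_ovs_groups,
                       record_action_msg, results):
    results.record_action(record_action_msg)
    results.add_to_summary(0, "-")
    # Flows/groups count as reprogrammed when the post-reconnection counts
    # match the counts observed right after the topology was created.
    if resynced_ovs_flows == added_ovs_flows:
        results.add_success("OVS flows were reprogrammed")
    else:
        results.add_failure("OVS flows were not reprogrammed")
    if resynced_ovs_groups == added_ovs_groups:
        results.add_success("OVS groups were reprogrammed")
    else:
        results.add_failure("OVS groups were not reprogrammed")
    results.add_to_summary(0, "=")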
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    openstack_nodes = test_utils.get_nodes()

    # node.is_odl() doesn't work in Apex
    # https://jira.opnfv.org/browse/RELENG-192
    controllers = [
        node for node in openstack_nodes
        if "running" in node.run_cmd("sudo systemctl status opendaylight")]
    computes = [node for node in openstack_nodes if node.is_compute()]

    msg = ("Verify that OpenDaylight can start/communicate with zrpcd/Quagga")
    results.record_action(msg)
    results.add_to_summary(0, "-")

    if not controllers:
        msg = ("Controller (ODL) list is empty. Skipping rest of tests.")
        logger.info(msg)
        results.add_failure(msg)
        return results.compile_summary()
    else:
        msg = ("Controller (ODL) list is ready")
        logger.info(msg)
        results.add_success(msg)

    controller = controllers[0]  # We don't handle HA well
    get_ext_ip_cmd = "sudo ip a | grep br-ex | grep inet | awk '{print $2}'"
    ext_net_cidr = controller.run_cmd(get_ext_ip_cmd).strip().split('\n')
    ext_net_mask = ext_net_cidr[0].split('/')[1]
    controller_ext_ip = ext_net_cidr[0].split('/')[0]

    logger.info("Starting bgp speaker of controller at IP %s "
                % controller_ext_ip)
    logger.info("Checking if zrpcd is running on the controller node")

    output_zrpcd = controller.run_cmd("ps --no-headers -C zrpcd -o state")
    states = output_zrpcd.split()
    running = any([s != 'Z' for s in states])

    msg = ("zrpcd is running")
    if not running:
        logger.info("zrpcd is not running on the controller node")
        results.add_failure(msg)
    else:
        logger.info("zrpcd is running on the controller node")
        results.add_success(msg)
    results.add_to_summary(0, "-")

    # Ensure that ZRPCD ip & port are well configured within ODL
    add_client_conn_to_bgp = "bgp-connect -p 7644 -h 127.0.0.1 add"
    test_utils.run_odl_cmd(controller, add_client_conn_to_bgp)

    # Start bgp daemon
    start_quagga = "odl:configure-bgp -op start-bgp-server " \
                   "--as-num 100 --router-id {0}".format(controller_ext_ip)
    test_utils.run_odl_cmd(controller, start_quagga)

    logger.info("Checking if bgpd is running on the controller node")

    # Check if there is a non-zombie bgpd process
    output_bgpd = controller.run_cmd("ps --no-headers -C bgpd -o state")
    states = output_bgpd.split()
    running = any([s != 'Z' for s in states])

    msg = ("bgpd is running")
    if not running:
        logger.info("bgpd is not running on the controller node")
        results.add_failure(msg)
    else:
        logger.info("bgpd is running on the controller node")
        results.add_success(msg)
    results.add_to_summary(0, "-")

    # We should be able to restart the speaker
    # but the test is disabled because of buggy upstream
    # https://github.com/6WIND/zrpcd/issues/15
    # stop_quagga = 'odl:configure-bgp -op stop-bgp-server'
    # test_utils.run_odl_cmd(controller, stop_quagga)
    # logger.info("Checking if bgpd is still running"
    #             " on the controller node")
    # output_bgpd = controller.run_cmd("ps --no-headers -C "
    #                                  "bgpd -o state")
    # states = output_bgpd.split()
    # running = any([s != 'Z' for s in states])
    # msg = ("bgpd is stopped")
    # if not running:
    #     logger.info("bgpd is not running on the controller node")
    #     results.add_success(msg)
    # else:
    #     logger.info("bgpd is still running on the controller node")
    #     results.add_failure(msg)

    # Taken from the sfc tests
    if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
        logger.info("Downloading image")
        os_utils.download_url(
            "http://artifacts.opnfv.org/sdnvpn/"
            "ubuntu-16.04-server-cloudimg-amd64-disk1.img",
            "/home/opnfv/functest/data/")
    else:
        logger.info("Using old image")

    glance_client = os_utils.get_glance_client()
    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))

    try:
        _, flavor_id = test_utils.create_custom_flavor()
        flavor_ids.append(flavor_id)

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)
        test_utils.open_icmp(neutron_client, sg_id)
        test_utils.open_http_port(neutron_client, sg_id)
        test_utils.open_bgp_port(neutron_client, sg_id)

        net_id, subnet_1_id, router_1_id = test_utils.create_network(
            neutron_client,
            TESTCASE_CONFIG.net_1_name,
            TESTCASE_CONFIG.subnet_1_name,
            TESTCASE_CONFIG.subnet_1_cidr,
            TESTCASE_CONFIG.router_1_name)

        quagga_net_id, subnet_quagga_id, router_quagga_id = (
            test_utils.create_network(
                neutron_client,
                TESTCASE_CONFIG.quagga_net_name,
                TESTCASE_CONFIG.quagga_subnet_name,
                TESTCASE_CONFIG.quagga_subnet_cidr,
                TESTCASE_CONFIG.quagga_router_name))

        interfaces.append((router_1_id, subnet_1_id))
        interfaces.append((router_quagga_id, subnet_quagga_id))
        network_ids.extend([net_id, quagga_net_id])
        router_ids.extend([router_1_id, router_quagga_id])
        subnet_ids.extend([subnet_1_id, subnet_quagga_id])

        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
        if installer_type == "fuel":
            disk = 'raw'
        elif installer_type == "apex":
            disk = 'qcow2'
        else:
            logger.error("Incompatible installer type")

        ubuntu_image_id = os_utils.create_glance_image(
            glance_client,
            COMMON_CONFIG.ubuntu_image_name,
            COMMON_CONFIG.ubuntu_image_path,
            disk,
            container="bare",
            public="public")
        image_ids.append(ubuntu_image_id)

        # NOTE(rski) The order of this seems a bit weird but
        # there is a reason for this, namely
        # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
        # so we create the quagga instance using cloud-init
        # and immediately give it a floating IP.
        # The cloud-init script should contain a small sleep for
        # this to work.
        # We also create the FIP first because it is used in the
        # cloud-init script.
        fip = os_utils.create_floating_ip(neutron_client)
        # fake_fip is needed to bypass NAT
        # see below for the reason why.
        fake_fip = os_utils.create_floating_ip(neutron_client)

        floatingip_ids.extend([fip['fip_id'], fake_fip['fip_id']])

        # pin quagga to some compute
        compute_node = nova_client.hypervisors.list()[0]
        quagga_compute_node = "nova:" + compute_node.hypervisor_hostname
        # Map the hypervisor used above to a compute handle
        # returned by releng's manager
        for comp in computes:
            if compute_node.host_ip in comp.run_cmd("sudo ip a"):
                compute = comp
                break

        quagga_bootstrap_script = quagga.gen_quagga_setup_script(
            controller_ext_ip,
            fake_fip['fip_addr'],
            ext_net_mask)
        quagga_vm = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.quagga_instance_name,
            ubuntu_image_id,
            quagga_net_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.quagga_instance_ip,
            flavor=COMMON_CONFIG.custom_flavor_name,
            userdata=quagga_bootstrap_script,
            compute_node=quagga_compute_node)
        instance_ids.append(quagga_vm.id)

        fip_added = os_utils.add_floating_ip(nova_client, quagga_vm.id,
                                             fip['fip_addr'])
        msg = ("Assign a Floating IP to %s "
               % TESTCASE_CONFIG.quagga_instance_name)
        if fip_added:
            results.add_success(msg)
        else:
            results.add_failure(msg)

        test_utils.attach_instance_to_ext_br(quagga_vm, compute)

        try:
            testcase = "Bootstrap quagga inside an OpenStack instance"
            cloud_init_success = test_utils.wait_for_cloud_init(quagga_vm)
            if cloud_init_success:
                results.add_success(testcase)
            else:
                results.add_failure(testcase)
            results.add_to_summary(0, "=")

            results.add_to_summary(0, '-')
            results.add_to_summary(1, "Peer Quagga with OpenDaylight")
            results.add_to_summary(0, '-')

            neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
                                               controller_ext_ip,
                                               controller)
            peer = quagga.check_for_peering(controller)
        finally:
            test_utils.detach_instance_from_ext_br(quagga_vm, compute)

        if neighbor and peer:
            results.add_success("Peering with quagga")
        else:
            results.add_failure("Peering with quagga")

    except Exception as e:
        logger.error("exception occurred while executing testcase_3: %s", e)
        raise
    finally:
        test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    image_id = os_utils.create_glance_image(
        glance_client, TESTCASE_CONFIG.image_name,
        COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
        container="bare", public='public')
    image_ids.append(image_id)

    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    subnet_1_id = test_utils.create_subnet(neutron_client,
                                           TESTCASE_CONFIG.subnet_1_name,
                                           TESTCASE_CONFIG.subnet_1_cidr,
                                           network_1_id)
    network_ids.append(network_1_id)
    subnet_ids.append(subnet_1_id)

    sg_id = os_utils.create_security_group_full(
        neutron_client, TESTCASE_CONFIG.secgroup_name,
        TESTCASE_CONFIG.secgroup_descr)

    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES
    vm_2 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
        network_1_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    vm2_ip = test_utils.get_instance_ip(vm_2)

    u1 = test_utils.generate_ping_userdata([vm2_ip])
    vm_1 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
        network_1_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1, userdata=u1)
    vm1_ip = test_utils.get_instance_ip(vm_1)

    u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
    vm_3 = test_utils.create_instance(
        nova_client, TESTCASE_CONFIG.instance_3_name, image_id,
        network_1_id, sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2, userdata=u3)
    vm3_ip = test_utils.get_instance_ip(vm_3)
    # We do not put vm_2 id in instance_ids table because we will
    # delete the current instance during the testing process
    instance_ids.extend([vm_1.id, vm_3.id])

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_2)
    instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3)

    if (not instances_up or not instances_dhcp_up):
        logger.error("One or more instances are down")
        # TODO: Handle this appropriately

    # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
    m = Manager()
    monitor_input1 = m.dict()
    monitor_output1 = m.dict()
    monitor_input1["stop_thread"] = False
    monitor_output1["error_msg"] = ""
    monitor_thread1 = Process(target=monitor, args=(monitor_input1,
                                                    monitor_output1, vm_1,))
    monitor_input2 = m.dict()
    monitor_output2 = m.dict()
    monitor_input2["stop_thread"] = False
    monitor_output2["error_msg"] = ""
    monitor_thread2 = Process(target=monitor, args=(monitor_input2,
                                                    monitor_output2, vm_2,))
    monitor_input3 = m.dict()
    monitor_output3 = m.dict()
    monitor_input3["stop_thread"] = False
    monitor_output3["error_msg"] = ""
    monitor_thread3 = Process(target=monitor, args=(monitor_input3,
                                                    monitor_output3, vm_3,))
    # Lists of all monitor threads and their inputs and outputs.
    threads = [monitor_thread1, monitor_thread2, monitor_thread3]
    thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
    thread_outputs = [monitor_output1, monitor_output2, monitor_output3]

    try:
        logger.info("Starting all monitor threads")
        # Start all monitor threads
        for thread in threads:
            thread.start()

        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += ", {}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_2, and vm_3")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

        # Stop monitor thread 2 and delete instance vm_2
        thread_inputs[1]["stop_thread"] = True
        if not os_utils.delete_instance(nova_client, vm_2.id):
            logger.error("Failed to delete the vm_2 instance during the"
                         " testing process")
            raise Exception("Failed to delete instance vm_2.")
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        for thread in threads:
            thread.join()
        threads = []
        thread_inputs = []
        thread_outputs = []

        # Create a new vm (vm_4) on compute 1 node
        u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
        vm_4 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_4_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1, userdata=u4)
        instance_ids.append(vm_4.id)

        # Wait for VMs to get ips.
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4)
        if not instances_dhcp_up:
            logger.error("Instance vm_4 failed to start.")
            # TODO: Handle this appropriately

        # Create and start a new monitor thread for vm_4
        monitor_input4 = m.dict()
        monitor_output4 = m.dict()
        monitor_input4["stop_thread"] = False
        monitor_output4["error_msg"] = ""
        monitor_thread4 = Process(target=monitor, args=(monitor_input4,
                                                        monitor_output4,
                                                        vm_4,))
        threads.append(monitor_thread4)
        thread_inputs.append(monitor_input4)
        thread_outputs.append(monitor_output4)
        logger.info("Starting monitor thread of vm_4")
        threads[0].start()
        test_utils.wait_before_subtest()
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += ", {}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                              "Instance vm_2 is deleted")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

    except Exception as e:
        logger.error("exception occurred while executing testcase_10: %s", e)
        raise
    finally:
        # Give a stop signal to all threads
        logger.info("Sending stop signal to monitor threads")
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        # Wait for all threads to stop and return to the main process
        for thread in threads:
            thread.join()

        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
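# The Process targets above expect a module-level `monitor` worker, which is
# not defined in this section. The loop below is only a minimal sketch of
# such a worker, under the assumptions that the VMs run the ping userdata
# generated above, that `vm` is a novaclient Server exposing
# get_console_output(), and that a ping failure leaves a recognizable marker
# in the console log; the real implementation may differ.
import time


def monitor_sketch(monitor_input, monitor_output, vm):
    # Poll the instance console until the parent flips "stop_thread"; report
    # the first ping failure seen through the shared "error_msg" slot.
    while not monitor_input["stop_thread"]:
        console_log = vm.get_console_output()
        if "ping not received" in console_log:  # assumed failure marker
            monitor_output["error_msg"] = ("ping failure detected on "
                                           "instance %s" % vm.name)
        time.sleep(1)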
def main():
    conn = os_utils.get_os_connection()
    results = Results(COMMON_CONFIG.line_length, conn)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    openstack_nodes = test_utils.get_nodes()
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())

    # node.is_odl() doesn't work in Apex
    # https://jira.opnfv.org/browse/RELENG-192
    fuel_cmd = "sudo systemctl status opendaylight"
    apex_cmd = "sudo docker exec opendaylight_api " \
               "/opt/opendaylight/bin/status"
    health_cmd = "sudo docker ps -f name=opendaylight_api -f " \
                 "health=healthy -q"
    if installer_type in ["fuel"]:
        controllers = [
            node for node in openstack_nodes
            if "running" in node.run_cmd(fuel_cmd)]
    elif installer_type in ["apex"]:
        controllers = [
            node for node in openstack_nodes
            if node.run_cmd(health_cmd)
            if "Running" in node.run_cmd(apex_cmd)]

    computes = [node for node in openstack_nodes if node.is_compute()]

    msg = ("Verify that OpenDaylight can start/communicate with zrpcd/Quagga")
    results.record_action(msg)
    results.add_to_summary(0, "-")

    if not controllers:
        msg = ("Controller (ODL) list is empty. Skipping rest of tests.")
        logger.info(msg)
        results.add_failure(msg)
        return results.compile_summary()
    else:
        msg = ("Controller (ODL) list is ready")
        logger.info(msg)
        results.add_success(msg)

    logger.info("Checking if zrpcd is running on the controller nodes")

    for controller in controllers:
        output_zrpcd = controller.run_cmd("ps --no-headers -C zrpcd -o state")
        states = output_zrpcd.split()
        running = any([s != 'Z' for s in states])
        msg = ("zrpcd is running in {name}".format(name=controller.name))

        if not running:
            logger.info("zrpcd is not running on the controller node {name}"
                        .format(name=controller.name))
            results.add_failure(msg)
        else:
            logger.info("zrpcd is running on the controller node {name}"
                        .format(name=controller.name))
            results.add_success(msg)

        results.add_to_summary(0, "-")

    # Find the BGP entity owner in ODL because of this bug:
    # https://jira.opendaylight.org/browse/NETVIRT-1308
    msg = ("Found BGP entity owner")
    controller = test_utils.get_odl_bgp_entity_owner(controllers)
    if controller is None:
        logger.error("Failed to find the BGP entity owner")
        results.add_failure(msg)
    else:
        logger.info('BGP entity owner is {name}'
                    .format(name=controller.name))
        results.add_success(msg)
    results.add_to_summary(0, "-")

    get_ext_ip_cmd = "sudo ip a | grep br-ex | grep inet | awk '{print $2}'"
    ext_net_cidr = controller.run_cmd(get_ext_ip_cmd).strip().split('\n')
    ext_net_mask = ext_net_cidr[0].split('/')[1]
    controller_ext_ip = ext_net_cidr[0].split('/')[0]

    logger.info("Starting bgp speaker of controller at IP %s "
                % controller_ext_ip)

    # Ensure that ZRPCD ip & port are well configured within ODL
    add_client_conn_to_bgp = "bgp-connect -p 7644 -h 127.0.0.1 add"
    test_utils.run_odl_cmd(controller, add_client_conn_to_bgp)

    # Start bgp daemon
    start_quagga = "odl:configure-bgp -op start-bgp-server " \
                   "--as-num 100 --router-id {0}".format(controller_ext_ip)
    test_utils.run_odl_cmd(controller, start_quagga)

    # we need to wait a bit until the bgpd is up
    time.sleep(5)

    logger.info("Checking if bgpd is running on the controller node")

    # Check if there is a non-zombie bgpd process
    output_bgpd = controller.run_cmd("ps --no-headers -C bgpd -o state")
    states = output_bgpd.split()
    running = any([s != 'Z' for s in states])

    msg = ("bgpd is running")
    if not running:
        logger.info("bgpd is not running on the controller node")
        results.add_failure(msg)
    else:
        logger.info("bgpd is running on the controller node")
        results.add_success(msg)

    results.add_to_summary(0, "-")

    # We should be able to restart the speaker
    # but the test is disabled because of buggy upstream
    # https://github.com/6WIND/zrpcd/issues/15
    # stop_quagga = 'odl:configure-bgp -op stop-bgp-server'
    # test_utils.run_odl_cmd(controller, stop_quagga)
    # logger.info("Checking if bgpd is still running"
    #             " on the controller node")
    # output_bgpd = controller.run_cmd("ps --no-headers -C "
    #                                  "bgpd -o state")
    # states = output_bgpd.split()
    # running = any([s != 'Z' for s in states])
    # msg = ("bgpd is stopped")
    # if not running:
    #     logger.info("bgpd is not running on the controller node")
    #     results.add_success(msg)
    # else:
    #     logger.info("bgpd is still running on the controller node")
    #     results.add_failure(msg)

    # Taken from the sfc tests
    if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
        logger.info("Downloading image")
        image_dest_path = '/'.join(
            COMMON_CONFIG.ubuntu_image_path.split('/')[:-1])
        os_utils.download_url(
            "http://artifacts.opnfv.org/sdnvpn/"
            "ubuntu-16.04-server-cloudimg-amd64-disk1.img",
            image_dest_path)
    else:
        logger.info("Using old image")

    neutron_client = os_utils.get_neutron_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))

    quagga_vm = None
    fake_fip = None

    try:
        _, flavor_id = test_utils.create_custom_flavor()
        flavor_ids.append(flavor_id)

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)
        test_utils.open_icmp(neutron_client, sg_id)
        test_utils.open_http_port(neutron_client, sg_id)
        test_utils.open_bgp_port(neutron_client, sg_id)

        image_id = os_utils.create_glance_image(
            conn, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        net_1_id, subnet_1_id, router_1_id = test_utils.create_network(
            neutron_client,
            TESTCASE_CONFIG.net_1_name,
            TESTCASE_CONFIG.subnet_1_name,
            TESTCASE_CONFIG.subnet_1_cidr,
            TESTCASE_CONFIG.router_1_name)

        quagga_net_id, subnet_quagga_id, router_quagga_id = (
            test_utils.create_network(
                neutron_client,
                TESTCASE_CONFIG.quagga_net_name,
                TESTCASE_CONFIG.quagga_subnet_name,
                TESTCASE_CONFIG.quagga_subnet_cidr,
                TESTCASE_CONFIG.quagga_router_name))

        interfaces.append((router_1_id, subnet_1_id))
        interfaces.append((router_quagga_id, subnet_quagga_id))
        network_ids.extend([net_1_id, quagga_net_id])
        router_ids.extend([router_1_id, router_quagga_id])
        subnet_ids.extend([subnet_1_id, subnet_quagga_id])

        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
        if installer_type == "fuel":
            disk = 'raw'
        elif installer_type == "apex":
            disk = 'qcow2'
        else:
            logger.error("Incompatible installer type")

        ubuntu_image_id = os_utils.create_glance_image(
            conn,
            COMMON_CONFIG.ubuntu_image_name,
            COMMON_CONFIG.ubuntu_image_path,
            disk,
            container="bare",
            public="public")
        image_ids.append(ubuntu_image_id)

        # NOTE(rski) The order of this seems a bit weird but
        # there is a reason for this, namely
        # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
        # so we create the quagga instance using cloud-init
        # and immediately give it a floating IP.
        # The cloud-init script should contain a small sleep for
        # this to work.
        # We also create the FIP first because it is used in the
        # cloud-init script.
        # fake_fip is needed to bypass NAT
        # see below for the reason why.
        fake_fip = os_utils.create_floating_ip(neutron_client)
        floatingip_ids.append(fake_fip['fip_id'])

        # pin quagga to some compute
        compute_node = next(conn.compute.hypervisors())
        compute_node = conn.compute.get_hypervisor(compute_node)
        quagga_compute_node = "nova:" + compute_node.name
        # Map the hypervisor used above to a compute handle
        # returned by releng's manager
        for comp in computes:
            if compute_node.host_ip in comp.run_cmd("sudo ip a"):
                compute = comp
                break

        quagga_bootstrap_script = quagga.gen_quagga_setup_script(
            controller_ext_ip,
            fake_fip['fip_addr'],
            ext_net_mask,
            TESTCASE_CONFIG.external_network_ip_prefix,
            TESTCASE_CONFIG.route_distinguishers,
            TESTCASE_CONFIG.import_targets,
            TESTCASE_CONFIG.export_targets)

        quagga_vm = test_utils.create_instance(
            conn,
            TESTCASE_CONFIG.quagga_instance_name,
            ubuntu_image_id,
            quagga_net_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.quagga_instance_ip,
            flavor=COMMON_CONFIG.custom_flavor_name,
            userdata=quagga_bootstrap_script,
            compute_node=quagga_compute_node)
        instance_ids.append(quagga_vm.id)

        quagga_vm_port = test_utils.get_port(neutron_client, quagga_vm.id)
        fip_added = os_utils.attach_floating_ip(neutron_client,
                                                quagga_vm_port['id'])
        msg = ("Assign a Floating IP to %s "
               % TESTCASE_CONFIG.quagga_instance_name)
        if fip_added:
            results.add_success(msg)
            floatingip_ids.append(fip_added['floatingip']['id'])
        else:
            results.add_failure(msg)

        test_utils.attach_instance_to_ext_br(quagga_vm, compute)

        testcase = "Bootstrap quagga inside an OpenStack instance"
        cloud_init_success = test_utils.wait_for_cloud_init(conn, quagga_vm)
        if cloud_init_success:
            results.add_success(testcase)
        else:
            results.add_failure(testcase)
        results.add_to_summary(0, "=")

        results.add_to_summary(0, '-')
        results.add_to_summary(1, "Peer Quagga with OpenDaylight")
        results.add_to_summary(0, '-')

        neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
                                           controller_ext_ip,
                                           controller)
        peer = quagga.check_for_peering(controller)

        if neighbor and peer:
            results.add_success("Peering with quagga")
        else:
            results.add_failure("Peering with quagga")

        test_utils.add_quagga_external_gre_end_point(controllers,
                                                     fake_fip['fip_addr'])
        test_utils.wait_before_subtest()

        msg = ("Create VPN to define a VRF")
        results.record_action(msg)
        vpn_name = "sdnvpn-3"
        kwargs = {
            "import_targets": TESTCASE_CONFIG.import_targets,
            "export_targets": TESTCASE_CONFIG.export_targets,
            "route_targets": TESTCASE_CONFIG.route_targets,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }
        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN1 created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate network '%s' to the VPN."
               % TESTCASE_CONFIG.net_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        # create a vm and connect it with network1,
        # which is going to be bgpvpn associated
        userdata_common = test_utils.generate_ping_userdata(
            [TESTCASE_CONFIG.external_network_ip])

        compute_node = next(conn.compute.hypervisors())
        av_zone_1 = "nova:" + compute_node.name
        vm_bgpvpn = test_utils.create_instance(
            conn,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            net_1_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.instance_1_ip,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=userdata_common)
        instance_ids.append(vm_bgpvpn.id)

        # wait for VM to get IP
        instance_up = test_utils.wait_for_instances_up(vm_bgpvpn)
        if not instance_up:
            logger.error("Instance vm_bgpvpn is down")

        test_utils.create_network_association(neutron_client, bgpvpn_id,
                                              net_1_id)
        test_utils.wait_before_subtest()

        msg = ("External IP prefix %s is exchanged with ODL"
               % TESTCASE_CONFIG.external_network_ip_prefix)
        fib_added = test_utils.is_fib_entry_present_on_odl(
            controllers,
            TESTCASE_CONFIG.external_network_ip_prefix,
            TESTCASE_CONFIG.route_distinguishers)
        if fib_added:
            results.add_success(msg)
        else:
            results.add_failure(msg)

        # TODO: uncomment the following once OVS is installed with > 2.8.3
        # and underlay connectivity is established between vxlan overlay and
        # external network.
        # results.get_ping_status_target_ip(
        #     vm_bgpvpn,
        #     TESTCASE_CONFIG.external_network_name,
        #     TESTCASE_CONFIG.external_network_ip,
        #     expected="PASS",
        #     timeout=300)

        results.add_to_summary(0, "=")
        logger.info("\n%s" % results.summary)

    except Exception as e:
        logger.error("exception occurred while executing testcase_3: %s", e)
        raise
    finally:
        if quagga_vm is not None:
            test_utils.detach_instance_from_ext_br(quagga_vm, compute)
        test_utils.cleanup_nova(conn, instance_ids, flavor_ids)
        test_utils.cleanup_glance(conn, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

        if fake_fip is not None:
            bgp_nbr_disconnect_cmd = ("bgp-nbr -i %s -a 200 del"
                                      % fake_fip['fip_addr'])
            test_utils.run_odl_cmd(controller, bgp_nbr_disconnect_cmd)
        bgp_server_stop_cmd = ("bgp-rtr -r %s -a 100 del"
                               % controller_ext_ip)
        odl_zrpc_disconnect_cmd = "bgp-connect -p 7644 -h 127.0.0.1 del"
        test_utils.run_odl_cmd(controller, bgp_server_stop_cmd)
        test_utils.run_odl_cmd(controller, odl_zrpc_disconnect_cmd)

    return results.compile_summary()
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()
    openstack_nodes = test_utils.get_nodes()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_1_name,
                                               TESTCASE_CONFIG.subnet_1_cidr,
                                               network_1_id)
        network_ids.append(network_1_id)
        subnet_ids.append(subnet_1_id)

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        # Check required number of compute nodes
        compute_hostname = (
            nova_client.hypervisors.list()[0].hypervisor_hostname)
        compute_nodes = [node for node in openstack_nodes
                         if node.is_compute()]

        av_zone_1 = "nova:" + compute_hostname
        # List of OVS bridges to get groups
        ovs_br = "br-int"
        # Get the OVS groups before the topology is created
        initial_ovs_groups = test_utils.get_ovs_groups(compute_nodes,
                                                       [ovs_br])

        # boot INSTANCES
        vm_2 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        vm_1 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        instance_ids.extend([vm_1.id, vm_2.id])

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_1, vm_2)
        if not instances_up:
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()

        # Get the number of OVS groups added since the topology was created
        added_ovs_groups = (len(test_utils.get_ovs_groups(
                                compute_nodes, [ovs_br]))
                            - len(initial_ovs_groups))

        # Check if group added successfully
        results.record_action("Check if a new group was added to OVS")
        msg = "New OVS group added"
        results.add_to_summary(0, "-")
        if added_ovs_groups != 0:
            results.add_success(msg)
        else:
            results.add_failure(msg)
        results.add_to_summary(0, "=")

        # Backup OVS controller connection info.
        # To support HA, changes should be made here.
        get_ext_ip_cmd = "sudo ovs-vsctl get-controller {}".format(ovs_br)
        ovs_controller_conn = (compute_nodes[0].run_cmd(get_ext_ip_cmd)
                               .strip().split('\n')[0])

        # Disconnect OVS from the controller
        for compute_node in compute_nodes:
            compute_node.run_cmd("sudo ovs-vsctl del-controller {}"
                                 .format(ovs_br))
    except Exception as e:
        logger.error("exception occurred while executing testcase_11: %s", e)
        raise
    finally:
        # Cleanup topology
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

        # Reconnect OVS to the controller
        for compute_node in compute_nodes:
            compute_node.run_cmd("sudo ovs-vsctl set-controller {} {}"
                                 .format(ovs_br, ovs_controller_conn))

        logger.info("Wait before subtest")
        test_utils.wait_before_subtest()

        # Get the group count difference after the topology was deleted
        added_ovs_groups = (len(test_utils.get_ovs_groups(
                                compute_nodes, [ovs_br]))
                            - len(initial_ovs_groups))

        # Check if group removed successfully
        results.record_action("Check if group was removed from OVS "
                              "after deleting the topology.")
        msg = ""
        # After removing the topology, groups must be equal to the initial
        if added_ovs_groups != 0:
            msg += " Additional group was not deleted from OVS"

        results.add_to_summary(0, "-")
        if len(msg) == 0:
            msg = "Group was deleted from ovs"
            results.add_success(msg)
        else:
            results.add_failure(msg)

    return results.compile_summary()
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
            neutron_client,
            TESTCASE_CONFIG.net_1_name,
            TESTCASE_CONFIG.subnet_1_name,
            TESTCASE_CONFIG.subnet_1_cidr,
            TESTCASE_CONFIG.router_1_name)

        network_2_id, subnet_2_id, router_1_id = test_utils.create_network(
            neutron_client,
            TESTCASE_CONFIG.net_2_name,
            TESTCASE_CONFIG.subnet_2_name,
            TESTCASE_CONFIG.subnet_2_cidr,
            TESTCASE_CONFIG.router_1_name)

        interfaces.append((router_1_id, subnet_1_id))
        interfaces.append((router_1_id, subnet_2_id))
        network_ids.extend([network_1_id, network_2_id])
        router_ids.append(router_1_id)
        subnet_ids.extend([subnet_1_id, subnet_2_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)
        test_utils.open_icmp(neutron_client, sg_id)
        test_utils.open_http_port(neutron_client, sg_id)

        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
        av_zone_1 = "nova:" + compute_nodes[0]

        # spawning the VMs on the same compute because fib flow (21) entries
        # are not created properly if vm1 and vm2 are attached to two
        # different computes
        vm_2 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
            network_2_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        vm_2_ip = test_utils.get_instance_ip(vm_2)

        u1 = test_utils.generate_ping_userdata([vm_2_ip])
        vm_1 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1, userdata=u1)
        instance_ids.extend([vm_1.id, vm_2.id])

        # TODO: uncomment the lines 107-134 once ODL fixes
        # the bug https://jira.opendaylight.org/browse/NETVIRT-932
        # results.record_action("Create VPN with eRT==iRT")
        # vpn_name = "sdnvpn-8"
        # kwargs = {
        #     "import_targets": TESTCASE_CONFIG.targets,
        #     "export_targets": TESTCASE_CONFIG.targets,
        #     "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
        #     "name": vpn_name
        # }
        # bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        # bgpvpn_id = bgpvpn['bgpvpn']['id']
        # logger.debug("VPN created details: %s" % bgpvpn)
        # bgpvpn_ids.append(bgpvpn_id)
        # msg = ("Associate router '%s' and net '%s' to the VPN."
        #        % (TESTCASE_CONFIG.router_1_name,
        #           TESTCASE_CONFIG.net_2_name))
        # results.record_action(msg)
        # results.add_to_summary(0, "-")
        # test_utils.create_router_association(
        #     neutron_client, bgpvpn_id, router_1_id)
        # test_utils.create_network_association(
        #     neutron_client, bgpvpn_id, network_2_id)
        # test_utils.wait_for_bgp_router_assoc(
        #     neutron_client, bgpvpn_id, router_1_id)
        # test_utils.wait_for_bgp_net_assoc(
        #     neutron_client, bgpvpn_id, network_2_id)

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_2)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1)

        if (not instances_up or not instances_dhcp_up):
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
        results.add_to_summary(0, "=")

        msg = "Assign a Floating IP to %s" % vm_1.name
        results.record_action(msg)

        fip = os_utils.create_floating_ip(neutron_client)
        fip_added = os_utils.add_floating_ip(nova_client, vm_1.id,
                                             fip['fip_addr'])
        if fip_added:
            results.add_success(msg)
        else:
            results.add_failure(msg)

        results.add_to_summary(0, "=")
        results.record_action("Ping %s via Floating IP" % vm_1.name)
        results.add_to_summary(0, "-")
        results.ping_ip_test(fip['fip_addr'])

        floatingip_ids.append(fip['fip_id'])

    except Exception as e:
        logger.error("exception occurred while executing testcase_8: %s", e)
        raise
    finally:
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
            neutron_client,
            TESTCASE_CONFIG.net_1_name,
            TESTCASE_CONFIG.subnet_1_name,
            TESTCASE_CONFIG.subnet_1_cidr,
            TESTCASE_CONFIG.router_1_name)

        network_2_id, subnet_2_id, router_2_id = test_utils.create_network(
            neutron_client,
            TESTCASE_CONFIG.net_2_name,
            TESTCASE_CONFIG.subnet_2_name,
            TESTCASE_CONFIG.subnet_2_cidr,
            TESTCASE_CONFIG.router_2_name)

        interfaces.append((router_1_id, subnet_1_id))
        interfaces.append((router_2_id, subnet_2_id))
        network_ids.extend([network_1_id, network_2_id])
        router_ids.extend([router_1_id, router_2_id])
        subnet_ids.extend([subnet_1_id, subnet_2_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)
        test_utils.open_icmp(neutron_client, sg_id)
        test_utils.open_http_port(neutron_client, sg_id)

        vm_2 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_2_name, image_id,
            network_2_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name)
        vm_2_ip = test_utils.get_instance_ip(vm_2)

        u1 = test_utils.generate_ping_userdata([vm_2_ip])
        vm_1 = test_utils.create_instance(
            nova_client, TESTCASE_CONFIG.instance_1_name, image_id,
            network_1_id, sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name, userdata=u1)
        instance_ids.extend([vm_1.id, vm_2.id])

        msg = ("Create VPN with eRT==iRT")
        results.record_action(msg)
        vpn_name = "sdnvpn-7"
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets,
            "export_targets": TESTCASE_CONFIG.targets,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }
        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate networks '%s', '%s' to the VPN."
               % (TESTCASE_CONFIG.net_1_name,
                  TESTCASE_CONFIG.net_2_name))
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(neutron_client, bgpvpn_id,
                                              network_1_id)
        test_utils.create_network_association(neutron_client, bgpvpn_id,
                                              network_2_id)

        test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn_id,
                                          network_1_id)
        test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn_id,
                                          network_2_id)

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_2)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1)

        if (not instances_up or not instances_dhcp_up):
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
        results.add_to_summary(0, "=")

        msg = "Assign a Floating IP to %s and ping it" % vm_2.name
        results.record_action(msg)
        results.add_to_summary(0, '-')

        fip = os_utils.create_floating_ip(neutron_client)
        fip_added = os_utils.add_floating_ip(nova_client, vm_2.id,
                                             fip['fip_addr'])
        if fip_added:
            results.add_success(msg)
        else:
            results.add_failure(msg)

        results.ping_ip_test(fip['fip_addr'])

        floatingip_ids.append(fip['fip_id'])

    except Exception as e:
        logger.error("exception occurred while executing testcase_7: %s", e)
        raise
    finally:
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        logger.debug("Using private key %s injected to the VMs."
                     % COMMON_CONFIG.keyfile_path)
        with open(COMMON_CONFIG.keyfile_path, 'r') as keyfile:
            key = keyfile.read()
        files = {"/home/cirros/id_rsa": key}

        image_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(
            neutron_client, TESTCASE_CONFIG.net_1_name)
        subnet_1a_id = test_utils.create_subnet(
            neutron_client,
            TESTCASE_CONFIG.subnet_1a_name,
            TESTCASE_CONFIG.subnet_1a_cidr,
            network_1_id)
        # TODO: uncomment the commented lines once ODL has
        # support for multiple subnets under the same neutron network
        # subnet_1b_id = test_utils.create_subnet(
        #     neutron_client,
        #     TESTCASE_CONFIG.subnet_1b_name,
        #     TESTCASE_CONFIG.subnet_1b_cidr,
        #     network_1_id)

        network_2_id = test_utils.create_net(
            neutron_client, TESTCASE_CONFIG.net_2_name)

        # subnet_2a_id = test_utils.create_subnet(
        #     neutron_client,
        #     TESTCASE_CONFIG.subnet_2a_name,
        #     TESTCASE_CONFIG.subnet_2a_cidr,
        #     network_2_id)
        subnet_2b_id = test_utils.create_subnet(
            neutron_client,
            TESTCASE_CONFIG.subnet_2b_name,
            TESTCASE_CONFIG.subnet_2b_cidr,
            network_2_id)

        network_ids.extend([network_1_id, network_2_id])
        subnet_ids.extend([subnet_1a_id,
                           # subnet_1b_id,
                           # subnet_2a_id,
                           subnet_2b_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
        av_zone_1 = "nova:" + compute_nodes[0]
        # av_zone_2 = "nova:" + compute_nodes[1]

        # boot INSTANCES
        userdata_common = test_utils.generate_userdata_common()
        vm_2 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.instance_2_ip,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=userdata_common)

        # vm_3 = test_utils.create_instance(
        #     nova_client,
        #     TESTCASE_CONFIG.instance_3_name,
        #     image_id,
        #     network_1_id,
        #     sg_id,
        #     fixed_ip=TESTCASE_CONFIG.instance_3_ip,
        #     secgroup_name=TESTCASE_CONFIG.secgroup_name,
        #     compute_node=av_zone_2,
        #     userdata=userdata_common)
        #
        # vm_5 = test_utils.create_instance(
        #     nova_client,
        #     TESTCASE_CONFIG.instance_5_name,
        #     image_id,
        #     network_2_id,
        #     sg_id,
        #     fixed_ip=TESTCASE_CONFIG.instance_5_ip,
        #     secgroup_name=TESTCASE_CONFIG.secgroup_name,
        #     compute_node=av_zone_2,
        #     userdata=userdata_common)

        # We boot vm5 first because we need vm5_ip for vm4 userdata
        u4 = test_utils.generate_userdata_with_ssh(
            [TESTCASE_CONFIG.instance_1_ip
             # TESTCASE_CONFIG.instance_3_ip,
             # TESTCASE_CONFIG.instance_5_ip
             ])
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_2_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.instance_4_ip,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4,
            files=files)

        # We boot VM1 at the end because we need to get the IPs first
        # to generate the userdata
        u1 = test_utils.generate_userdata_with_ssh(
            [TESTCASE_CONFIG.instance_2_ip,
             # TESTCASE_CONFIG.instance_3_ip,
             TESTCASE_CONFIG.instance_4_ip,
             # TESTCASE_CONFIG.instance_5_ip
             ])
        vm_1 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.instance_1_ip,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1,
            files=files)
        instance_ids.extend([vm_1.id,
                             vm_2.id,
                             # vm_3.id,
                             vm_4.id,
                             # vm_5.id
                             ])

        msg = ("Create VPN1 with eRT=iRT")
        results.record_action(msg)
        vpn1_name = "sdnvpn-1-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets2,
            "export_targets": TESTCASE_CONFIG.targets2,
            "route_targets": TESTCASE_CONFIG.targets2,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers1,
            "name": vpn1_name
        }
        bgpvpn1 = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn1_id = bgpvpn1['bgpvpn']['id']
        logger.debug("VPN1 created details: %s" % bgpvpn1)
        bgpvpn_ids.append(bgpvpn1_id)

        msg = ("Associate network '%s' to the VPN."
               % TESTCASE_CONFIG.net_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(
            neutron_client, bgpvpn1_id, network_1_id)

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_2)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_4)

        if (not instances_up or not instances_dhcp_up):
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        # 10.10.10.12 should return sdnvpn-2 to sdnvpn-1
        results.check_ssh_output(vm_1, vm_2,
                                 expected=TESTCASE_CONFIG.instance_2_name,
                                 timeout=200)
        # 10.10.11.13 should return sdnvpn-3 to sdnvpn-1
        # results.check_ssh_output(vm_1, vm_3,
        #                          expected=TESTCASE_CONFIG.instance_3_name,
        #                          timeout=30)

        results.add_to_summary(0, "-")
        msg = ("Create VPN2 with eRT=iRT")
        results.record_action(msg)
        vpn2_name = "sdnvpn-2-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets1,
            "route_targets": TESTCASE_CONFIG.targets1,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers2,
            "name": vpn2_name
        }
        bgpvpn2 = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn2_id = bgpvpn2['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn2)
        bgpvpn_ids.append(bgpvpn2_id)

        msg = ("Associate network '%s' to the VPN2."
               % TESTCASE_CONFIG.net_2_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(
            neutron_client, bgpvpn2_id, network_2_id)

        test_utils.wait_for_bgp_net_assoc(neutron_client,
                                          bgpvpn1_id, network_1_id)
        test_utils.wait_for_bgp_net_assoc(neutron_client,
                                          bgpvpn2_id, network_2_id)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        # 10.10.11.13 should return sdnvpn-5 to sdnvpn-4
        # results.check_ssh_output(vm_4, vm_5,
        #                          expected=TESTCASE_CONFIG.instance_5_name,
        #                          timeout=30)

        # 10.10.10.11 should return "not reachable" to sdnvpn-4
        results.check_ssh_output(vm_4, vm_1,
                                 expected="not reachable",
                                 timeout=30)

    except Exception as e:
        logger.error("exception occurred while executing testcase_2: %s", e)
        raise
    finally:
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
def main():
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))

    try:
        image_id = os_utils.create_glance_image(
            glance_client,
            COMMON_CONFIG.ubuntu_image_name,
            COMMON_CONFIG.ubuntu_image_path,
            disk="qcow2",
            container="bare",
            public="public")
        image_ids.append(image_id)

        _, flavor_id = test_utils.create_custom_flavor()
        flavor_ids.append(flavor_id)

        network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
            neutron_client,
            TESTCASE_CONFIG.net_1_name,
            TESTCASE_CONFIG.subnet_1_name,
            TESTCASE_CONFIG.subnet_1_cidr,
            TESTCASE_CONFIG.router_1_name)

        interfaces.append((router_1_id, subnet_1_id))
        network_ids.extend([network_1_id])
        subnet_ids.extend([subnet_1_id])
        router_ids.extend([router_1_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
        av_zone_1 = "nova:" + compute_nodes[0]
        av_zone_2 = "nova:" + compute_nodes[1]

        u1 = test_utils.generate_userdata_interface_create(
            TESTCASE_CONFIG.interface_name,
            TESTCASE_CONFIG.interface_number,
            TESTCASE_CONFIG.extra_route_ip,
            TESTCASE_CONFIG.extra_route_subnet_mask)

        # boot INSTANCES
        vm_1 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            flavor=COMMON_CONFIG.custom_flavor_name,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1)
        vm_1_ip = test_utils.get_instance_ip(vm_1)

        vm1_port = test_utils.get_port(neutron_client, vm_1.id)
        test_utils.update_port_allowed_address_pairs(
            neutron_client,
            vm1_port['id'],
            [test_utils.AllowedAddressPair(
                TESTCASE_CONFIG.extra_route_cidr,
                vm1_port['mac_address'])])

        vm_2 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            flavor=COMMON_CONFIG.custom_flavor_name,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1)
        vm_2_ip = test_utils.get_instance_ip(vm_2)

        vm2_port = test_utils.get_port(neutron_client, vm_2.id)
        test_utils.update_port_allowed_address_pairs(
            neutron_client,
            vm2_port['id'],
            [test_utils.AllowedAddressPair(
                TESTCASE_CONFIG.extra_route_cidr,
                vm2_port['mac_address'])])

        test_utils.async_Wait_for_instances([vm_1, vm_2])

        image_2_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_2_id)

        # Moved vm_3 creation before associating its network/router with
        # bgpvpn. If the VM is created after its network is associated to
        # the bgpvpn via the router, BGPVPN in ODL uses the router's VRF id
        # for newly created VMs, which causes the testcase to fail.
        u3 = test_utils.generate_ping_userdata(
            [TESTCASE_CONFIG.extra_route_ip])
        vm_3 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_3_name,
            image_2_id,
            network_1_id,
            sg_id,
            flavor=COMMON_CONFIG.custom_flavor_name,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2,
            userdata=u3)
        instance_ids.extend([vm_1.id, vm_2.id, vm_3.id])

        instance_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_3)
        if (not instance_dhcp_up):
            logger.error("vm_3 instance is down")

        msg = ("Create VPN with multiple RDs")
        results.record_action(msg)
        vpn_name = "sdnvpn-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets2,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }
        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate router '%s' to the VPN."
               % TESTCASE_CONFIG.router_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_router_association(neutron_client, bgpvpn_id,
                                             router_1_id)

        test_utils.update_router_extra_route(
            neutron_client, router_1_id,
            [test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr,
                                   vm_1_ip),
             test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr,
                                   vm_2_ip)])

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status_target_ip(vm_3,
                                          TESTCASE_CONFIG.extra_route_name,
                                          TESTCASE_CONFIG.extra_route_ip,
                                          expected="PASS",
                                          timeout=300)

        results.add_to_summary(0, "=")
        logger.info("\n%s" % results.summary)

    except Exception as e:
        logger.error("exception occurred while executing testcase_13: %s", e)
        raise
    finally:
        test_utils.update_router_no_extra_route(neutron_client, router_ids)
        test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                   bgpvpn_ids, interfaces, subnet_ids,
                                   router_ids, network_ids)

    return results.compile_summary()
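# Testcases in this suite are usually executed as standalone scripts. A
# typical entry point, assuming the module imports `logging` and `sys`,
# would look like the sketch below.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())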