def test_live_migrate_while_launch_delete_vms(check_hypervisors, flavors):
    """
    Launch/delete vms while live migrating another vm

    Args:
        check_hypervisors: fixture ensuring suitable hypervisors exist
        flavors: flavors fixture with numa0/numa1 flavors

    Test Steps:
        - Thread_1: Launch a 2vcpu vm with dedicated policy and keep live
            migrating it
        - Thread_2: Launch a 2vcpu-dedicated-vm on numa0, and another on
            numa1; stop and Delete them. Repeat.

    """
    end_event = Event()
    run_time = 10 * 3600
    # run_time = 300
    start_time = time.time()
    end_time = start_time + run_time
    # extra 300s so threads can wind down cleanly after end_time passes
    thread_timeout = run_time + 300

    thread_1 = MThread(live_migrate_vm, end_time, end_event=end_event)
    thread_2 = MThread(launch_delete_vms, flavors, end_time,
                       end_event=end_event)

    LOG.info("Starting threads")
    thread_1.start_thread(thread_timeout)
    thread_2.start_thread(thread_timeout)

    is_ended_1, error_1 = thread_1.wait_for_thread_end(fail_ok=True)
    is_ended_2, error_2 = thread_2.wait_for_thread_end(fail_ok=True)

    # Fix: corrected typo "Threaded ended" -> "Thread ended" in messages
    assert is_ended_1 and not error_1, \
        "Thread ended: {}. Error: {}".format(is_ended_1, error_1)
    assert is_ended_2 and not error_2, \
        "Thread ended: {}. Error: {}".format(is_ended_2, error_2)
def ping_all_vms_from_nat_box():
    """
    Ping all existing vms from the NATBox using one thread per vm.

    Returns:
        bool: True when all ping threads have completed

    """
    natbox_client = NATBoxClient.get_natbox_client()
    vms = get_all_vms()
    ips_list = network_helper.get_mgmt_ips_for_vms(vms=vms)
    timeout = 1000
    vm_threads = []
    for vm in ips_list:
        # Fix: pass the callable and its args to MThread instead of calling
        # network_helper.ping_server() immediately in the current thread
        new_thread = MThread(network_helper.ping_server, vm, natbox_client)
        new_thread.start_thread(timeout=timeout + 30)
        vm_threads.append(new_thread)

    time.sleep(5)
    for vm_thr in vm_threads:
        vm_thr.wait_for_thread_end()

    return True
def test_launch_delete_vms(flavors):
    """
    Launch two 2vcpu-vms on each numa node, wait for pingable, then stop
    and delete. Repeat this action.

    Args:
        flavors: flavors with numa0 and numa1 specified

    Test Steps: (Each vm launch/delete is using a separate thread)
        - Launch two 2vcpu-vms on each numa node
        - Wait for vms pingable
        - Stop and delete vms
        - Repeat above operations

    """
    end_event = Event()
    duration = 10 * 3600
    # duration = 300
    deadline = time.time() + duration
    worker_timeout = duration + 300

    workers = []
    for flv_name, flv_id in flavors.items():
        # two launch/delete workers per flavor (i.e. per numa node)
        for _ in range(2):
            worker = MThread(launch_delete_vm, [flv_name, flv_id], deadline,
                             end_event=end_event)
            worker.start_thread(worker_timeout)
            workers.append(worker)

    for worker in workers:
        worker.wait_for_thread_end()
def test_timing():
    """
    Compare the wall-clock time of booting vms concurrently via MThreads
    against booting vms sequentially in a plain loop.
    """
    flav_id = nova_helper.create_flavor('thread_testing')[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flav_id)

    boot_threads = []
    start_1 = time()
    for _ in range(6):
        boot_thread = MThread(vm_helper.boot_vm, 'threading_vm',
                              flavor=flav_id)
        boot_thread.start_thread(240)
        boot_threads.append(boot_thread)

    for boot_thread in boot_threads:
        boot_thread.wait_for_thread_end()
    for boot_thread in boot_threads:
        ResourceCleanup.add(resource_type='vm',
                            resource_id=boot_thread.get_output()[1])
    end_1 = time()

    start_2 = time()
    for _ in range(2):
        vm_id = vm_helper.boot_vm('loop_vm', flav_id)[1]
        ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
    end_2 = time()

    LOG.info("Time results:\n"
             "Multithreading: {}\n"
             "Single loop: {}".format(end_1 - start_1, end_2 - start_2))
def check_port_forwarding_protocol(ext_gateway_ip, nat_ssh, vm_pfs,
                                   vm_ssh_pfs, protocol):
    """
    Verify port-forwarded vm ports only receive packets sent via the
    expected protocol.

    Starts one listener thread per vm, sends greetings from the NATBox via
    the opposite protocol (expecting no delivery), then via the expected
    protocol (expecting delivery on every vm).

    Args:
        ext_gateway_ip: external gateway ip the packets are sent to
        nat_ssh: NATBox ssh client
        vm_pfs (dict): {vm_id: {'public_port': ..., ...}} port forwardings
            under test
        vm_ssh_pfs (dict): {vm_id: {'public_port': ..., ...}} ssh port
            forwardings used to reach each vm
        protocol (str): 'tcp' or 'udp'

    """
    vm_threads = []
    end_event = Events("Hello msg sent to ports")
    start_events = []
    received_events = []
    try:
        LOG.tc_step("Start listening on vms {} ports .... ".format(protocol))
        for vm_id_, v in vm_pfs.items():
            greeting = "Hello {}".format(v['public_port'])
            ssh_public_port = vm_ssh_pfs[vm_id_]['public_port']
            start_event = Events("VM {} started listening".format(vm_id_))
            start_events.append(start_event)
            received_event = Events(
                "Greeting received on vm {}".format(vm_id_))
            received_events.append(received_event)
            thread_vm = MThread(check_ssh_to_vm_and_wait_for_packets,
                                start_event, end_event, received_event,
                                vm_id_, ext_gateway_ip, ssh_public_port,
                                greeting, protocol)
            thread_vm.start_thread()
            vm_threads.append(thread_vm)

        # make sure every vm is actually listening before sending anything
        for event_ in start_events:
            event_.wait_for_event(timeout=180, fail_ok=False)

        diff_protocol = 'udp' if protocol == 'tcp' else 'tcp'
        LOG.tc_step(
            "Send Hello msg to vms from NATBox via {} ports, and check they are not received via {} ports"
            .format(diff_protocol, protocol))
        for vm_id_, v in vm_pfs.items():
            greeting = "Hello {}".format(v['public_port'])
            send_packets_to_vm_from_nat_box(nat_ssh, ext_gateway_ip,
                                            v['public_port'], greeting,
                                            diff_protocol)

        time.sleep(10)
        # packets sent via the wrong protocol must not reach any listener
        for event in received_events:
            assert not event.is_set(), "Event {} is set".format(event)

        LOG.tc_step(
            "Send Hello msg to vms from NATBox via {} ports, and check they are received"
            .format(protocol, protocol))
        for vm_id_, v in vm_pfs.items():
            greeting = "Hello {}".format(v['public_port'])
            send_packets_to_vm_from_nat_box(nat_ssh, ext_gateway_ip,
                                            v['public_port'], greeting,
                                            protocol)

        time.sleep(10)
        # now every listener must have received its greeting
        for event in received_events:
            assert event.wait_for_event(
                timeout=40, fail_ok=False), \
                "Event {} is not set".format(event)
    finally:
        # unblock listener threads and reap them even on failure
        end_event.set()
        for thread in vm_threads:
            thread.wait_for_thread_end(timeout=40, fail_ok=True)
def check_server_group_messaging_enabled(vms, action):
    """
    Check server group messaging between vms in the same server group.

    One randomly chosen vm triggers a message/event while every other vm
    listens for the corresponding server group notification.

    Args:
        vms (list): vms in the same server group
        action (str): 'message' (sender broadcasts MSG) or 'pause'
            (sender vm is paused, generating a '<vm>.*paused' event)

    Raises:
        ValueError: if action is not 'message' or 'pause'

    """
    vms = list(set(vms))
    vm_sender = random.choice(vms)
    vms.remove(vm_sender)

    if action == 'message':
        msg = MSG
        timeout = 180
    elif action == 'pause':
        # pause produces an instance event that contains the sender vm id
        msg = '{}.*paused'.format(vm_sender)
        timeout = 240
    else:
        raise ValueError("Unknown action - '{}' provided".format(action))

    res_events = []
    sent_event = Events("srv msg/event triggered")
    listener_event = Events("VM started listening to server group messages")
    vm_threads = []
    sender_thread = None

    try:
        for vm in vms:
            listener_event.clear()
            new_thread = MThread(_wait_for_srv_grp_msg, vm, msg,
                                 timeout=timeout, res_events=res_events,
                                 listener_event=listener_event,
                                 sent_event=sent_event)
            new_thread.start_thread(timeout=timeout + 30)
            vm_threads.append(new_thread)
            # wait for this listener to be ready before starting the next
            listener_event.wait_for_event()

        time.sleep(5)
        # this 60 seconds timeout is hardcoded for action == 'message'
        # scenario to send the message out
        sender_thread = MThread(trigger_srv_grp_msg, vm_sender, action,
                                timeout=60, sent_event=sent_event,
                                rcv_event=res_events)
        sender_thread.start_thread(timeout=timeout)

        sent_event.wait_for_event()
        # every listener vm must have seen the message/event
        for res_event in res_events:
            res_event.wait_for_event()

    finally:
        # wait for server group msg to be received
        for vm_thr in vm_threads:
            vm_thr.wait_for_thread_end(timeout=30)

        if sender_thread:
            sender_thread.wait_for_thread_end(timeout=30)
            if action == 'pause':
                # restore the sender vm so later tests are unaffected
                vm_helper.unpause_vm(vm_sender)
def sys_lock_unlock_hosts(number_of_hosts_to_lock):
    """
    Test the evacuation of vms due to compute lock/unlock.

    Args:
        number_of_hosts_to_lock (int): maximum number of computes to lock

    """
    # identify computes with at least 5 vms each
    vms_by_compute_dic = vm_helper.get_vms_per_host()
    compute_to_lock = []
    vms_to_check = []
    hosts_threads = []
    timeout = 1000
    for host_, host_vms in vms_by_compute_dic.items():
        if len(host_vms) >= 5:
            compute_to_lock.append(host_)
            vms_to_check.append(host_vms)

    # Fix: the list is initialized to [] and can never be None, so the
    # original `is None` check made this skip unreachable; also fixed the
    # "moer" typo in the skip reason
    if not compute_to_lock:
        skip("There are no compute with 5 or more vms")

    if len(compute_to_lock) > number_of_hosts_to_lock:
        compute_to_lock = compute_to_lock[0:number_of_hosts_to_lock]
        vms_to_check = vms_to_check[0:number_of_hosts_to_lock]
    else:
        LOG.warning(
            "There are only {} computes available with more than 5 vms ".
            format(len(compute_to_lock)))

    # lock all selected computes in parallel
    for host in compute_to_lock:
        new_thread = MThread(host_helper.lock_host, host)
        new_thread.start_thread(timeout=timeout + 30)
        hosts_threads.append(new_thread)

    for host_thr in hosts_threads:
        host_thr.wait_for_thread_end()

    LOG.tc_step("Verify lock succeeded and vms still in good state")
    for vm_list in vms_to_check:
        vm_helper.wait_for_vms_values(vms=vm_list, fail_ok=False)

    # every vm must have evacuated off its locked host and be reachable
    for host, vms in zip(compute_to_lock, vms_to_check):
        for vm in vms:
            vm_host = vm_helper.get_vm_host(vm_id=vm)
            assert vm_host != host, \
                "VM is still on {} after lock".format(host)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id=vm, timeout=VMTimeout.DHCP_RETRY)

    # unlock the computes in parallel to restore the system
    hosts_threads = []
    for host in compute_to_lock:
        new_thread = MThread(host_helper.unlock_host, host)
        new_thread.start_thread(timeout=timeout + 30)
        hosts_threads.append(new_thread)

    for host_thr in hosts_threads:
        host_thr.wait_for_thread_end()
def test_multi_thread():
    """
    Run several MThreads concurrently and collect the output of a thread
    that wraps a helper call.
    """
    LOG.tc_step("Create MThreads")
    worker_a = MThread(func, 1, 10, extra_arg="Hello")
    worker_b = MThread(func, 2, 6, extra_arg="Second")
    # runs after test steps complete
    worker_c = MThread(func, 3, 20, extra_arg="run for a long time")
    flavor_worker = MThread(nova_helper.create_flavor, 'threading', 'auto',
                            vcpus=2, ram=1024)

    LOG.tc_step("Starting threads")
    for worker in (worker_a, worker_b, worker_c, flavor_worker):
        worker.start_thread()
    LOG.tc_step("Finished starting threads")

    LOG.tc_step("Waiting for threads to finish")
    # worker_c is deliberately not waited on; it completes on its own
    worker_a.wait_for_thread_end()
    worker_b.wait_for_thread_end()
    flavor_worker.wait_for_thread_end()
    LOG.tc_step("Threads have finished")

    flavor_id = flavor_worker.get_output()[1]
    LOG.info("flav_id = {}".format(flavor_id))
    ResourceCleanup.add(resource_type='flavor', resource_id=flavor_id)
def test_events():
    """
    Verify an Events object blocks waiting threads until it is set.
    """
    gate = Events("functions should wait here")

    LOG.tc_step("Create multiple threads")
    waiter_1 = MThread(events_func, 1, 10, gate)
    waiter_2 = MThread(events_func, 2, 15, gate)
    for waiter in (waiter_1, waiter_2):
        waiter.start_thread(60)

    sleep(20)
    LOG.tc_step("Setting event")
    gate.set()

    waiter_1.wait_for_thread_end()
    waiter_2.wait_for_thread_end()
    LOG.tc_step("Threads have finished")

    # cleared event should time out instead of raising with fail_ok=True
    gate.clear()
    gate.wait_for_event(20, fail_ok=True)
def check_port_forwarding_ports(ext_gateway_ip, nat_ssh, vm_id, ssh_port,
                                old_port, new_port, protocol):
    """
    Verify a vm's port forwarding works via its new public port and no
    longer via its old public port.

    Args:
        ext_gateway_ip: external gateway ip the packets are sent to
        nat_ssh: NATBox ssh client
        vm_id: vm under test
        ssh_port: public ssh port used to reach the vm
        old_port: previous public port (expected not to deliver)
        new_port: current public port (expected to deliver)
        protocol (str): 'tcp' or 'udp'

    """
    end_event = Events("Hello msg sent to ports")
    start_event = Events("VM {} started listening".format(vm_id))
    received_event = Events("Greeting received on vm {}".format(vm_id))

    LOG.tc_step("Starting VM ssh session threads .... ")
    new_greeting = "Hello {}".format(new_port)
    listener_args = (start_event, end_event, received_event, vm_id,
                     ext_gateway_ip, ssh_port, new_greeting, protocol)
    vm_thread = MThread(check_ssh_to_vm_and_wait_for_packets, *listener_args)
    vm_thread.start_thread()

    try:
        start_event.wait_for_event(timeout=180, fail_ok=False)

        LOG.tc_step(
            "Send Hello msg to vm from NATBox via old {} port {}, and check it's not received"
            .format(protocol, old_port))
        old_greeting = "Hello {}".format(old_port)
        send_packets_to_vm_from_nat_box(nat_ssh, ext_gateway_ip, old_port,
                                        old_greeting, protocol)
        time.sleep(10)
        assert not received_event.is_set(), "Event {} is set".format(
            received_event)

        LOG.tc_step(
            "Check greeting is received on vm via new {} port {}".format(
                protocol, new_port))
        send_packets_to_vm_from_nat_box(nat_ssh, ext_gateway_ip, new_port,
                                        new_greeting, protocol)
        assert received_event.wait_for_event(
            timeout=30), "Event {} is not set".format(received_event)
    finally:
        # release the listener thread and wait for it to exit
        end_event.set()
        vm_thread.wait_for_thread_end(timeout=40, fail_ok=False)
def test_lock():
    """
    Demonstrate TiSLock in a passing and a failing (timeout) scenario.
    """
    LOG.tc_step("Positive lock example")
    tis_lock = TiSLock(True)
    holder = MThread(get_lock, tis_lock, 1)
    contender = MThread(get_lock, tis_lock, 2)
    holder.start_thread(30)
    sleep(1)
    contender.start_thread(30)
    holder.wait_for_thread_end(0)
    contender.wait_for_thread_end(30)

    LOG.tc_step("Negative lock example")
    # 2s lock timeout makes the second acquire fail; fail_ok tolerates it
    tis_lock = TiSLock(True, 2)
    holder = MThread(get_lock, tis_lock, 1)
    contender = MThread(get_lock, tis_lock, 2)
    holder.start_thread(30)
    sleep(1)
    contender.start_thread(30)
    holder.wait_for_thread_end(0, fail_ok=True)
    contender.wait_for_thread_end(30, fail_ok=True)
def test_barriers():
    """
    Demonstrate TiSBarrier with too few and with enough waiting threads.
    """
    LOG.tc_step("Negative barrier example (not enough threads waiting)")
    barrier = TiSBarrier(2, timeout=20)
    lone_thread = MThread(barr_func, 1, 4, barrier)
    lone_thread.start_thread(timeout=30)
    # barrier needs 2 parties; a single waiter times out, hence fail_ok
    lone_thread.wait_for_thread_end(fail_ok=True)

    LOG.tc_step("Positive barrier example")
    barrier = TiSBarrier(2, timeout=20)
    first = MThread(barr_func, 2, 4, barrier)
    second = MThread(barr_func, 3, 4, barrier)
    first.start_thread(timeout=30)
    second.start_thread(timeout=30)
    first.wait_for_thread_end()
    second.wait_for_thread_end()
def _verify_port_from_natbox(con_ssh, port, port_expected_open):
    """
    Verify a port's iptables state and its reachability from the NATBox.

    Args:
        con_ssh: Controller ssh client
        port: (number) Port to test
        port_expected_open: (boolean) whether access should succeed

    """
    if ProjVar.get_var('IS_DC'):
        subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD')
        lab_ip = ProjVar.get_var('LAB')[subcloud]['floating ip']
    else:
        lab_ip = ProjVar.get_var('LAB')['floating ip']

    cli.system('show', source_openrc=True, force_source=True)

    LOG.info("Check if port {} is listed in iptables".format(port))
    cmd = 'iptables -nvL | grep --color=never -w {}'.format(port)
    end_time = time.time() + 90
    while time.time() < end_time:
        output = con_ssh.exec_sudo_cmd(cmd, get_exit_code=False)[1]
        if (port_expected_open and output) or (not port_expected_open
                                               and not output):
            LOG.info("Port {} is {}listed in iptables as expected".format(
                port, '' if port_expected_open else 'not '))
            break
        time.sleep(5)
    else:
        assert 0, "Port {} is {}listed in iptables. ".format(
            port, 'not ' if port_expected_open else '')

    end_event = Events('Packet received')
    LOG.info("Open listener on port {}".format(port))
    listener_thread = MThread(_listen_on_port, port, end_event=end_event,
                              ssh_name=ProjVar.get_var('PRIMARY_SUBCLOUD'))
    listener_thread.start_thread(timeout=300)

    extra_str = 'succeeded' if port_expected_open else 'rejected'
    LOG.info("Verify access to port {} from natbox is {}".format(
        port, extra_str))
    try:
        wait_for_port_to_listen(con_ssh, port)
        natbox_ssh = NATBoxClient.get_natbox_client()
        end_time = time.time() + 60
        # Fix: the original loop asserted on the very first failed check,
        # so the 60s window never actually retried. Retry with a short
        # sleep and only fail once the timeout is exhausted.
        while time.time() < end_time:
            output = natbox_ssh.exec_cmd("nc -v -w 2 {} {}".format(
                lab_ip, port), get_exit_code=False)[1]
            if (port_expected_open and 'succeeded' in output) or (
                    not port_expected_open and 'succeeded' not in output):
                LOG.info("Access via port {} {} as expected".format(
                    port, extra_str))
                return
            time.sleep(5)

        assert False, "Access via port {} is not {}".format(port, extra_str)
    finally:
        # stop the listener and restore the controller ssh session
        end_event.set()
        listener_thread.wait_for_thread_end(timeout=10)
        con_ssh.send_control('c')
        con_ssh.expect(con_ssh.get_prompt())
def _check_packets_forwarded_in_sfc_vm(source_vm_id, dest_vm_id, sfc_vm_ids,
                                       dest_vm_internal_net_ip, protocol,
                                       nsh_aware, symmetric,
                                       load_balancing=False):
    """
    Verify traffic from the source vm reaches the dest vm and traverses
    every SFC vm in the chain.

    Listener threads are started on the dest vm (tcp/udp only) and on each
    SFC vm; a ping (icmp) or hello message (tcp/udp) is sent from the
    source vm, then the per-vm "received" events are checked.

    Args:
        source_vm_id: vm that sends the ping/hello
        dest_vm_id: vm expected to receive the traffic
        sfc_vm_ids (list): SFC vms expected to forward the packets
        dest_vm_internal_net_ip: internal net ip of the dest vm
        protocol (str): 'icmp', 'tcp' or 'udp'
        nsh_aware: whether the SFC vms are NSH aware
        symmetric: whether the port chain is symmetric
        load_balancing (bool): when True, send the hello via tcp_client.py

    """
    end_event = Events("Hello or ping sent to vm")
    start_event = Events("VM {} started listening".format(dest_vm_id))
    received_event = Events("Greeting received on vm {}".format(dest_vm_id))
    # per-SFC-vm (start_event, received_event) pairs
    vms_events = {}
    for sfc_vm in sfc_vm_ids:
        start_event_sfc = Events("SFC vm {} started listening".format(sfc_vm))
        received_event_sfc = Events(
            "Packets received on SFC vm {}".format(sfc_vm))
        vms_events[sfc_vm] = (start_event_sfc, received_event_sfc)
    greeting = "hello"
    port = 20010
    vm_thread = None
    # icmp is verified via ping; no listener thread needed on the dest vm
    if protocol != 'icmp':
        func_args = (start_event, end_event, received_event, dest_vm_id,
                     dest_vm_internal_net_ip, greeting, port, protocol,
                     load_balancing)
        vm_thread = MThread(_ssh_to_dest_vm_and_wait_for_greetings,
                            *func_args)
    sfc_vm_threads = []
    for sfc_vm in sfc_vm_ids:
        start_event_sfc, received_event_sfc = vms_events[sfc_vm]
        func_args = (start_event_sfc, end_event, received_event_sfc, sfc_vm,
                     protocol, nsh_aware, symmetric)
        sfc_vm_thread = MThread(_ssh_to_sfc_vm_and_wait_for_packets,
                                *func_args)
        sfc_vm_threads.append(sfc_vm_thread)

    LOG.tc_step(
        "Starting VM ssh session threads to ping (icmp) or send hello (tcp, udp)"
    )
    if protocol != 'icmp':
        vm_thread.start_thread()
    for sfc_vm_thread in sfc_vm_threads:
        LOG.tc_step("Starting each SFC VM threads")
        sfc_vm_thread.start_thread()

    try:
        # wait for all listeners to be ready before generating traffic
        if protocol != 'icmp':
            start_event.wait_for_event(timeout=180, fail_ok=False)
        for sfc_vm in sfc_vm_ids:
            start_event_sfc, received_event_sfc = vms_events[sfc_vm]
            start_event_sfc.wait_for_event(timeout=120, fail_ok=False)
        if protocol == 'icmp':
            LOG.tc_step(
                "Ping from from vm {} to vm {}, and check it's received".
                format(source_vm_id, dest_vm_id))
            _ping_from_source_to_dest_vm(source_vm_id, end_event,
                                         dest_vm_internal_net_ip)
        else:
            if load_balancing:
                LOG.tc_step(
                    "Send Hello msg from vm using tcp_client.py {} to vm {}, and check it's received"
                    .format(source_vm_id, dest_vm_id))
                _send_hello_message_from_vm_using_tcp_client(
                    source_vm_id, end_event, dest_vm_internal_net_ip)
            else:
                LOG.tc_step(
                    "Send Hello msg from vm {} to vm {}, and check it's received"
                    .format(source_vm_id, dest_vm_id))
                _send_hello_message_from_vm(source_vm_id, greeting,
                                            end_event,
                                            dest_vm_internal_net_ip, port,
                                            protocol)

        # traffic must have been seen on the dest vm (tcp/udp) and on
        # every SFC vm in the chain
        if protocol != 'icmp':
            assert received_event.wait_for_event(
                timeout=30), "Received Event {} is not set".format(
                received_event)
        for sfc_vm in sfc_vm_ids:
            start_event_sfc, received_event_sfc = vms_events[sfc_vm]
            assert received_event_sfc.wait_for_event(
                timeout=10), "Received Event is not set in SFC function"
    finally:
        # stop listeners and reap all threads even on failure
        end_event.set()
        if protocol != 'icmp':
            vm_thread.wait_for_thread_end(timeout=40, fail_ok=False)
        for sfc_vm_thread in sfc_vm_threads:
            sfc_vm_thread.wait_for_thread_end(timeout=40, fail_ok=False)