    def restart_ovs_on_controllers(self, env, os_conn):
        ovs_agent_ids, ovs_controller_agents = self.get_ovs_agents(
            env, os_conn)
        os_conn.wait_agents_alive(ovs_agent_ids)
        common.ban_ovs_agents_controllers(env)
        os_conn.wait_agents_down(ovs_controller_agents)
        common.clear_ovs_agents_controllers(env)
        common.restart_ovs_agents_on_computes(env)
        os_conn.wait_agents_alive(ovs_agent_ids)
        # sleep to make sure the system is stable after the ovs restart
        time.sleep(30)
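
    # The `common` helpers used above and in the tests below are assumed to
    # wrap pacemaker (pcs) and service commands executed over SSH on the
    # controllers/computes.  The sketch below only illustrates what those
    # commands might look like; it is an assumption, not the implementation
    # of `common`, and the pacemaker resource name
    # 'p_neutron-plugin-openvswitch-agent' may differ between deployments.
    @staticmethod
    def _ovs_agent_command(action, node=None,
                           resource='p_neutron-plugin-openvswitch-agent'):
        """Return the shell command such a helper would presumably run.

        action: 'ban', 'clear', 'disable' or 'enable' (pcs commands run on
        a controller, ban/clear optionally per node) or 'restart' (service
        restart run on every compute).
        """
        if action == 'restart':
            return 'service neutron-plugin-openvswitch-agent restart'
        cmd = 'pcs resource {0} {1}'.format(action, resource)
        if node:
            cmd += ' ' + node
        return cmd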
    def test_ovs_restart_pcs_disable_enable_ping_private_vms(self):
        """Restart openvswitch-agents with pcs disable/enable on controllers.

        Steps:
            1. Update default security group if needed
            2. Create CONFIG 1:
                Network: test_net_05
                Subnet:  test_net_05__subnet, 192.168.5.0/24
                Router:  test_router_05
            3. Create CONFIG 2:
                Network: test_net_06
                Subnet:  test_net_06__subnet, 192.168.6.0/24
                Router:  test_router_06
            4. Launch 'test_vm_05' inside 'config 1'
            5. Launch 'test_vm_06' inside 'config 2'
            6. Go to 'test_vm_05' console and send pings to 'test_vm_06'.
                Pings should NOT go between VMs.
            7. Operations with OVS agents:
                - Check that all OVS agents are alive;
                - Disable ovs-agents on all controllers;
                - Check that they went down;
                - Restart OVS agent service on all computes;
                - Enable ovs-agents on all controllers;
                - Check that they went up and are alive;
            8. Wait 30 seconds, send pings from 'test_vm_05' to 'test_vm_06'
                and check that they are still NOT successful.

        Duration 5m

        """
        self._prepare_openstack()
        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Disable ovs agent on all controllers
        common.disable_ovs_agents_on_controller(self.env)

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Restart ovs agent service on all computes
        common.restart_ovs_agents_on_computes(self.env)

        # Enable ovs agent on all controllers
        common.enable_ovs_agents_on_controllers(self.env)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # wait for a while to make sure the system stays stable
        # after the service restart
        time.sleep(30)

        self.check_no_ping_from_vm(self.server1, self.instance_keypair,
                                   self.server2_ip, timeout=None)
    def test_ovs_restart_pcs_disable_enable(self, count):
        """Restart openvswitch-agents with pcs disable/enable on controllers

        Steps:
            1. Update default security group
            2. Create router01, create networks net01: net01__subnet,
                192.168.1.0/24, net02: net02__subnet, 192.168.2.0/24 and
                attach them to router01.
            3. Launch vm1 in net01 network and vm2 in net02 network
                on different computes
            4. Go to vm1 console and send pings to vm2
            5. Disable ovs-agents on a controller, restart service
                neutron-plugin-openvswitch-agent on all computes, and enable
                them back. To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and check that
                it is successful.
            7. Repeat steps 5-6 'count' times

        Duration 10m

        """
        self._prepare_openstack()
        for _ in range(count):
            # Check that all ovs agents are alive
            self.os_conn.wait_agents_alive(self.ovs_agent_ids)

            # Disable ovs agent on a controller
            common.disable_ovs_agents_on_controller(self.env)

            # Then check that all ovs went down
            self.os_conn.wait_agents_down(self.ovs_conroller_agents)

            # Restart ovs agent service on all computes
            common.restart_ovs_agents_on_computes(self.env)

            # Enable ovs agent on a controller
            common.enable_ovs_agents_on_controllers(self.env)

            # Then check that all ovs agents are alive
            self.os_conn.wait_agents_alive(self.ovs_agent_ids)

            # wait for a while to make sure the system stays stable
            # after the service restart
            time.sleep(30)

            network_checks.check_ping_from_vm(
                self.env, self.os_conn, self.server1, self.instance_keypair,
                self.server2_ip, timeout=10 * 60)

            # check all agents are alive
            assert all([agt['alive'] for agt in
                        self.os_conn.neutron.list_agents()['agents']])
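
    # `wait_agents_alive` and `wait_agents_down` are provided by os_conn; the
    # helper below is only a sketch (an assumption, not the real os_conn
    # implementation) of how such a wait can be built by polling the same
    # neutron agent list that the assert above inspects.
    def _wait_ovs_agents_state(self, agent_ids, alive=True,
                               timeout=5 * 60, interval=10):
        """Poll neutron until given agents reach the wanted alive state."""
        end_time = time.time() + timeout
        while time.time() < end_time:
            agents = self.os_conn.neutron.list_agents()['agents']
            states = [agt['alive'] for agt in agents
                      if agt['id'] in agent_ids]
            if states and all(state == alive for state in states):
                return
            time.sleep(interval)
        raise AssertionError(
            'OVS agents did not become {0} within {1} seconds'.format(
                'alive' if alive else 'down', timeout))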
    def test_ovs_new_flows_added_after_restart(self):
        """Check that new flows are added after ovs-agents restart

        Steps:
            1. Create network net01: net01__subnet, 192.168.1.0/24
            2. Launch vm1 in net01 network
            3. Get list of flows for br-int
            4. Save cookie parameter for bridge
            5. Disable ovs-agents on all controllers, restart service
               neutron-plugin-openvswitch-agent on all computes, and enable
               them back. To do this, launch the script against master node.
            6. Check that all ovs-agents are in alive state
            7. Get list of flows for br-int again
            8. Compare cookie parameters
        """
        self._prepare_openstack()
        server = self.os_conn.nova.servers.find(name="server_for_flow_check")
        node_name = getattr(server, "OS-EXT-SRV-ATTR:hypervisor_hostname")
        compute = [i for i in self.env.get_nodes_by_role('compute')
                   if i.data['fqdn'] == node_name][0]

        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        before_value = self.get_current_cookie(compute)

        assert all([len(x) < 2 for x in before_value.values()])

        # Disable ovs agent on all controllers
        common.disable_ovs_agents_on_controller(self.env)

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Restart ovs agent service on all computes
        common.restart_ovs_agents_on_computes(self.env)

        # Enable ovs agent on all controllers
        common.enable_ovs_agents_on_controllers(self.env)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # wait for a while to make sure the system stays stable
        # after the service restart
        time.sleep(30)

        after_value = self.get_current_cookie(compute)
        assert before_value != after_value

        assert all([len(x) < 2 for x in after_value.values()])
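
    # `get_current_cookie` is defined outside this section; the parser below
    # is only a sketch (an assumption about the approach, not the real
    # helper) of how the cookie set can be read from the output of
    # `ovs-ofctl dump-flows br-int` taken on the compute node: every flow
    # line carries a `cookie=0x...` field, and the ovs agent installs flows
    # with a new cookie after it has been restarted.
    @staticmethod
    def _extract_flow_cookies(dump_flows_output):
        """Return the set of distinct cookies found in dump-flows output."""
        import re  # local import keeps the sketch self-contained
        return set(re.findall(r'cookie=(0x[0-9a-f]+)', dump_flows_output))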
    def test_ovs_restart_pcs_vms_on_single_compute_in_single_network(self):
        """Check connectivity for instances scheduled on a single compute in
         a single private network

        Steps:
            1. Update default security group
            2. Create networks net01: net01__subnet, 192.168.1.0/24
            3. Launch vm1 and vm2 in net01 network on a single compute
            4. Go to vm1 console and send pings to vm2
            5. Disable ovs-agents on all controllers, restart service
                neutron-plugin-openvswitch-agent on all computes, and enable
                them back. To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and check that
                it is successful.

        Duration 10m

        """
        self._prepare_openstack()
        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Disable ovs agent on all controllers
        common.disable_ovs_agents_on_controller(self.env)

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Restart ovs agent service on all computes
        common.restart_ovs_agents_on_computes(self.env)

        # Enable ovs agent on all controllers
        common.enable_ovs_agents_on_controllers(self.env)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # wait for a while to make sure the system stays stable
        # after the service restart
        time.sleep(30)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, self.server1, self.instance_keypair,
            self.server2_ip, timeout=3 * 60)

        # check all agents are alive
        assert all([agt['alive'] for agt in
                    self.os_conn.neutron.list_agents()['agents']])
    def test_ovs_restart_pcs_ban_clear(self):
        """Restart openvswitch-agents with pcs ban/clear on controllers

        Steps:
            1. Update default security group
            2. Create router01, create networks.
            3. Launch vm1 in net01 network and vm2 in net02 network
                on different computes.
            4. Go to vm1 console and send pings to vm2
            5. Ban ovs-agents on all controllers, clear them and restart
                service neutron-plugin-openvswitch-agent on all computes.
                To do this, launch the script against master node.
            6. Wait 30 seconds, send pings from vm1 to vm2 and
                check that it is successful.

        Duration 10m

        """
        self._prepare_openstack()
        # Check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Ban ovs agents on all controllers
        common.ban_ovs_agents_controllers(self.env)

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Clear ovs agents on all controllers
        common.clear_ovs_agents_controllers(self.env)

        # Restart ovs agent service on all computes
        common.restart_ovs_agents_on_computes(self.env)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # wait for a while to make sure the system stays stable
        # after the service restart
        time.sleep(30)

        network_checks.check_ping_from_vm(
            self.env, self.os_conn, self.server1, self.instance_keypair,
            self.server2_ip, timeout=3 * 60)

        # check all agents are alive
        assert all([agt['alive'] for agt in
                    self.os_conn.neutron.list_agents()['agents']])
    def test_ovs_restart_with_iperf_traffic(self):
        """Checks that iperf traffic is not interrupted during ovs restart

        Steps:
            1. Run iperf server on server2
            2. Run iperf client on server1
            3. Check that packet loss is < 1%
            4. Disable ovs-agents on all controllers,
                restart service neutron-plugin-openvswitch-agent
                on all computes, and enable them back.
            5. Check that all ovs-agents are in alive state
            6. Check that iperf traffic wasn't interrupted during ovs restart,
                and that no more than 20% of datagrams are lost
        """
        self._prepare_openstack()

        iperf_log_file = '/tmp/iperf_client.log'

        client = self.server1
        server = self.server2

        # Launch iperf client
        pid = self.launch_iperf_client(client, server,
                                       self.instance_keypair,
                                       vm_login='******',
                                       stdout=iperf_log_file)

        time.sleep(60)

        self.wait_command_done(pid, vm=client, keypair=self.instance_keypair,
                               vm_login='******')

        with self.os_conn.ssh_to_instance(self.env,
                                          vm=client,
                                          vm_keypair=self.instance_keypair,
                                          username='******') as remote:
            with remote.open(iperf_log_file) as f:
                iperf_result = f.read()

        # Check iperf traffic before restart
        lost = self.get_lost_percentage(iperf_result)
        err_msg = "Packet loss is more than 1%. Actual value is {0}%".format(
            lost)
        assert lost < 1, err_msg

        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Launch client in background and restart agents
        pid = self.launch_iperf_client(client, server,
                                       self.instance_keypair,
                                       vm_login='******',
                                       stdout=iperf_log_file)

        common.disable_ovs_agents_on_controller(self.env)
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)
        common.restart_ovs_agents_on_computes(self.env)
        common.enable_ovs_agents_on_controllers(self.env)
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        self.wait_command_done(pid, vm=client, keypair=self.instance_keypair,
                               vm_login='******')

        with self.os_conn.ssh_to_instance(self.env,
                                          vm=client,
                                          vm_keypair=self.instance_keypair,
                                          username='******') as remote:
            with remote.open(iperf_log_file) as f:
                iperf_result = f.read()

        # Check iperf traffic after restart
        lost = self.get_lost_percentage(iperf_result)
        err_msg = "{0}% datagrams lost. Should be < 20%".format(lost)
        assert lost < 20, err_msg

        # check all agents are alive
        assert all([agt['alive'] for agt in
                    self.os_conn.neutron.list_agents()['agents']])
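
    # `get_lost_percentage` is defined outside this section; the parser below
    # is only a sketch (an assumption, not the real helper) of how datagram
    # loss can be read from an iperf UDP client log, whose report lines end
    # with a loss summary such as "  5/893 (0.56%)".
    @staticmethod
    def _parse_iperf_udp_loss(iperf_output):
        """Return the last datagram-loss percentage found in iperf output."""
        import re  # local import keeps the sketch self-contained
        matches = re.findall(r'\((\d+(?:\.\d+)?)%\)', iperf_output)
        if not matches:
            raise ValueError('no loss report found in iperf output')
        # the last report in the log is the overall summary
        return float(matches[-1])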
    def test_restart_openvswitch_agent_under_bat(self):
        """Restart openvswitch-agents with broadcast traffic in the background

        Steps:
            1. Go to vm1's console and run arping
               to initiate broadcast traffic:
                    arping -I eth0 <vm2_fixed_ip>
            2. Disable ovs-agents on all controllers
            3. Restart service 'neutron-plugin-openvswitch-agent'
               on all computes
            4. Enable ovs-agents back.
            5. Check that pings between vm1 and vm2 aren't interrupted
               or that no more than 2 packets are lost
        """
        self._prepare_openstack()
        # Run arping in background on server01 towards server02
        srv_list = self.os_conn.nova.servers.list()
        srv1, srv2 = srv_list[:2]
        vm_ip = self.os_conn.get_nova_instance_ips(srv2)['fixed']

        arping_cmd = 'sudo /usr/sbin/arping -I eth0 {}'.format(vm_ip)
        arping_log = '/tmp/arp.log'
        with self.os_conn.ssh_to_instance(self.env,
                                          srv1,
                                          vm_keypair=self.instance_keypair,
                                          username='******') as remote:
            pid = remote.background_call(arping_cmd, stdout=arping_log)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Disable ovs agent on all controllers
        common.disable_ovs_agents_on_controller(self.env)

        # Then check that all ovs went down
        self.os_conn.wait_agents_down(self.ovs_conroller_agents)

        # Restart ovs agent service on all computes
        common.restart_ovs_agents_on_computes(self.env)

        # Enable ovs agent on all controllers
        common.enable_ovs_agents_on_controllers(self.env)

        # Then check that all ovs agents are alive
        self.os_conn.wait_agents_alive(self.ovs_agent_ids)

        # Check that arping is still executing
        cmd = 'ps -o pid | grep {}'.format(pid)
        with self.os_conn.ssh_to_instance(self.env,
                                          srv1,
                                          vm_keypair=self.instance_keypair,
                                          username='******') as remote:
            result = remote.execute(cmd)
            assert result.is_ok, 'arping command has died'

            # Read the arping execution log for possible future debugging
            with remote.open(arping_log) as f:
                logger.debug(f.read())

        # Check connectivity
        network_checks.check_vm_connectivity(self.env, self.os_conn)
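
    # Step 5 of the docstring above asks for "no more than 2 packets lost",
    # while the test body only verifies that arping is still running and
    # logs its output.  The helper below is a sketch (an assumption, not
    # part of the original test) of how the loss could be computed from an
    # iputils-style arping summary, which ends with lines like
    # "Sent 120 probes (1 broadcast(s))" / "Received 118 response(s)".
    # Note that the arping shipped in the guest image may format its
    # statistics differently.
    @staticmethod
    def _arping_lost_packets(arping_output):
        """Return sent - received parsed from an arping summary."""
        import re  # local import keeps the sketch self-contained
        sent = int(re.search(r'Sent (\d+) probe', arping_output).group(1))
        received = int(
            re.search(r'Received (\d+) response', arping_output).group(1))
        return sent - received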