Example #1
    def _test_assert_pings_during_br_phys_setup_not_lost(self, provider_net):
        # A separate namespace is needed when pinging from one port to
        # another; otherwise Linux ping uses the loopback interface for
        # sending and receiving, bypassing the flow setup under test.
        ns_phys = self.useFixture(net_helpers.NamespaceFixture()).name

        ports = self.create_test_ports(amount=2)
        port_int = ports[0]
        port_phys = ports[1]
        ip_int = port_int['fixed_ips'][0]['ip_address']
        ip_phys = port_phys['fixed_ips'][0]['ip_address']

        self.setup_agent_and_ports(port_dicts=[port_int], create_tunnels=False,
                                   network=provider_net)

        self.plug_ports_to_phys_br(provider_net, [port_phys],
                                   namespace=ns_phys)

        # The OVS agent doesn't monitor the physical bridges, so no
        # notification is sent when a port goes up on a physical bridge;
        # hence we wait only for the ports connected to br-int.
        self.wait_until_ports_state([port_int], up=True)

        with net_helpers.async_ping(ns_phys, [ip_int]) as done:
            while not done():
                self.agent.setup_physical_bridges(self.agent.bridge_mappings)
                time.sleep(0.25)

        with net_helpers.async_ping(self.namespace, [ip_phys]) as done:
            while not done():
                self.agent.setup_physical_bridges(self.agent.bridge_mappings)
                time.sleep(0.25)
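
Every example on this page follows the same shape: net_helpers.async_ping is entered as a context manager that launches background pings and yields a done callable; the test keeps applying a disruptive action until done() reports the pings have finished, and leaving the with block is expected to surface any lost ping. Below is a minimal sketch of that pattern; the import path is assumed to be neutron.tests.common, and the helper name and the disrupt() callable are hypothetical stand-ins for the bridge-setup calls shown above.

import time

from neutron.tests.common import net_helpers


def assert_connectivity_survives(namespace, target_ips, disrupt):
    # Hypothetical helper illustrating the pattern used in these examples:
    # ping in the background while repeatedly applying a disruptive action
    # (e.g. re-creating bridge flows); exiting the context manager is
    # expected to fail if any ping was lost.
    with net_helpers.async_ping(namespace, target_ips) as done:
        while not done():
            disrupt()
            time.sleep(0.25)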
Example #2
    def test_l2_agent_restart(self, agent_restart_timeout=20):
        # Environment preparation is effectively the same as connectivity test
        vms = self._prepare_vms_in_single_network()
        vms.ping_all()

        ns0 = vms[0].namespace
        ip1 = vms[1].ip
        agents = [host.l2_agent for host in self.environment.hosts]

        # Restart agents on all nodes simultaneously while pinging across
        # the hosts. The ping has to cross the int and phys bridges and
        # travel via the central bridge, as the VMs are on separate hosts.
        with net_helpers.async_ping(ns0, [ip1], timeout=2,
                                    count=agent_restart_timeout) as done:
            LOG.debug("Restarting agents")
            executor = futures.ThreadPoolExecutor(max_workers=len(agents))
            restarts = [agent.restart(executor=executor)
                        for agent in agents]

            futures.wait(restarts, timeout=agent_restart_timeout)

            self.assertTrue(all([r.done() for r in restarts]))
            LOG.debug("Restarting agents - done")

            # It is necessary to give agents time to initialize
            # because some crucial steps (e.g. setting up bridge flows)
            # happen only after RPC is established
            common_utils.wait_until_true(
                done,
                exception=RuntimeError("Could not ping the other VM, L2 agent "
                                       "restart leads to network disruption"))
Example #3
    def _test_controller_timeout_does_not_break_connectivity(self,
                                                             kill_signal=None):
        # Environment preparation is effectively the same as connectivity test
        vms = self._prepare_vms_in_single_network()
        vms.ping_all()

        ns0 = vms[0].namespace
        ip1 = vms[1].ip

        LOG.debug("Stopping agents (hence also OVS bridge controllers)")
        for host in self.environment.hosts:
            if kill_signal is not None:
                host.l2_agent.stop(kill_signal=kill_signal)
            else:
                host.l2_agent.stop()

        # Ping to make sure that the 3 x 5 second window is outlasted even
        # under high load. The duration was chosen to match three times the
        # inactivity_probe time, which is the time after which ovs-vswitchd
        # treats the controller as dead and starts managing the bridge by
        # itself when the fail mode setting is not set to secure (see the
        # ovs-vsctl man page for further details).
        with net_helpers.async_ping(ns0, [ip1], timeout=2, count=25) as done:
            common_utils.wait_until_true(
                done,
                exception=RuntimeError("Networking interrupted after "
                                       "controllers have vanished"))
Example #4
    def _assert_ping_during_agents_restart(self,
                                           agents,
                                           src_namespace,
                                           ips,
                                           restart_timeout=10,
                                           ping_timeout=1,
                                           count=10):
        with net_helpers.async_ping(src_namespace,
                                    ips,
                                    timeout=ping_timeout,
                                    count=count) as done:
            LOG.debug("Restarting agents")
            executor = futures.ThreadPoolExecutor(max_workers=len(agents))
            restarts = [agent.restart(executor=executor) for agent in agents]

            futures.wait(restarts, timeout=restart_timeout)

            self.assertTrue(all([r.done() for r in restarts]))
            LOG.debug("Restarting agents - done")

            # It is necessary to give agents time to initialize
            # because some crucial steps (e.g. setting up bridge flows)
            # happen only after RPC is established
            agent_names = ', '.join(
                {agent.process_fixture.process_name
                 for agent in agents})
            common_utils.wait_until_true(done,
                                         timeout=count * (ping_timeout + 1),
                                         exception=RuntimeError(
                                             "Could not ping the other VM, "
                                             "re-starting %s leads to network "
                                             "disruption" % agent_names))
Example #5
    def _assert_ping_during_agents_restart(
            self, agents, src_namespace, ips, restart_timeout=10,
            ping_timeout=1, count=10):
        with net_helpers.async_ping(
                src_namespace, ips, timeout=ping_timeout,
                count=count) as done:
            LOG.debug("Restarting agents")
            executor = futures.ThreadPoolExecutor(max_workers=len(agents))
            restarts = [agent.restart(executor=executor)
                        for agent in agents]

            futures.wait(restarts, timeout=restart_timeout)

            self.assertTrue(all([r.done() for r in restarts]))
            LOG.debug("Restarting agents - done")

            # It is necessary to give agents time to initialize
            # because some crucial steps (e.g. setting up bridge flows)
            # happen only after RPC is established
            agent_names = ', '.join({agent.process_fixture.process_name
                                     for agent in agents})
            common_utils.wait_until_true(
                done,
                timeout=count * (ping_timeout + 1),
                exception=RuntimeError("Could not ping the other VM, "
                                       "re-starting %s leads to network "
                                       "disruption" % agent_names))
Example #6
    def test_assert_pings_during_br_int_setup_not_lost(self):
        self.setup_agent_and_ports(port_dicts=self.create_test_ports(),
                                   create_tunnels=False)
        self.wait_until_ports_state(self.ports, up=True)
        ips = [port["fixed_ips"][0]["ip_address"] for port in self.ports]
        with net_helpers.async_ping(self.namespace, ips) as done:
            while not done():
                self.agent.setup_integration_br()
                time.sleep(0.25)
Example #7
    def test_assert_pings_during_br_int_setup_not_lost(self):
        self.setup_agent_and_ports(port_dicts=self.create_test_ports(),
                                   create_tunnels=False)
        self.wait_until_ports_state(self.ports, up=True)
        ips = [port['fixed_ips'][0]['ip_address'] for port in self.ports]
        with net_helpers.async_ping(self.namespace, ips) as done:
            while not done():
                self.agent.setup_integration_br()
                time.sleep(0.25)
Example #8
def wait_for_dscp_marked_packet(sender_vm, receiver_vm, dscp_mark):
    cmd = [
        "tcpdump", "-i", receiver_vm.port.name, "-nlt", "src", sender_vm.ip,
        'and', 'dst', receiver_vm.ip
    ]
    if dscp_mark:
        cmd += ["and", "(ip[1] & 0xfc == %s)" % (dscp_mark << 2)]
    tcpdump_async = async_process.AsyncProcess(cmd,
                                               run_as_root=True,
                                               namespace=receiver_vm.namespace)
    tcpdump_async.start(block=True)

    with net_helpers.async_ping(sender_vm.namespace, [receiver_vm.ip]) as done:
        while not done():
            time.sleep(0.25)

    try:
        tcpdump_async.stop(kill_signal=signal.SIGINT)
    except async_process.AsyncProcessException:
        # If it was already stopped then we don't care about it
        pass

    tcpdump_stderr_lines = []
    pattern = r"(?P<packets_count>^\d+) packets received by filter"
    for line in tcpdump_async.iter_stderr():
        m = re.match(pattern, line)
        if m and int(m.group("packets_count")) != 0:
            return
        tcpdump_stderr_lines.append(line)

    tcpdump_stdout_lines = [line for line in tcpdump_async.iter_stdout()]
    LOG.debug("Captured output lines from tcpdump. Stdout: %s; Stderr: %s",
              tcpdump_stdout_lines, tcpdump_stderr_lines)

    raise TcpdumpException(
        "No packets marked with DSCP = %(dscp_mark)s received from %(src)s "
        "to %(dst)s" % {
            'dscp_mark': dscp_mark,
            'src': sender_vm.ip,
            'dst': receiver_vm.ip
        })
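
The DSCP match in the tcpdump filter above is worth unpacking: byte ip[1] of the IPv4 header carries DSCP in its upper six bits and ECN in the lower two, so masking with 0xfc and comparing against dscp_mark << 2 selects exactly the packets carrying the requested mark. Here is a small standalone sketch of that computation; the helper name and the sample mark value are illustrative only.

def dscp_filter_expression(dscp_mark):
    # The upper 6 bits of the ToS/DSCP byte hold the DSCP value and the
    # lower 2 bits hold ECN; shifting the mark left by two aligns it with
    # the 0xfc mask used in the tcpdump filter.
    return "(ip[1] & 0xfc == %s)" % (dscp_mark << 2)


# For example, DSCP 26 (AF31) yields "(ip[1] & 0xfc == 104)", i.e. 0x68.
print(dscp_filter_expression(26))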
Example #9
def wait_for_dscp_marked_packet(sender_vm, receiver_vm, dscp_mark):
    cmd = [
        "tcpdump", "-i", receiver_vm.port.name, "-nlt",
        "src", sender_vm.ip, 'and', 'dst', receiver_vm.ip]
    if dscp_mark:
        cmd += ["and", "(ip[1] & 0xfc == %s)" % (dscp_mark << 2)]
    tcpdump_async = async_process.AsyncProcess(cmd, run_as_root=True,
                                               namespace=receiver_vm.namespace)
    tcpdump_async.start(block=True)

    with net_helpers.async_ping(sender_vm.namespace, [receiver_vm.ip]) as done:
        while not done():
            time.sleep(0.25)

    try:
        tcpdump_async.stop(kill_signal=signal.SIGINT)
    except async_process.AsyncProcessException:
        # If it was already stopped then we don't care about it
        pass

    tcpdump_stderr_lines = []
    pattern = r"(?P<packets_count>^\d+) packets received by filter"
    for line in tcpdump_async.iter_stderr():
        m = re.match(pattern, line)
        if m and int(m.group("packets_count")) != 0:
            return
        tcpdump_stderr_lines.append(line)

    tcpdump_stdout_lines = [line for line in tcpdump_async.iter_stdout()]
    LOG.debug("Captured output lines from tcpdump. Stdout: %s; Stderr: %s",
              tcpdump_stdout_lines, tcpdump_stderr_lines)

    raise TcpdumpException(
        "No packets marked with DSCP = %(dscp_mark)s received from %(src)s "
        "to %(dst)s" % {'dscp_mark': dscp_mark,
                        'src': sender_vm.ip,
                        'dst': receiver_vm.ip})