Example #1
def perf_flowstate():
    """
    Title: Run a 1 hour workload that generates a lot of flow state

    Send 100 UDP packets per second for an hour, changing the src and dst port
    for each packet. The topology is set up in such a way that both conntrack
    and NAT flow state is generated.
    """
    global PARAMETER_FPS, PARAMETER_MINUTES
    sender = BM.get_interface_on_vport('port_1')
    receiver = BM.get_interface_on_vport('port_2')

    messages_per_second = PARAMETER_FPS
    delay = 1000000 // messages_per_second  # inter-packet gap in microseconds
    try:
        sender.execute("hping3 -q -2 -i u%d --destport ++0 %s"
                       % (delay, VTM.get_fip_ip()))
        rcv_filter = 'udp and ip dst %s' % (receiver.get_ip())
        # check that we are still receiving traffic every minute,
        # for PARAMETER_MINUTES minutes
        for i in range(0, PARAMETER_MINUTES):
            assert_that(receiver,
                        receives(rcv_filter, within_sec(60)))
            time.sleep(60)
    finally:
        sender.execute("pkill hping3")
Example #2
def ping(src, dst, expected_failure=False, retries=3):
    try:
        # src and dst could be the vm object
        # or the port name where the vm is bound
        LOG.info("VPNaaS: pinging from %s to %s" % (src, dst))
        src_vm = src if not isinstance(src, str) \
            else BM.get_interface_on_vport(src)
        dst_vm = dst if not isinstance(dst, str) \
            else BM.get_interface_on_vport(dst)
        f1 = src_vm.ping_ipv4_addr(dst_vm.get_ip(update=True),
                                   interval=1, count=5)

        wait_on_futures([f1])
        output_stream, exec_id = f1.result()
        exit_status = src_vm.compute_host.check_exit_status(exec_id,
                                                            output_stream,
                                                            timeout=10)

        assert_that(exit_status, equal_to(0), "Ping did not return any data")
    except AssertionError:
        if retries == 0:
            if expected_failure:
                return
            raise AssertionError("Ping failed after max retries. Giving up.")
        LOG.debug("VPNaaS: failed ping from %s to %s... (%d retries left)" %
                  (src, dst, retries))
        ping(src, dst, expected_failure, retries=retries - 1)
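
The retry logic above is recursive; an equivalent iterative sketch (standalone;
`do_ping` is a hypothetical callable that raises AssertionError on failure)
makes the control flow easier to scan:

def ping_with_retries(do_ping, expected_failure=False, retries=3):
    # Attempt the ping up to retries + 1 times, mirroring the recursion above.
    for attempt in range(retries + 1):
        try:
            do_ping()
            # As in the original, a ping that succeeds although
            # expected_failure=True is not reported as an error.
            return
        except AssertionError:
            if attempt == retries:
                if expected_failure:
                    return
                raise AssertionError("Ping failed after max retries. "
                                     "Giving up.")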
Example #3
    def build(self, binding_data=None, ptm=None):
        # ptm is effectively required despite the None default: the
        # tunnel-zone and cleanup calls below dereference it unconditionally.
        super(QosTopology, self).build(binding_data)

        ptm.add_host_to_tunnel_zone('midolman1', 'tztest1')
        ptm.add_host_to_tunnel_zone('midolman2', 'tztest1')

        self.add_bridge({'name': 'main1'})
        self.main_bridge = self.get_bridge('main1')
        self.main_bridge.add_dhcp_subnet(
            {'id': 1,
             'ipv4_gw': '172.16.1.254',
             'network': '172.16.1.0/24'})

        self.main_bridge.add_port({'id': 1, 'type': 'exterior'})
        self.main_bridge.add_port({'id': 2, 'type': 'exterior'})
        self.vm1_port = self.main_bridge.get_port(1)
        self.vm2_port = self.main_bridge.get_port(2)

        host1 = service.get_container_by_hostname('midolman1')
        host2 = service.get_container_by_hostname('midolman2')

        vm1data = {'hw_addr': 'aa:bb:cc:00:00:11',
                   'ipv4_addr': ['172.16.1.2/24'],
                   'ipv4_gw': '172.16.1.1'}
        vm2data = {'hw_addr': 'aa:bb:cc:00:00:22',
                   'ipv4_addr': ['172.16.1.3/24'],
                   'ipv4_gw': '172.16.1.1'}

        self.vm1 = host1.create_vmguest(**vm1data)
        self.vm2 = host2.create_vmguest(**vm2data)

        ptm.addCleanup(host1.destroy_vmguest, self.vm1)
        ptm.addCleanup(host2.destroy_vmguest, self.vm2)

        port1_real_id = self.vm1_port.get_real_id()
        port2_real_id = self.vm2_port.get_real_id()

        host1.bind_port(self.vm1, port1_real_id)
        host2.bind_port(self.vm2, port2_real_id)

        utils.await_port_active(port1_real_id, active=True)
        utils.await_port_active(port2_real_id, active=True)

        self.qos_pol1 = self.add_qos_policy(
            {'name': 'pol1', 'description': 'Description',
             'shared': True})
        self.qos_pol2 = self.add_qos_policy(
            {'name': 'pol2', 'description': 'Description Two',
             'shared': True})

        assert_that(self.qos_pol1)
        assert_that(self.qos_pol2)
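
utils.await_port_active presumably polls until the agent reports the port
bound. A minimal sketch of that polling pattern, with a generic zero-argument
probe standing in for the real status check (names are assumptions, not the
actual utils API):

import time

def await_condition(probe, timeout=60, interval=1):
    # Poll `probe` (a callable returning bool) until it holds, or raise
    # once `timeout` seconds have elapsed without success.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if probe():
            return
        time.sleep(interval)
    raise RuntimeError("condition not met within %s seconds" % timeout)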
Example #4
def test_qos_policy_update():
    """
    Title: QoS Policy update

    1) Update existing QoS policy and see the updates take effect.
    """
    try:
        PTM.build()
        VTM.build(ptm=PTM)
        VTM.qos_pol1.description("Updated description")

        assert_that(VTM.qos_pol1.get_mn_resource().get_description(),
                    equal_to("Updated description"))

    finally:
        VTM.destroy()
        PTM.destroy()
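
The build-in-try / destroy-in-finally pattern recurs throughout these tests.
A hedged sketch of the same guarantee packaged as a context manager (a
refactoring idea, not an existing MDTS helper):

from contextlib import contextmanager

@contextmanager
def built_topology(ptm, vtm):
    # Build the physical topology, then the virtual one on top of it, and
    # tear both down even if construction or the test body raises.
    try:
        ptm.build()
        vtm.build(ptm=ptm)
        yield vtm
    finally:
        vtm.destroy()
        ptm.destroy()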
Example #5
def test_dhcp():
    """
    Title: DHCP feature in MidoNet Bridge

    Scenario 1:
    Given: a bridge that has DHCP configurations
    When: a VM connected to the bridge sends DHCP requests,
    Then: the VM should get DHCP responses accordingly.
    """
    iface = BM.get_iface_for_port('bridge-000-001', 2)
    iface_new = BM.get_iface_for_port('bridge-000-001', 3)
    shared_lease = '/override/shared-%s.lease' % iface.get_ifname()
    try:

        # Check that interface has 1500 byte MTU before DHCP
        assert iface.get_mtu(update=True) == 1500

        # Check that namespace doesn't have routes before DHCP
        assert iface.get_num_routes(update=True) == 0

        # Run dhclient in the namespace for a while with a specific lease file
        # FIXME: wait 15 seconds? better to wait for an ack from the command?
        result = iface.execute('dhclient -lf %s %s' %
                               (shared_lease, iface.get_ifname()),
                               timeout=15,
                               sync=True)
        LOG.debug('dhclient got response: %s' % result)

        # Assert that the interface gets ip address
        assert_that(iface.get_cidr(update=True), equal_to('172.16.1.101/24'),
                    "Wrong CIDR")

        # TODO(tomoe): assert for default gw and static routes with opt 121
        assert_that(iface.get_num_routes(update=True), greater_than(0),
                    "No routes found")

        # MTU should be 1450 (interface mtu minus 50B, max of gre/vxlan overhead)
        assert_that(iface.get_mtu(update=True), equal_to(1450), "Wrong MTU")

        # MI-536 regression test
        # Check that the 2nd VM, which starts from the 1st VM's lease file
        # (an address that is not its own), receives a DHCP NAK promptly
        # instead of waiting out the 60 s request timeout.
        iface_new.update_interface_name(iface.get_ifname())
        result = iface_new.execute('dhclient -lf %s %s' %
                                   (shared_lease, iface_new.get_ifname()),
                                   timeout=15,
                                   sync=True)
        LOG.debug('dhclient got response: %s' % result)

        # After 15s, check if the interface is correctly configured
        assert iface_new.get_cidr(update=True) == '172.16.1.100/24'
        assert iface_new.get_num_routes(update=True) > 0
        assert iface_new.get_mtu(update=True) == 1450
    finally:
        # Clean up the shared lease file
        iface.execute('rm -f %s' % shared_lease)
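
The FIXME above asks for something better than a fixed 15-second wait. One
option is to poll for the leased address and return as soon as it appears; a
sketch using the get_cidr accessor already shown in this test:

import time

def wait_for_cidr(iface, expected_cidr, timeout=30, interval=1):
    # Poll until dhclient has installed the expected address, returning as
    # soon as it shows up instead of always sleeping the full timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if iface.get_cidr(update=True) == expected_cidr:
            return
        time.sleep(interval)
    raise AssertionError('%s not configured within %s seconds' %
                         (expected_cidr, timeout))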
Example #6
def test_container_maintained_on_cluster_restart():
    # Set container weight on midolman1 and midolman3 to 0 such that containers
    # are scheduled on midolman2
    midonet_api = VTM._midonet_api

    midolman1 = service.get_container_by_hostname('midolman1')
    host1 = midonet_api.get_host(midolman1.get_midonet_host_id())
    host1.container_weight(0).update()

    midolman2 = service.get_container_by_hostname('midolman2')
    host2 = midonet_api.get_host(midolman2.get_midonet_host_id())
    host2.container_weight(1).update()

    midolman3 = service.get_container_by_hostname('midolman3')
    host3 = midonet_api.get_host(midolman3.get_midonet_host_id())
    host3.container_weight(0).update()

    cluster1 = service.get_container_by_hostname('cluster1')

    # Schedule resetting the weights to 1 after the test
    BM.addCleanup(host1.container_weight(1).update)
    BM.addCleanup(host2.container_weight(1).update)
    BM.addCleanup(host3.container_weight(1).update)

    left_router, left_peer_address, left_subnet = VTM.get_site_data('left')
    right_router, right_peer_address, right_subnet = VTM.get_site_data('right')
    up_router, up_peer_address, up_subnet = VTM.get_site_data('up')
    left_tenant, right_tenant, up_tenant = \
        BM.get_binding_data()['config']['tenants']

    left_vpn = VTM.add_vpn_service('left', 'left_vpn', left_tenant, left_router,
                                   left_subnet)
    right_vpn = VTM.add_vpn_service('right', 'right_vpn', right_tenant,
                                    right_router, right_subnet)

    VTM.add_ipsec_site_connection(
            'left', 'left_to_right', left_tenant, right_peer_address,
            vpn=left_vpn, peer_cidrs=[right_subnet['subnet']['cidr']])
    VTM.add_ipsec_site_connection(
            'left', 'left_to_up', left_tenant, up_peer_address,
            vpn=left_vpn, peer_cidrs=[up_subnet['subnet']['cidr']])

    # Give the containers time to reach RUNNING (fixed sleep; a status
    # poll would be more robust, see the sketch after this test)
    time.sleep(5)

    VTM.add_ipsec_site_connection(
            'right', 'right_to_left', right_tenant, left_peer_address,
            vpn=right_vpn, peer_cidrs=[left_subnet['subnet']['cidr']])
    VTM.add_ipsec_site_connection(
            'right', 'right_to_up', right_tenant, up_peer_address,
            vpn=right_vpn, peer_cidrs=[up_subnet['subnet']['cidr']])

    # Give the second batch of connections time to reach RUNNING
    time.sleep(10)

    # All containers should have been scheduled on midolman2
    containers = midonet_api.get_service_containers()
    for container in containers:
        assert_that(container.get_host_id(), equal_to(host2.get_id()),
                    "Container scheduled on the wrong host")
        assert_that(container.get_status(), equal_to("RUNNING"),
                    "Container is not RUNNING")

    # Set the container weight back to 1 for all hosts (host2 is already 1)
    host1.container_weight(1).update()
    host3.container_weight(1).update()

    # The containers should remain on midolman2: restoring the weights
    # must not trigger rescheduling
    containers = midonet_api.get_service_containers()
    for container in containers:
        assert_that(container.get_host_id(), equal_to(host2.get_id()),
                    "Container scheduled on the wrong host")
        assert_that(container.get_status(), equal_to("RUNNING"),
                    "Container is not RUNNING")

    # Stop the cluster node
    cluster1.stop(wait=True)

    # Ping from left to right and vice versa
    ping('port_left', 'port_right')
    ping('port_right', 'port_left')

    # Start the cluster node
    cluster1.start(wait=True)

    # Wait for the cluster to be started
    time.sleep(10)

    # The containers should still be on midolman2 after the cluster restart
    containers = midonet_api.get_service_containers()
    for container in containers:
        assert_that(container.get_host_id(), equal_to(host2.get_id()),
                    "Container scheduled on the wrong host")
        assert_that(container.get_status(), equal_to("RUNNING"),
                    "Container is not RUNNING")

    # Ping from left to right and vice versa
    ping('port_left', 'port_right')
    ping('port_right', 'port_left')
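
The fixed sleeps above only approximate "wait until RUNNING". A hedged polling
sketch built from the midonet_api accessors this test already uses:

import time

def await_containers_running(midonet_api, host_id, timeout=60, interval=2):
    # Poll until every service container reports RUNNING on the expected
    # host, rather than sleeping a fixed interval and hoping for the best.
    deadline = time.time() + timeout
    while time.time() < deadline:
        containers = midonet_api.get_service_containers()
        if containers and all(c.get_status() == "RUNNING" and
                              c.get_host_id() == host_id
                              for c in containers):
            return
        time.sleep(interval)
    raise AssertionError("containers not RUNNING on host %s within %s s" %
                         (host_id, timeout))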
Example #7
def test_container_restored_on_agent_failure():
    # Set container weight on midolman1 and midolman3 to 0 such that containers
    # are scheduled on midolman2
    midonet_api = VTM._midonet_api

    midolman1 = service.get_container_by_hostname('midolman1')
    host1 = midonet_api.get_host(midolman1.get_midonet_host_id())
    host1.container_weight(0).update()

    midolman2 = service.get_container_by_hostname('midolman2')
    host2 = midonet_api.get_host(midolman2.get_midonet_host_id())
    host2.container_weight(1).update()

    midolman3 = service.get_container_by_hostname('midolman3')
    host3 = midonet_api.get_host(midolman3.get_midonet_host_id())
    host3.container_weight(0).update()

    # Schedule resetting the weights to 1 after the test
    BM.addCleanup(host1.container_weight(1).update)
    BM.addCleanup(host2.container_weight(1).update)
    BM.addCleanup(host3.container_weight(1).update)

    left_router, left_peer_address, left_subnet = VTM.get_site_data('left')
    right_router, right_peer_address, right_subnet = VTM.get_site_data('right')
    up_router, up_peer_address, up_subnet = VTM.get_site_data('up')
    left_tenant, right_tenant, up_tenant = \
        BM.get_binding_data()['config']['tenants']

    left_vpn = VTM.add_vpn_service('left', 'left_vpn', left_tenant, left_router,
                                   left_subnet)
    right_vpn = VTM.add_vpn_service('right', 'right_vpn', right_tenant,
                                    right_router, right_subnet)

    VTM.add_ipsec_site_connection(
            'left', 'left_to_right', left_tenant, right_peer_address,
            vpn=left_vpn, peer_cidrs=[right_subnet['subnet']['cidr']])
    VTM.add_ipsec_site_connection(
            'left', 'left_to_up', left_tenant, up_peer_address,
            vpn=left_vpn, peer_cidrs=[up_subnet['subnet']['cidr']])

    # Wait for container status to be RUNNING
    time.sleep(5)

    VTM.add_ipsec_site_connection(
            'right', 'right_to_left', right_tenant, left_peer_address,
            vpn=right_vpn, peer_cidrs=[left_subnet['subnet']['cidr']])
    VTM.add_ipsec_site_connection(
            'right', 'right_to_up', right_tenant, up_peer_address,
            vpn=right_vpn, peer_cidrs=[up_subnet['subnet']['cidr']])

    # Kill the agent
    pid = midolman2.exec_command(
        "pidof /usr/lib/jvm/java-8-openjdk-amd64/bin/java")
    midolman2.exec_command("kill -9 %s" % pid)
    midolman2.stop(wait=True)

    # Restart midolman2 and wait for the agent to come back up
    midolman2.start(wait=True)

    # Wait for container status to be RUNNING
    time.sleep(10)

    # The containers should be restored on midolman2 once the agent is back
    containers = midonet_api.get_service_containers()
    for container in containers:
        assert_that(container.get_host_id(), equal_to(host2.get_id()),
                    "Container scheduled on the wrong host")
        assert_that(container.get_status(), equal_to("RUNNING"),
                    "Container is not RUNNING")

    # Ping from left to right and vice versa
    ping('port_left', 'port_right')
    ping('port_right', 'port_left')
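
Killing the JVM and then stopping/starting the container is how this test
simulates a hard agent crash. The same sequence as a small helper, reusing
only the container calls shown above (the helper name is hypothetical):

def hard_restart_agent(agent_container):
    # Simulate an agent crash: SIGKILL the JVM, then cleanly stop and
    # restart the container so its supervision state stays consistent.
    pid = agent_container.exec_command(
        "pidof /usr/lib/jvm/java-8-openjdk-amd64/bin/java")
    agent_container.exec_command("kill -9 %s" % pid)
    agent_container.stop(wait=True)
    agent_container.start(wait=True)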