Example #1
0
    def bind(self):
        # Schedule deletion of virtual and physical topologies
        self.addCleanup(self._ptm.destroy)
        self._ptm.build(self._data)
        self.addCleanup(self._vtm.destroy)
        self._vtm.build(self._data)
        self._add_hosts_to_tunnel_zone()
        for binding in self._data['bindings']:
            vport = self._vtm.get_resource(binding['vport'])
            bind_iface = binding['interface']
            if isinstance(bind_iface, dict):
                # We are specifying the vms inside the binding
                iface_def = self._update_addresses(bind_iface['definition'], vport)
                iface_type = bind_iface['type']
                hostname = bind_iface['hostname']
                host = service.get_container_by_hostname(hostname)
                iface = getattr(host, "create_%s" % iface_type)(**iface_def)
                self.addCleanup(getattr(host, "destroy_%s" % iface_type), iface)
            else:
                # It's a vm already created and saved as a resource
                iface = self._ptm.get_resource(binding['interface'])

            vport_id = self._get_port_id(vport)

            # Do the actual binding
            binding = iface.compute_host.bind_port(iface, vport_id)
            self.addCleanup(iface.compute_host.unbind_port, iface)
            self._mappings[vport_id] = iface
            await_port_active(vport_id)
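Note: every bind() variant in these examples ends with await_port_active(), whose implementation is not shown here. As a hedged sketch only (the real MDTS utility may differ), a helper of that shape is a polling loop; the is_active callable below is a hypothetical stand-in for whatever accessor the framework uses to read a port's activity flag.

import time

def await_condition(is_active, active=True, timeout=120, sleep_period=2):
    # Poll the caller-supplied is_active() callable until it reports the
    # desired state, failing once the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if is_active() == active:
            return
        time.sleep(sleep_period)
    raise AssertionError("port did not reach active=%s within %s seconds"
                         % (active, timeout))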
Example #2
0
    def bind(self, filename=None, data=None):

        self._data = self._get_data(filename, data)
        # Get a new API ref to work around previous ZK failures
        self._api = get_midonet_api()

        bindings = self._data['bindings']
        for b in bindings:
            binding = b['binding']

            host_id = binding['host_id']
            iface_id = binding['interface_id']
            device_name = binding['device_name']
            port_id = binding['port_id']

            self._port_if_map[(device_name, port_id)] = (host_id, iface_id)

            device_port = self._vtm.get_device_port(device_name, port_id)
            mn_vport = device_port._mn_resource
            if mn_vport.get_type() == 'InteriorRouter' or \
               mn_vport.get_type() == 'InteriorBridge':
                LOG.error("Cannot bind interior port")
                sys.exit(-1) # TODO: make this fancier

            mn_vport_id = mn_vport.get_id()
            iface = self._ptm.get_interface(host_id, iface_id)
            iface.clear_arp(sync=True)
            iface_name = iface.interface['ifname']
            mn_host_id = iface.host['mn_host_id']
            iface.vport_id = mn_vport_id

            self._api.get_host(mn_host_id).add_host_interface_port()\
                                       .port_id(mn_vport_id)\
                                       .interface_name(iface_name).create()
            await_port_active(mn_vport_id)
Example #3
0
    def bind(self, filename=None, data=None):

        self._data = self._get_data(filename, data)
        # Get a new API ref to work around previous ZK failures
        self._api = get_midonet_api()

        bindings = self._data['bindings']
        for b in bindings:
            binding = b['binding']

            host_id = binding['host_id']
            iface_id = binding['interface_id']
            device_name = binding['device_name']
            port_id = binding['port_id']

            self._port_if_map[(device_name, port_id)] = (host_id, iface_id)

            device_port = self._vtm.get_device_port(device_name, port_id)
            mn_vport = device_port._mn_resource
            if mn_vport.get_type() == 'InteriorRouter' or \
               mn_vport.get_type() == 'InteriorBridge':
                LOG.error("Cannot bind interior port")
                sys.exit(-1)  # TODO: make this fancier

            mn_vport_id = mn_vport.get_id()
            iface = self._ptm.get_interface(host_id, iface_id)
            iface.clear_arp(sync=True)
            iface_name = iface.interface['ifname']
            mn_host_id = iface.host['mn_host_id']
            iface.vport_id = mn_vport_id

            self._api.get_host(mn_host_id).add_host_interface_port()\
                                       .port_id(mn_vport_id)\
                                       .interface_name(iface_name).create()
            await_port_active(mn_vport_id)
Example #4
0
    def bind(self):
        # Schedule deletion of virtual and physical topologies
        self.addCleanup(self._ptm.destroy)
        self._ptm.build(self._data)
        self.addCleanup(self._vtm.destroy)
        self._vtm.build(self._data)
        self._add_hosts_to_tunnel_zone()
        for binding in self._data['bindings']:
            vport = self._vtm.get_resource(binding['vport'])
            bind_iface = binding['interface']
            if isinstance(bind_iface, dict):
                # We are specifying the vms inside the binding
                iface_def = self._update_addresses(bind_iface['definition'],
                                                   vport)
                iface_type = bind_iface['type']
                hostname = bind_iface['hostname']
                host = service.get_container_by_hostname(hostname)
                iface = getattr(host, "create_%s" % iface_type)(**iface_def)
                self.addCleanup(getattr(host, "destroy_%s" % iface_type),
                                iface)
            else:
                # It's a vm already created and saved as a resource
                iface = self._ptm.get_resource(binding['interface'])

            vport_id = self._get_port_id(vport)

            # Do the actual binding
            binding = iface.compute_host.bind_port(iface, vport_id)
            self.addCleanup(iface.compute_host.unbind_port, iface)
            self._mappings[vport_id] = iface
            await_port_active(vport_id)
Example #5
0
    def bind(self, filename=None, data=None):
        # Build a new virtual topology at every binding, destroy at the end
        self._ptm.build()
        self._vtm.build()

        self._data = self._get_data(filename, data)
        # Get a new API ref to work around previous ZK failures
        self._api = get_midonet_api()

        bindings = self._data['bindings']
        for b in bindings:
            binding = b['binding']

            host_id = binding['host_id']
            iface_id = binding['interface_id']
            device_name = binding['device_name']
            port_id = binding['port_id']

            self._port_if_map[(device_name, port_id)] = \
                (host_id, iface_id)

            device_port = self._vtm.get_device_port(device_name, port_id)
            mn_vport = device_port._mn_resource
            if mn_vport.get_type() == 'InteriorRouter' or \
               mn_vport.get_type() == 'InteriorBridge':
                LOG.error("Cannot bind interior port")
                sys.exit(-1) # TODO: make this fancier

            mn_vport_id = mn_vport.get_id()
            host = service.get_container('midolman', host_id)

            # Clean up yamls or remove them completely, this is so ugly
            _host = filter(
                lambda x: x['host']['id'] == host_id,
                self._ptm._hosts)[0]['host']
            _interface = filter(
                lambda x: x['interface']['id'] == iface_id,
                _host['interfaces']
            )[0]['interface']

            # Remove kwargs we are not interested in
            _interface_vm = dict(_interface)
            del _interface_vm['ipv6_addr']
            del _interface_vm['type']
            del _interface_vm['id']

            iface = host.create_vmguest(**_interface_vm)
            self._port_if_map[(device_name, port_id)] = iface
            iface.vport_id = mn_vport_id
            self._vms.append(iface)
            iface.clear_arp(sync=True)
            iface_name = iface.get_host_ifname()  # was iface.interface['ifname']
            mn_host_id = host.get_midonet_host_id()  # was iface.host['mn_host_id']
            host.bind_port(iface, mn_vport_id)
            await_port_active(mn_vport_id)
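The filter(...)[0] lookups above rely on Python 2's filter() returning a list; wrapped in a small next()-based helper they stay correct on Python 3 (where filter() is lazy) and fail with a clearer message when nothing matches. A sketch, not part of the original code:

def find_one(items, predicate, what='item'):
    # Return the first element matching predicate, or raise a descriptive
    # error instead of an opaque IndexError.
    match = next((item for item in items if predicate(item)), None)
    if match is None:
        raise LookupError('no matching %s found' % what)
    return match

# Usage sketch, mirroring the host lookup above:
# _host = find_one(self._ptm._hosts,
#                  lambda x: x['host']['id'] == host_id, 'host')['host']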
Example #6
0
def test_qos_dscp_mark_on_network():
    """
    1) Test that setting a DSCP rule on a network sets the IP DS header of
    all packets that ingress the cloud on that network to the given DSCP
    mark, and rewrites the DS header to that mark on packets that already
    have one set.

    2) Test that clearing the DSCP rule on a network stops setting the IP
    DS header by default and no longer changes the DS header on packets
    that already have one set.

    3) Test that setting and clearing the DSCP rule on a network also
    affects traffic on any new ports created on that network.
    """

    try:
        PTM.build()
        VTM.build(ptm=PTM)
        VTM.qos_pol1.add_dscp_rule({'dscp_mark': 11})

        VTM.main_bridge.set_qos_policy(VTM.qos_pol1)

        VTM.vm1.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=11)

        VTM.main_bridge.add_port({'id': 3, 'type': 'exterior'})
        vm3_port = VTM.main_bridge.get_port(3)
        host1 = service.get_container_by_hostname('midolman1')
        vm3data = {'hw_addr': 'aa:bb:cc:00:00:11',
                   'ipv4_addr': ['172.16.1.4/24'],
                   'ipv4_gw': '172.16.1.1'}
        vm3 = host1.create_vmguest(**vm3data)
        port3_real_id = vm3_port.get_real_id()
        host1.bind_port(vm3, port3_real_id)
        utils.await_port_active(port3_real_id, active=True)

        vm3.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=11)

        VTM.main_bridge.clear_qos_policy()

        VTM.vm1.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=None)

        vm3.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=None)

        VTM.qos_pol2.clear_dscp_rules()
        VTM.qos_pol1.clear_dscp_rules()

        host1.destroy_vmguest(vm3)
    finally:
        VTM.destroy()
        PTM.destroy()
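This test tears the topology down in a finally block, while Examples #1 and #4 register teardown with addCleanup. contextlib.ExitStack (backported as contextlib2 on Python 2) gives the same LIFO ordering without nesting try/finally; a minimal sketch, assuming the PTM/VTM objects with build()/destroy() used above:

import contextlib

with contextlib.ExitStack() as cleanup:
    PTM.build()
    cleanup.callback(PTM.destroy)
    VTM.build(ptm=PTM)
    cleanup.callback(VTM.destroy)
    # ... test body ...
    # On exit (success or failure) VTM.destroy() runs first, then
    # PTM.destroy(), matching the finally block above.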
Example #7
0
def test_qos_dscp_mark_on_network():
    """
    1) Test that setting a DSCP rule on a network sets the IP DS header of
    all packets that ingress the cloud on that network to the given DSCP
    mark, and rewrites the DS header to that mark on packets that already
    have one set.

    2) Test that clearing the DSCP rule on a network stops setting the IP
    DS header by default and no longer changes the DS header on packets
    that already have one set.

    3) Test that setting and clearing the DSCP rule on a network also
    affects traffic on any new ports created on that network.
    """

    try:
        PTM.build()
        VTM.build(ptm=PTM)
        VTM.qos_pol1.add_dscp_rule({'dscp_mark': 11})

        VTM.main_bridge.set_qos_policy(VTM.qos_pol1)

        VTM.vm1.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=11)

        VTM.main_bridge.add_port({'id': 3, 'type': 'exterior'})
        vm3_port = VTM.main_bridge.get_port(3)
        host1 = service.get_container_by_hostname('midolman1')
        vm3data = {'hw_addr': 'aa:bb:cc:00:00:11',
                   'ipv4_addr': ['172.16.1.4/24'],
                   'ipv4_gw': '172.16.1.1'}
        vm3 = host1.create_vmguest(**vm3data)
        port3_real_id = vm3_port.get_real_id()
        host1.bind_port(vm3, port3_real_id)
        utils.await_port_active(port3_real_id, active=True)

        vm3.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=11)

        VTM.main_bridge.clear_qos_policy()

        VTM.vm1.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=None)

        vm3.verify_packet_field(
            target_iface=VTM.vm2,
            field='tos', value=None)

        VTM.qos_pol2.clear_dscp_rules()
        VTM.qos_pol1.clear_dscp_rules()

        host1.destroy_vmguest(vm3)
    finally:
        VTM.destroy()
        PTM.destroy()
Example #8
0
    def build(self, binding_data=None, ptm=None):
        super(QosTopology, self).build(binding_data)

        ptm.add_host_to_tunnel_zone('midolman1', 'tztest1')
        ptm.add_host_to_tunnel_zone('midolman2', 'tztest1')

        self.add_bridge({'name': 'main1'})
        self.main_bridge = self.get_bridge('main1')
        self.main_bridge.add_dhcp_subnet(
            {'id': 1,
             'ipv4_gw': '172.16.1.254',
             'network': '172.16.1.0/24'})

        self.main_bridge.add_port({'id': 1, 'type': 'exterior'})
        self.main_bridge.add_port({'id': 2, 'type': 'exterior'})
        self.vm1_port = self.main_bridge.get_port(1)
        self.vm2_port = self.main_bridge.get_port(2)

        host1 = service.get_container_by_hostname('midolman1')
        host2 = service.get_container_by_hostname('midolman2')

        vm1data = {'hw_addr': 'aa:bb:cc:00:00:11',
                   'ipv4_addr': ['172.16.1.2/24'],
                   'ipv4_gw': '172.16.1.1'}
        vm2data = {'hw_addr': 'aa:bb:cc:00:00:22',
                   'ipv4_addr': ['172.16.1.3/24'],
                   'ipv4_gw': '172.16.1.1'}

        self.vm1 = host1.create_vmguest(**vm1data)
        self.vm2 = host2.create_vmguest(**vm2data)

        ptm.addCleanup(host1.destroy_vmguest, self.vm1)
        ptm.addCleanup(host2.destroy_vmguest, self.vm2)

        port1_real_id = self.vm1_port.get_real_id()
        port2_real_id = self.vm2_port.get_real_id()

        host1.bind_port(self.vm1, port1_real_id)
        host2.bind_port(self.vm2, port2_real_id)

        utils.await_port_active(port1_real_id, active=True)
        utils.await_port_active(port2_real_id, active=True)

        self.qos_pol1 = self.add_qos_policy(
            {'name': 'pol1', 'description': 'Description',
             'shared': True})
        self.qos_pol2 = self.add_qos_policy(
            {'name': 'pol2', 'description': 'Description Two',
             'shared': True})

        assert_that(self.qos_pol1)
        assert_that(self.qos_pol2)
Example #9
0
    def build(self, binding_data=None, ptm=None):
        super(QosTopology, self).build(binding_data)

        ptm.add_host_to_tunnel_zone('midolman1', 'tztest1')
        ptm.add_host_to_tunnel_zone('midolman2', 'tztest1')

        self.add_bridge({'name': 'main1'})
        self.main_bridge = self.get_bridge('main1')
        self.main_bridge.add_dhcp_subnet(
            {'id': 1,
             'ipv4_gw': '172.16.1.254',
             'network': '172.16.1.0/24'})

        self.main_bridge.add_port({'id': 1, 'type': 'exterior'})
        self.main_bridge.add_port({'id': 2, 'type': 'exterior'})
        self.vm1_port = self.main_bridge.get_port(1)
        self.vm2_port = self.main_bridge.get_port(2)

        host1 = service.get_container_by_hostname('midolman1')
        host2 = service.get_container_by_hostname('midolman2')

        vm1data = {'hw_addr': 'aa:bb:cc:00:00:11',
                   'ipv4_addr': ['172.16.1.2/24'],
                   'ipv4_gw': '172.16.1.1'}
        vm2data = {'hw_addr': 'aa:bb:cc:00:00:22',
                   'ipv4_addr': ['172.16.1.3/24'],
                   'ipv4_gw': '172.16.1.1'}

        self.vm1 = host1.create_vmguest(**vm1data)
        self.vm2 = host2.create_vmguest(**vm2data)

        ptm.addCleanup(host1.destroy_vmguest, self.vm1)
        ptm.addCleanup(host2.destroy_vmguest, self.vm2)

        port1_real_id = self.vm1_port.get_real_id()
        port2_real_id = self.vm2_port.get_real_id()

        host1.bind_port(self.vm1, port1_real_id)
        host2.bind_port(self.vm2, port2_real_id)

        utils.await_port_active(port1_real_id, active=True)
        utils.await_port_active(port2_real_id, active=True)

        self.qos_pol1 = self.add_qos_policy(
            {'name': 'pol1', 'description': 'Description',
             'shared': True})
        self.qos_pol2 = self.add_qos_policy(
            {'name': 'pol2', 'description': 'Description Two',
             'shared': True})

        assert_that(self.qos_pol1)
        assert_that(self.qos_pol2)
Example #10
0
def test_simple_port_migration():
    """
    Title: Tests that flow state survives a port migration

    The topology is set up in such a way that both conntrack
    and NAT flow state is generated.

    Send non-FIP-to-FIP UDP packets between two agents, plus the return packets
    Unbind the public port and bind it to a different VM
    Verify that the previously established flows still work in both directions
    """

    binding_type = BM.get_binding_data()['binding_type']

    agent2 = service.get_container_by_hostname('midolman2')
    agent3 = service.get_container_by_hostname('midolman3')

    # vm on midolman1
    private_interface_vm = BM.get_interface_on_vport('private_port')
    # vm on midolman2
    public_interface_vm = BM.get_interface_on_vport('public_port')
    # port to be migrated
    public_port = VTM.get_resource('public_port')['port']

    # new free vm on midolman3
    free_interface_vm_data = {
        'hw_addr': public_port['mac_address'],
        'ipv4_addr': [public_port['fixed_ips'][0]['ip_address'] + '/24'],
        'ipv4_gw': '10.0.1.1'
    }
    free_interface_vm = agent3.create_vmguest(**free_interface_vm_data)
    VTM.addCleanup(agent3.destroy_vmguest, free_interface_vm)

    fip = VTM.get_resource(
        'public_port_fip')['floatingip']['floating_ip_address']

    # Generate flow state
    snat = check_forward_flow(private_interface_vm, public_interface_vm, fip,
                              50000, 80)
    check_return_flow(public_interface_vm, private_interface_vm, snat['ip'],
                      snat['port'], 50000, 80)

    # Unbind/bind port to a different host
    if binding_type == BindingType.API:
        agent2.unbind_port(public_interface_vm, type=binding_type)
    agent3.bind_port(free_interface_vm, public_port['id'], type=binding_type)

    await_port_active(public_port['id'], active=True)

    check_return_flow(free_interface_vm, private_interface_vm, snat['ip'],
                      snat['port'], 50000, 80)
Example #11
0
def test_simple_port_migration():
    """
    Title: Tests that flow state survives a port migration

    The topology is set up in such a way that both conntrack
    and NAT flow state is generated.

    Send non-FIP-to-FIP UDP packets between two agents, plus the return packets
    Unbind the public port and bind it to a different VM
    Verify that the previously established flows still work in both directions
    """

    binding_type = BM.get_binding_data()['binding_type']

    agent2 = service.get_container_by_hostname('midolman2')
    agent3 = service.get_container_by_hostname('midolman3')

    # vm on midolman1
    private_interface_vm = BM.get_interface_on_vport('private_port')
    # vm on midolman2
    public_interface_vm = BM.get_interface_on_vport('public_port')
    # port to be migrated
    public_port = VTM.get_resource('public_port')['port']

    # new free vm on midolman3
    free_interface_vm_data = {
        'hw_addr': public_port['mac_address'],
        'ipv4_addr': [public_port['fixed_ips'][0]['ip_address'] + '/24'],
        'ipv4_gw': '10.0.1.1'}
    free_interface_vm = agent3.create_vmguest(**free_interface_vm_data)
    VTM.addCleanup(agent3.destroy_vmguest, free_interface_vm)

    fip = VTM.get_resource('public_port_fip')['floatingip']['floating_ip_address']

    # Generate flow state
    snat = check_forward_flow(
        private_interface_vm, public_interface_vm, fip, 50000, 80)
    check_return_flow(
        public_interface_vm, private_interface_vm, snat['ip'], snat['port'], 50000, 80)

    # Unbind/bind port to a different host
    if binding_type == BindingType.API:
        agent2.unbind_port(public_interface_vm, type=binding_type)
    agent3.bind_port(free_interface_vm, public_port['id'], type=binding_type)

    await_port_active(public_port['id'], active=True)

    check_return_flow(
        free_interface_vm, private_interface_vm, snat['ip'], snat['port'], 50000, 80)
Example #12
0
    def unbind(self):

        bindings = self._data['bindings']

        for vm in self._vms:
            # Remove binding
            compute_host_id = vm.compute_host.get_midonet_host_id()
            for port in self._api.get_host(compute_host_id).get_ports():
                if port.get_interface_name() == vm.get_host_ifname():
                    port.delete()
                    # FIXME: possibly replace vm.vport_id with the corresponding
                    # port object so we don't need to store it
                    await_port_active(vm.vport_id, active=False)

            # Remove vm
            vm.destroy()

        # Destroy the virtual topology
        self._vtm.destroy()
        self._ptm.destroy()
Example #13
0
    def unbind(self):

        bindings = self._data['bindings']
        for b in bindings:
            binding = b['binding']

            host_id = binding['host_id']
            iface_id = binding['interface_id']

            iface = self._ptm.get_interface(host_id, iface_id)
            iface_name = iface.interface['ifname']
            mn_host_id = iface.host['mn_host_id']
            mn_vport_id = iface.vport_id

            for hip in self._api.get_host(mn_host_id).get_ports():
                if hip.get_interface_name() == iface_name:
                    hip.delete()
                    iface.vport_id = None
                    await_port_active(mn_vport_id, active=False)

        self._port_if_map = {}
Example #14
0
    def unbind(self):

        bindings = self._data['bindings']
        for b in bindings:
            binding = b['binding']

            host_id = binding['host_id']
            iface_id = binding['interface_id']

            iface = self._ptm.get_interface(host_id, iface_id)
            iface_name = iface.interface['ifname']
            mn_host_id = iface.host['mn_host_id']
            mn_vport_id = iface.vport_id

            for hip in self._api.get_host(mn_host_id).get_ports():
                if hip.get_interface_name() == iface_name:
                    hip.delete()
                    iface.vport_id = None
                    await_port_active(mn_vport_id, active=False)

        self._port_if_map = {}
Example #15
0
def build_simple_topology():
    api = service.get_container_by_hostname('cluster1').get_midonet_api()
    host = service.get_container_by_hostname('midolman1')
    host_id = host.get_midonet_host_id()
    interface = host.create_vmguest()

    # Add host to tunnel zone
    tz = api.add_gre_tunnel_zone() \
        .name('tz-testing') \
        .create()
    tz_host = tz.add_tunnel_zone_host() \
        .ip_address(host.get_ip_address()) \
        .host_id(host_id) \
        .create()

    # Create bridge
    bridge = api.add_bridge() \
        .name('bridge-testing') \
        .tenant_id('tenant-testing') \
        .create()

    # Create port
    port = bridge \
        .add_port() \
        .create()
    port_id = port.get_id()

    # Bind port to interface
    host_ifname = interface.get_binding_ifname()
    api.get_host(host_id) \
        .add_host_interface_port() \
        .port_id(port_id) \
        .interface_name(host_ifname).create()

    await_port_active(port_id, active=True, timeout=60, sleep_period=1)

    return host, interface, tz, bridge, port
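build_simple_topology() returns everything a caller needs for teardown. A possible counterpart is sketched below; it mirrors the unbind loops in the unbind() examples (#12/#13) and assumes, as elsewhere in this section, that the MidoNet resources expose delete(), that hosts expose destroy_vmguest(), and that the section's await_port_active utility is available. The name and ordering are illustrative only.

def destroy_simple_topology(host, interface, tz, bridge, port):
    # Remove the host/interface binding first, then the virtual resources,
    # and finally the VM interface itself.
    api = service.get_container_by_hostname('cluster1').get_midonet_api()
    host_id = host.get_midonet_host_id()
    for hip in api.get_host(host_id).get_ports():
        if hip.get_interface_name() == interface.get_binding_ifname():
            hip.delete()
            await_port_active(port.get_id(), active=False)
    port.delete()
    bridge.delete()
    tz.delete()
    host.destroy_vmguest(interface)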
Example #16
0
def build_simple_topology():
    api = service.get_container_by_hostname('cluster1').get_midonet_api()
    host = service.get_container_by_hostname('midolman1')
    host_id = host.get_midonet_host_id()
    interface = host.create_vmguest()

    # Add host to tunnel zone
    tz = api.add_gre_tunnel_zone() \
        .name('tz-testing') \
        .create()
    tz.add_tunnel_zone_host() \
        .ip_address(host.get_ip_address()) \
        .host_id(host_id) \
        .create()

    # Create bridge
    bridge = api.add_bridge() \
        .name('bridge-testing') \
        .tenant_id('tenant-testing') \
        .create()

    # Create port
    port = bridge \
        .add_port() \
        .create()
    port_id = port.get_id()

    # Bind port to interface
    host_ifname = interface.get_binding_ifname()
    api.get_host(host_id) \
        .add_host_interface_port() \
        .port_id(port_id) \
        .interface_name(host_ifname).create()

    await_port_active(port_id, active=True, timeout=60, sleep_period=1)

    return host, interface, tz, bridge, port
Example #17
0
def test_compat_flowstate():
    """
    Title: Tests that flow state changes are backwards compatible

    The topology is set up in such a way that both conntrack
    and NAT flow state is generated.

    Send non-FIP-to-FIP UDP packets between two agents in both directions
    Restart one of the agents with the package built and stored in the override
    Verify that previous flows still work in both directions
    Verify that new flows can be created in both directions
    """

    # vms on midolman1
    public_vm1 = BM.get_interface_on_vport('public_1')
    private_vm1 = BM.get_interface_on_vport('private_1')
    # vms on midolman2
    public_vm2 = BM.get_interface_on_vport('public_2')
    private_vm2 = BM.get_interface_on_vport('private_2')

    fip1 = VTM.get_resource('public_1_fip')['floatingip']['floating_ip_address']
    fip2 = VTM.get_resource('public_2_fip')['floatingip']['floating_ip_address']

    agent = service.get_container_by_hostname('midolman1')

    snat_1 = check_forward_flow(private_vm1, public_vm2, fip2, 50000, 80)
    check_return_flow(public_vm2, private_vm1, snat_1['ip'], snat_1['port'], 50000, 80)

    snat_2 = check_forward_flow(private_vm2, public_vm1, fip1, 50000, 80)
    check_return_flow(public_vm1, private_vm2, snat_2['ip'], snat_2['port'], 50000, 80)

    # When: rebooting the agent with the updated package (waiting for the ports
    #        to be up).
    public_vm1_id = VTM.get_resource('public_1')['port']['id']
    private_vm1_id = VTM.get_resource('private_1')['port']['id']
    agent.stop(wait=True)
    await_port_active(public_vm1_id, active=False)
    await_port_active(private_vm1_id, active=False)
    agent.start(wait=True)
    await_port_active(public_vm1_id, active=True)
    await_port_active(private_vm1_id, active=True)

    # Eventually: it may take some time before we gather flow state from storage;
    #             keep trying until we succeed.
    attempts = 10
    while True:
        try:
            # Check that flow state keys are fetched from storage
            check_return_flow(public_vm2, private_vm1, snat_1['ip'], snat_1['port'], 50000, 80)
            check_return_flow(public_vm1, private_vm2, snat_2['ip'], snat_2['port'], 50000, 80)
            break
        except Exception:
            if attempts > 0:
                time.sleep(5)
                attempts -= 1
            else:
                raise

    # And: Check that the same port is used on the same forward flow
    assert_that(check_forward_flow(private_vm1, public_vm2, fip2, 50000, 80),
                equal_to(snat_1))
    assert_that(check_forward_flow(private_vm2, public_vm1, fip1, 50000, 80),
                equal_to(snat_2))

    # And: we can create new flows between two agents on different versions
    #      in both directions.
    snat_1 = check_forward_flow(private_vm1, public_vm2, fip2, 50001, 81)
    check_return_flow(public_vm2, private_vm1, snat_1['ip'], snat_1['port'], 50001, 81)

    snat_2 = check_forward_flow(private_vm2, public_vm1, fip1, 50001, 81)
    check_return_flow(public_vm1, private_vm2, snat_2['ip'], snat_2['port'], 50001, 81)
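The attempts/while True loop above (the same loop appears again in Example #18) is a recurring shape in these tests; it can be factored into a small retry helper. A sketch, not part of the original code:

import time

def retry(check, attempts=10, interval=5):
    # Call check() until it succeeds, sleeping between attempts and
    # re-raising the last exception once the attempts are exhausted.
    while True:
        try:
            return check()
        except Exception:
            attempts -= 1
            if attempts <= 0:
                raise
            time.sleep(interval)

# Usage sketch, covering the first check in the loop above:
# retry(lambda: check_return_flow(public_vm2, private_vm1, snat_1['ip'],
#                                 snat_1['port'], 50000, 80))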
Example #18
0
def test_compat_flowstate():
    """
    Title: Tests that flow state changes are backwards compatible

    The topology is set up in such a way that both conntrack
    and NAT flow state is generated.

    Send non-FIP-to-FIP UDP packets between two agents in both directions
    Restart one of the agents with the package built and stored in the override
    Verify that previous flows still work in both directions
    Verify that new flows can be created in both directions
    """

    # vms on midolman1
    public_vm1 = BM.get_interface_on_vport('public_1')
    private_vm1 = BM.get_interface_on_vport('private_1')
    # vms on midolman2
    public_vm2 = BM.get_interface_on_vport('public_2')
    private_vm2 = BM.get_interface_on_vport('private_2')

    fip1 = VTM.get_resource(
        'public_1_fip')['floatingip']['floating_ip_address']
    fip2 = VTM.get_resource(
        'public_2_fip')['floatingip']['floating_ip_address']

    agent = service.get_container_by_hostname('midolman1')

    snat_1 = check_forward_flow(private_vm1, public_vm2, fip2, 50000, 80)
    check_return_flow(public_vm2, private_vm1, snat_1['ip'], snat_1['port'],
                      50000, 80)

    snat_2 = check_forward_flow(private_vm2, public_vm1, fip1, 50000, 80)
    check_return_flow(public_vm1, private_vm2, snat_2['ip'], snat_2['port'],
                      50000, 80)

    # When: rebooting the agent with the updated package (waiting for the ports
    #        to be up).
    public_vm1_id = VTM.get_resource('public_1')['port']['id']
    private_vm1_id = VTM.get_resource('private_1')['port']['id']
    agent.stop(wait=True)
    await_port_active(public_vm1_id, active=False)
    await_port_active(private_vm1_id, active=False)
    agent.start(wait=True)
    await_port_active(public_vm1_id, active=True)
    await_port_active(private_vm1_id, active=True)

    # Eventually: it may take some time before we gather flow state from storage;
    #             keep trying until we succeed.
    attempts = 10
    while True:
        try:
            # Check that flow state keys are fetched from storage
            check_return_flow(public_vm2, private_vm1, snat_1['ip'],
                              snat_1['port'], 50000, 80)
            check_return_flow(public_vm1, private_vm2, snat_2['ip'],
                              snat_2['port'], 50000, 80)
            break
        except Exception:
            if attempts > 0:
                time.sleep(5)
                attempts -= 1
            else:
                raise

    # And: Check that the same port is used on the same forward flow
    assert_that(check_forward_flow(private_vm1, public_vm2, fip2, 50000, 80),
                equal_to(snat_1))
    assert_that(check_forward_flow(private_vm2, public_vm1, fip1, 50000, 80),
                equal_to(snat_2))

    # And: we can create new flows between two agents on different versions
    #      in both directions.
    snat_1 = check_forward_flow(private_vm1, public_vm2, fip2, 50001, 81)
    check_return_flow(public_vm2, private_vm1, snat_1['ip'], snat_1['port'],
                      50001, 81)

    snat_2 = check_forward_flow(private_vm2, public_vm1, fip1, 50001, 81)
    check_return_flow(public_vm1, private_vm2, snat_2['ip'], snat_2['port'],
                      50001, 81)
Example #19
0
    def bind(self, filename=None):
        self._ptm.build()
        self._vtm.build()
        # Get a new API ref to work around previous ZK failures
        self._api = get_midonet_api()

        bindings = self._data['bindings']
        for b in bindings:
            binding = b['binding']

            host_id = binding['host_id']
            iface_id = binding['interface_id']
            device_name = binding['device_name']
            port_id = binding['port_id']

            device_port = self._vtm.get_device_port(device_name, port_id)
            mn_vport = device_port._mn_resource
            if mn_vport.get_type() == 'InteriorRouter' or \
               mn_vport.get_type() == 'InteriorBridge':
                LOG.error("Cannot bind interior port")
                sys.exit(-1)  # TODO: make this fancier

            # FIXME: some hosts are specified by midonet host_id while others
            # are referenced by hostname. Need a coherent mechanism.
            # Clean up everything not related to bindings from here.
            if 'host_id' in binding:
                host_id = binding['host_id']
                # FIXME:
                # Clean up yamls or remove them completely, this is so ugly
                _host = filter(
                    lambda x: x['host']['id'] == host_id,
                    self._ptm._hosts)[0]['host']
            elif 'hostname' in binding:
                hostname = binding['hostname']
                _host = filter(
                    lambda x: x['host']['hostname'] == hostname,
                    self._ptm._hosts)[0]['host']
            else:
                raise RuntimeError("Hosts in the binding should have a"
                                   "host_id or a hostname property")

            _interface = filter(
                lambda x: x['interface']['id'] == iface_id,
                _host['interfaces']
            )[0]['interface']

            mn_vport_id = mn_vport.get_id()
            host = service.get_container_by_hostname('midolman%s' % host_id)

            if _interface['type'] == 'netns':
                iface = host.create_vmguest(**_interface)
            elif _interface['type'] == 'trunk':
                iface = host.create_trunk(**_interface)
            else:  # provided
                iface = host.create_provided(**_interface)

            self._port_if_map[(device_name, port_id)] = iface

            iface.vport_id = mn_vport_id
            self._interfaces.append((iface, host))
            iface.clear_arp(sync=True)

            host.bind_port(iface, mn_vport_id)
            await_port_active(mn_vport_id)
Example #20
0
def await_ports(active):
    await_port_active(left_uplink_port()._mn_resource.get_id(), active=active)
    await_port_active(downlink_port()._mn_resource.get_id(), active=active)
Example #21
0
    def bind(self, filename=None):
        self._ptm.build()
        self._vtm.build()
        # Get a new API ref to work around previous ZK failures
        self._api = get_midonet_api()

        bindings = self._data['bindings']
        for b in bindings:
            binding = b['binding']

            host_id = binding['host_id']
            iface_id = binding['interface_id']
            device_name = binding['device_name']
            port_id = binding['port_id']

            device_port = self._vtm.get_device_port(device_name, port_id)
            mn_vport = device_port._mn_resource
            if mn_vport.get_type() == 'InteriorRouter' or \
               mn_vport.get_type() == 'InteriorBridge':
                LOG.error("Cannot bind interior port")
                sys.exit(-1)  # TODO: make this fancier

            # FIXME: some hosts are specified by midonet host_id while others
            # are referenced by hostname. Need a coherent mechanism.
            # Clean up everything not related to bindings from here.
            if 'host_id' in binding:
                host_id = binding['host_id']
                # FIXME:
                # Clean up yamls or remove them completely, this is so ugly
                _host = filter(lambda x: x['host']['id'] == host_id,
                               self._ptm._hosts)[0]['host']
            elif 'hostname' in binding:
                hostname = binding['hostname']
                _host = filter(lambda x: x['host']['hostname'] == hostname,
                               self._ptm._hosts)[0]['host']
            else:
                raise RuntimeError("Hosts in the binding should have a"
                                   "host_id or a hostname property")

            _interface = filter(lambda x: x['interface']['id'] == iface_id,
                                _host['interfaces'])[0]['interface']

            mn_vport_id = mn_vport.get_id()
            host = service.get_container_by_hostname('midolman%s' % host_id)

            if _interface['type'] == 'netns':
                iface = host.create_vmguest(**_interface)
            elif _interface['type'] == 'trunk':
                iface = host.create_trunk(**_interface)
            else:  # provided
                iface = host.create_provided(**_interface)

            self._port_if_map[(device_name, port_id)] = iface

            iface.vport_id = mn_vport_id
            self._interfaces.append((iface, host))
            iface.clear_arp(sync=True)

            host.bind_port(iface, mn_vport_id)
            await_port_active(mn_vport_id)
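Examples #1 and #4 create interfaces via getattr(host, "create_%s" % iface_type); the if/elif chain above can be collapsed the same way once the 'netns' type is mapped to create_vmguest. A sketch, assuming only the create_* methods already used in these examples:

_FACTORY_BY_TYPE = {
    'netns': 'create_vmguest',   # netns interfaces are backed by VM guests
    'trunk': 'create_trunk',
    'provided': 'create_provided',
}

def create_interface(host, interface_def):
    # Pick the factory from the interface 'type' field, falling back to
    # create_provided just like the else branch above.
    method = _FACTORY_BY_TYPE.get(interface_def.get('type'), 'create_provided')
    return getattr(host, method)(**interface_def)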
Example #22
0
def test_non_vpn_subnet():
    left_router, left_peer_address, left_subnet = VTM.get_site_data('left')
    right_router, right_peer_address, right_subnet = VTM.get_site_data('right')

    left_tenant, right_tenant, _ = BM.get_binding_data()['config']['tenants']

    left_vpn = VTM.add_vpn_service('left', 'left_vpn', left_tenant, left_router,
                                   left_subnet)
    right_vpn = VTM.add_vpn_service('right', 'right_vpn', right_tenant, right_router,
                                    right_subnet)

    # Kilo version, supported also in liberty and mitaka
    # Create two connections
    VTM.add_ipsec_site_connection(
            'left', 'left_to_right', left_tenant, right_peer_address,
            vpn=left_vpn, peer_cidrs=[right_subnet['subnet']['cidr']])
    VTM.add_ipsec_site_connection(
            'right', 'right_to_left', right_tenant, left_peer_address,
            vpn=right_vpn, peer_cidrs=[left_subnet['subnet']['cidr']])

    # Add additional subnet on network left and attach to router left
    new_network = VTM.create_resource(VTM.api.create_network(
            {'network': {'name': 'net_private_left_2',
                         'tenant_id': left_tenant}}))
    new_subnet = VTM.create_resource(VTM.api.create_subnet(
            {'subnet': {'name': 'subnet_private_left_2',
                        'network_id': new_network['network']['id'],
                        'ip_version': 4,
                        'cidr': '10.1.0.0/24',
                        'gateway_ip': '10.1.0.1',
                        'tenant_id': left_tenant}}))

    # Add router interface for the new subnet
    VTM.api.add_interface_router(
            left_router['router']['id'], {'subnet_id': new_subnet['subnet']['id']})
    VTM.addCleanup(VTM.api.remove_interface_router,
                   left_router['router']['id'],
                   {'subnet_id': new_subnet['subnet']['id']})

    # Create port
    new_port = VTM.create_resource(
            VTM.api.create_port({'port': {'name': 'port_left_2',
                                          'network_id': new_network['network']['id'],
                                          'admin_state_up': True,
                                          'tenant_id': left_tenant}}))
    # Create vm (on the same host as the sibling vm) and bind
    host = BM.get_interface_on_vport('port_left').compute_host
    new_vm = host.create_vmguest(
            **{'ipv4_gw': '10.1.0.1',
               'ipv4_addr': [new_port['port']['fixed_ips'][0]['ip_address']+'/24'],
               'hw_addr': new_port['port']['mac_address']})
    BM.addCleanup(host.destroy_vmguest, new_vm)
    host.bind_port(new_vm, new_port['port']['id'])
    BM.addCleanup(host.unbind_port, new_vm)
    await_port_active(new_port['port']['id'])

    # Ping from left to right and vice versa
    ping('port_left', 'port_right')
    ping('port_right', 'port_left')

    # Check that connections from the new (non-VPN) subnet do not work
    ping(new_vm, 'port_right', expected_failure=True)
    ping('port_right', new_vm, expected_failure=True)
Example #23
0
def test_non_vpn_subnet():
    left_router, left_peer_address, left_subnet = VTM.get_site_data('left')
    right_router, right_peer_address, right_subnet = VTM.get_site_data('right')

    left_tenant, right_tenant, _ = BM.get_binding_data()['config']['tenants']

    left_vpn = VTM.add_vpn_service('left', 'left_vpn', left_tenant, left_router,
                                   left_subnet)
    right_vpn = VTM.add_vpn_service('right', 'right_vpn', right_tenant, right_router,
                                    right_subnet)

    # Kilo version, supported also in liberty and mitaka
    # Create two connections
    VTM.add_ipsec_site_connection(
            'left', 'left_to_right', left_tenant, right_peer_address,
            vpn=left_vpn, peer_cidrs=[right_subnet['subnet']['cidr']])
    VTM.add_ipsec_site_connection(
            'right', 'right_to_left', right_tenant, left_peer_address,
            vpn=right_vpn, peer_cidrs=[left_subnet['subnet']['cidr']])

    # Add additional subnet on network left and attach to router left
    new_network = VTM.create_resource(VTM.api.create_network(
            {'network': {'name': 'net_private_left_2',
                         'tenant_id': left_tenant}}))
    new_subnet = VTM.create_resource(VTM.api.create_subnet(
            {'subnet': {'name': 'subnet_private_left_2',
                        'network_id': new_network['network']['id'],
                        'ip_version': 4,
                        'cidr': '10.1.0.0/24',
                        'gateway_ip': '10.1.0.1',
                        'tenant_id': left_tenant}}))

    # Add router interface for the new subnet
    VTM.api.add_interface_router(
            left_router['router']['id'], {'subnet_id': new_subnet['subnet']['id']})
    VTM.addCleanup(VTM.api.remove_interface_router,
                   left_router['router']['id'],
                   {'subnet_id': new_subnet['subnet']['id']})

    # Create port
    new_port = VTM.create_resource(
            VTM.api.create_port({'port': {'name': 'port_left_2',
                                          'network_id': new_network['network']['id'],
                                          'admin_state_up': True,
                                          'tenant_id': left_tenant}}))
    # Create vm (on the same host as the sibling vm) and bind
    host = BM.get_interface_on_vport('port_left').compute_host
    new_vm = host.create_vmguest(
            **{'ipv4_gw': '10.1.0.1',
               'ipv4_addr': [new_port['port']['fixed_ips'][0]['ip_address']+'/24'],
               'hw_addr': new_port['port']['mac_address']})
    BM.addCleanup(host.destroy_vmguest, new_vm)
    host.bind_port(new_vm, new_port['port']['id'])
    BM.addCleanup(host.unbind_port, new_vm)
    await_port_active(new_port['port']['id'])

    # Ping from left to right and vice versa
    ping('port_left', 'port_right')
    ping('port_right', 'port_left')

    # Check that connections from the new (non-VPN) subnet do not work
    ping(new_vm, 'port_right', expected_failure=True)
    ping('port_right', new_vm, expected_failure=True)