def test_dup_project_fails(self):
        """Verify resync 'new_fails' stale mode: a second keystone project
        reusing an existing project's name must NOT be synced into VNC
        (reading the new id raises NoIdError).
        """
        logger.info('Creating first project in "keystone"')
        proj_id = str(uuid.uuid4())
        proj_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
        # Read back from VNC to confirm resync created the project.
        proj_obj = self._vnc_lib.project_read(id=proj_id)
        self.assertThat(proj_obj.name, Equals(proj_name))
        # create a VN in it so old isn't removed (due to synchronous delete)
        # when new with same name is created
        vn_obj = vnc_api.VirtualNetwork('vn-%s' % (self.id()), proj_obj)
        self._vnc_lib.virtual_network_create(vn_obj)

        # Make the driver reject the newer duplicate instead of renaming
        # it; the original mode is restored in the finally below.
        stale_mode = self.openstack_driver._resync_stale_mode
        self.openstack_driver._resync_stale_mode = 'new_fails'
        try:
            logger.info(
                'Creating second project with same name diff id in "keystone"')
            new_proj_id = str(uuid.uuid4())
            test_case.get_keystone_client().tenants.add_tenant(
                new_proj_id, proj_name)
            # The duplicate must not appear in VNC under its new id.
            with ExpectedException(vnc_api.NoIdError):
                self._vnc_lib.project_read(id=new_proj_id)

            self._vnc_lib.virtual_network_delete(id=vn_obj.uuid)
            self._vnc_lib.project_delete(id=proj_id)
        finally:
            self.openstack_driver._resync_stale_mode = stale_mode
# Example #2
# 0
    def test_dup_domain(self):
        """Duplicate-name keystone domains must both sync to VNC, the
        second one under a uniquified name that still contains the
        original name.
        """
        driver = FakeExtensionManager.get_extension_objects(
            "vnc_cfg_api.resync")[0]
        saved_domains_list = driver._ks_domains_list
        saved_domain_get = driver._ks_domain_get
        try:
            # Switch the driver to the keystone-v3 domain accessors;
            # restored in the finally below.
            driver._ks_domains_list = driver._ksv3_domains_list
            driver._ks_domain_get = driver._ksv3_domain_get

            logger.info('Creating first domain in "keystone"')
            first_id = str(uuid.uuid4())
            shared_name = self.id()
            test_case.get_keystone_client().domains.add_domain(
                first_id, shared_name)
            first_dom = self._vnc_lib.domain_read(id=first_id)
            self.assertThat(first_dom.name, Equals(shared_name))

            logger.info('Creating second domain with same name diff id in "keystone"')
            second_id = str(uuid.uuid4())
            test_case.get_keystone_client().domains.add_domain(
                second_id, shared_name)
            second_dom = self._vnc_lib.domain_read(id=second_id)
            # The clone keeps the original name as a substring but is unique.
            self.assertThat(second_dom.name, Not(Equals(shared_name)))
            self.assertThat(second_dom.name, Contains(shared_name))

            self._vnc_lib.domain_delete(id=first_id)
            self._vnc_lib.domain_delete(id=second_id)
        finally:
            driver._ks_domains_list = saved_domains_list
            driver._ks_domain_get = saved_domain_get
    def test_dup_domain(self):
        """Duplicate-name keystone domains (v3 API): the second domain is
        synced under a uniquified fq-name with the original kept as its
        display name; a child project keeps the first domain from being
        removed synchronously.
        """
        orig_ks_domains_list = self.openstack_driver._ks_domains_list
        orig_ks_domain_get = self.openstack_driver._ks_domain_get
        try:
            # Point the driver at the keystone-v3 domain accessors;
            # restored in the finally below.
            self.openstack_driver._ks_domains_list = self.openstack_driver._ksv3_domains_list
            self.openstack_driver._ks_domain_get = self.openstack_driver._ksv3_domain_get
            logger.info('Creating first domain in "keystone"')
            dom_id = str(uuid.uuid4())
            dom_name = self.id()
            test_case.get_keystone_client().domains.add_domain(dom_id, dom_name)
            dom_obj = self._vnc_lib.domain_read(id=dom_id)
            self.assertThat(dom_obj.name, Equals(dom_name))
            # create a project under domain so synch delete of domain fails
            proj_obj = vnc_api.Project('proj-%s' %(self.id()), dom_obj)
            self._vnc_lib.project_create(proj_obj)

            logger.info('Creating second domain with same name diff id in "keystone"')
            new_dom_id = str(uuid.uuid4())
            test_case.get_keystone_client().domains.add_domain(new_dom_id, dom_name)
            new_dom_obj = self._vnc_lib.domain_read(id=new_dom_id)
            # uniquified fq-name, original name kept as display name
            self.assertThat(new_dom_obj.name, Not(Equals(dom_name)))
            self.assertThat(new_dom_obj.name, Contains(dom_name))
            self.assertThat(new_dom_obj.display_name, Equals(dom_name))

            self._vnc_lib.project_delete(id=proj_obj.uuid)
            self._vnc_lib.domain_delete(id=dom_id)
            self._vnc_lib.domain_delete(id=new_dom_id)
        finally:
            self.openstack_driver._ks_domains_list = orig_ks_domains_list
            self.openstack_driver._ks_domain_get = orig_ks_domain_get
    def test_dup_domain(self):
        """Duplicate-name keystone domains (v3 API): second domain syncs
        under a uniquified fq-name, its display name keeps the original;
        a child project keeps the first domain alive.
        """
        orig_ks_domains_list = self.openstack_driver._ks_domains_list
        orig_ks_domain_get = self.openstack_driver._ks_domain_get
        try:
            # Point the driver at the keystone-v3 domain accessors;
            # restored in the finally below.
            self.openstack_driver._ks_domains_list = self.openstack_driver._ksv3_domains_list
            self.openstack_driver._ks_domain_get = self.openstack_driver._ksv3_domain_get
            logger.info('Creating first domain in "keystone"')
            dom_id = str(uuid.uuid4())
            dom_name = self.id()
            test_case.get_keystone_client().domains.add_domain(
                dom_id, dom_name)
            dom_obj = self._vnc_lib.domain_read(id=dom_id)
            self.assertThat(dom_obj.name, Equals(dom_name))
            # create a project under domain so synch delete of domain fails
            proj_obj = vnc_api.Project('proj-%s' % (self.id()), dom_obj)
            self._vnc_lib.project_create(proj_obj)

            logger.info(
                'Creating second domain with same name diff id in "keystone"')
            new_dom_id = str(uuid.uuid4())
            test_case.get_keystone_client().domains.add_domain(
                new_dom_id, dom_name)
            new_dom_obj = self._vnc_lib.domain_read(id=new_dom_id)
            # uniquified fq-name, original kept as display name
            self.assertThat(new_dom_obj.name, Not(Equals(dom_name)))
            self.assertThat(new_dom_obj.name, Contains(dom_name))
            self.assertThat(new_dom_obj.display_name, Equals(dom_name))

            self._vnc_lib.project_delete(id=proj_obj.uuid)
            self._vnc_lib.domain_delete(id=dom_id)
            self._vnc_lib.domain_delete(id=new_dom_id)
        finally:
            self.openstack_driver._ks_domains_list = orig_ks_domains_list
            self.openstack_driver._ks_domain_get = orig_ks_domain_get
    def test_dup_project_fails(self):
        """With _resync_stale_mode='new_fails', a second keystone project
        reusing an existing name must not be created in VNC (reading the
        new id raises NoIdError).
        """
        logger.info('Creating first project in "keystone"')
        proj_id = str(uuid.uuid4())
        proj_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
        proj_obj = self._vnc_lib.project_read(id=proj_id)
        self.assertThat(proj_obj.name, Equals(proj_name))
        # create a VN in it so old isn't removed (due to synchronous delete)
        # when new with same name is created
        vn_obj = vnc_api.VirtualNetwork('vn-%s' %(self.id()), proj_obj)
        self._vnc_lib.virtual_network_create(vn_obj)

        # Make the driver reject the newer duplicate; restored in finally.
        stale_mode = self.openstack_driver._resync_stale_mode
        self.openstack_driver._resync_stale_mode = 'new_fails'
        try:
            logger.info('Creating second project with same name diff id in "keystone"')
            new_proj_id = str(uuid.uuid4())
            test_case.get_keystone_client().tenants.add_tenant(new_proj_id, proj_name)
            # The duplicate must not appear in VNC under its new id.
            with ExpectedException(vnc_api.NoIdError):
                self._vnc_lib.project_read(id=new_proj_id)

            self._vnc_lib.virtual_network_delete(id=vn_obj.uuid)
            self._vnc_lib.project_delete(id=proj_id)
        finally:
            self.openstack_driver._resync_stale_mode = stale_mode
# Example #6
# 0
    def test_dup_domain(self):
        """Duplicate-name keystone domains (v3 API): the second domain
        must sync to VNC under a unique name containing the original.
        """
        openstack_driver = FakeExtensionManager.get_extension_objects(
            'vnc_cfg_api.resync')[0]
        orig_ks_domains_list = openstack_driver._ks_domains_list
        orig_ks_domain_get = openstack_driver._ks_domain_get
        try:
            # Point the driver at the keystone-v3 domain accessors;
            # restored in the finally below.
            openstack_driver._ks_domains_list = openstack_driver._ksv3_domains_list
            openstack_driver._ks_domain_get = openstack_driver._ksv3_domain_get
            logger.info('Creating first domain in "keystone"')
            dom_id = str(uuid.uuid4())
            dom_name = self.id()
            test_case.get_keystone_client().domains.add_domain(dom_id, dom_name)
            dom_obj = self._vnc_lib.domain_read(id=dom_id)
            self.assertThat(dom_obj.name, Equals(dom_name))

            logger.info('Creating second domain with same name diff id in "keystone"')
            new_dom_id = str(uuid.uuid4())
            test_case.get_keystone_client().domains.add_domain(new_dom_id, dom_name)
            new_dom_obj = self._vnc_lib.domain_read(id=new_dom_id)
            # uniquified name still contains the original
            self.assertThat(new_dom_obj.name, Not(Equals(dom_name)))
            self.assertThat(new_dom_obj.name, Contains(dom_name))

            self._vnc_lib.domain_delete(id=dom_id)
            self._vnc_lib.domain_delete(id=new_dom_id)
        finally:
            openstack_driver._ks_domains_list = orig_ks_domains_list
            openstack_driver._ks_domain_get = orig_ks_domain_get
    def test_dup_project_new_unique_fqn(self):
        """A second keystone project with a duplicate name is synced
        under a uniquified fq-name while keeping the original name as
        its display name.
        """
        logger.info('Creating first project in "keystone"')
        first_id = str(uuid.uuid4())
        shared_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(
            first_id, shared_name)
        first_proj = self._vnc_lib.project_read(id=first_id)
        self.assertThat(first_proj.name, Equals(shared_name))
        # create a VN in it so old isn't removed (due to synchronous delete)
        # when new with same name is created
        net = vnc_api.VirtualNetwork('vn-%s' % (self.id()), first_proj)
        self._vnc_lib.virtual_network_create(net)

        logger.info(
            'Creating second project with same name diff id in "keystone"')
        second_id = str(uuid.uuid4())
        test_case.get_keystone_client().tenants.add_tenant(
            second_id, shared_name)
        second_proj = self._vnc_lib.project_read(id=second_id)
        # fq-name was uniquified, display name preserved
        self.assertThat(second_proj.name, Not(Equals(shared_name)))
        self.assertThat(second_proj.name, Contains(shared_name))
        self.assertThat(second_proj.display_name, Equals(shared_name))

        # cleanup: VN first, then both projects
        self._vnc_lib.virtual_network_delete(id=net.uuid)
        self._vnc_lib.project_delete(id=first_id)
        self._vnc_lib.project_delete(id=second_id)
 def test_post_project_create_default_sg(self):
     """Syncing a keystone project must create a 'default' security
     group under the new VNC project.
     """
     proj_id = str(uuid.uuid4())
     proj_name = self.id()
     test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
     proj_obj = self._vnc_lib.project_read(id=proj_id)
     # The default SG lives directly under the project's fq-name; the
     # read raises if post-create hooks did not make it.
     sg_obj = self._vnc_lib.security_group_read(
         fq_name=proj_obj.fq_name+['default'])
     self._vnc_lib.security_group_delete(id=sg_obj.uuid)
# Example #9
# 0
    def test_connection_status_change(self):
        """Keystone ConnectionState must go up -> down when project
        listing raises, stay down while it keeps raising, and recover to
        up after the mock is removed.
        """
        # up->down->up transition check
        openstack_driver = FakeExtensionManager.get_extension_objects(
            'vnc_cfg_api.resync')[0]
        proj_id = str(uuid.uuid4())
        proj_name = self.id() + 'verify-active'
        test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
        proj_obj = self._vnc_lib.project_read(id=proj_id)
        # Pick the 'Keystone' entry out of the global connection map.
        conn_info = [
            ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'
        ][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))

        fake_list_invoked = []

        def fake_list(*args, **kwargs):
            # Record the call, then fail the poll to drive the state down.
            fake_list_invoked.append(True)
            raise Exception("Fake Keystone Projects List exception")

        with test_common.flexmocks([(openstack_driver._ks.tenants, 'list',
                                     fake_list)]):
            proj_id = str(uuid.uuid4())
            proj_name = self.id() + 'verify-down'
            test_case.get_keystone_client().tenants.add_tenant(
                proj_id, proj_name)
            openstack_driver._ks = None  # force to re-connect on next poll

            def verify_down():
                # Re-fetch the 'Keystone' entry and assert it reads down.
                conn_info = [
                    ConnectionState._connection_map[x]
                    for x in ConnectionState._connection_map
                    if x[1] == 'Keystone'
                ][0]
                self.assertThat(conn_info.status.lower(), Equals('down'))

            # verify up->down
            gevent.sleep(self.resync_interval)
            verify_down()
            self.assertThat(len(fake_list_invoked), Equals(1))
            # should remain down
            gevent.sleep(self.resync_interval)
            verify_down()
            self.assertThat(len(fake_list_invoked), Equals(2))

        # sleep for a retry and verify down->up
        gevent.sleep(self.resync_interval)
        conn_info = [
            ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'
        ][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))
    def test_connection_status_change(self):
        """Keystone ConnectionState up -> down -> up: polls that raise
        take the state down; it stays down while the fault persists and
        recovers once the fault is removed.
        """
        # up->down->up transition check
        proj_id = str(uuid.uuid4())
        proj_name = self.id() + 'verify-active'
        test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
        proj_obj = self._vnc_lib.project_read(id=proj_id)
        # Pick the 'Keystone' entry out of the global connection map.
        conn_info = [
            ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'
        ][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))

        fake_list_invoked = list()

        def fake_list(*args, **kwargs):
            # Record the call, then fail the poll to drive the state down.
            fake_list_invoked.append(True)
            raise Exception("Fake Keystone Projects List exception")

        def verify_down():
            # Re-fetch the 'Keystone' entry and assert it reads down.
            conn_info = [
                ConnectionState._connection_map[x]
                for x in ConnectionState._connection_map if x[1] == 'Keystone'
            ][0]
            self.assertThat(conn_info.status.lower(), Equals('down'))

        with test_common.flexmocks([(self.openstack_driver._ks.tenants, 'list',
                                     fake_list)]):
            # wait for tenants.list is invoked for 2*self.resync_interval max
            for x in range(10):
                if len(fake_list_invoked) >= 1:
                    break
                gevent.sleep(float(self.resync_interval) / 5.0)
            # check that tenants.list was called once
            self.assertThat(len(fake_list_invoked), Equals(1))
            # wait for 1/10 of self.resync_interval to let code reach reset_connection in service
            gevent.sleep(float(self.resync_interval) / 10.0)
            # verify up->down
            verify_down()
            # should remain down
            gevent.sleep(float(self.resync_interval) * 1.05)
            verify_down()
            self.assertThat(len(fake_list_invoked), Equals(2))

        # sleep for a retry and verify down->up
        gevent.sleep(self.resync_interval)
        conn_info = [
            ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'
        ][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))
# Example #11
# 0
    def test_dup_project(self):
        """A second keystone project reusing an existing name is synced
        to VNC under a distinct name that still contains the original.
        """
        logger.info('Creating first project in "keystone"')
        first_id = str(uuid.uuid4())
        shared_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(
            first_id, shared_name)
        first_proj = self._vnc_lib.project_read(id=first_id)
        self.assertThat(first_proj.name, Equals(shared_name))

        logger.info('Creating second project with same name diff id in "keystone"')
        second_id = str(uuid.uuid4())
        test_case.get_keystone_client().tenants.add_tenant(
            second_id, shared_name)
        second_proj = self._vnc_lib.project_read(id=second_id)
        # uniquified name still contains the original
        self.assertThat(second_proj.name, Not(Equals(shared_name)))
        self.assertThat(second_proj.name, Contains(shared_name))

        self._vnc_lib.project_delete(id=first_id)
        self._vnc_lib.project_delete(id=second_id)
# Example #12
# 0
    def test_dup_project(self):
        """Duplicate-named keystone projects: both end up in VNC, the
        newer one renamed so its name is unique yet still contains the
        original name.
        """
        logger.info('Creating first project in "keystone"')
        p1_id = str(uuid.uuid4())
        dup_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(p1_id, dup_name)
        p1 = self._vnc_lib.project_read(id=p1_id)
        self.assertThat(p1.name, Equals(dup_name))

        logger.info('Creating second project with same name diff id in "keystone"')
        p2_id = str(uuid.uuid4())
        test_case.get_keystone_client().tenants.add_tenant(p2_id, dup_name)
        p2 = self._vnc_lib.project_read(id=p2_id)
        # renamed, but the original name survives as a substring
        self.assertThat(p2.name, Not(Equals(dup_name)))
        self.assertThat(p2.name, Contains(dup_name))

        # cleanup
        self._vnc_lib.project_delete(id=p1_id)
        self._vnc_lib.project_delete(id=p2_id)
# Example #13
# 0
    def test_connection_status_change(self):
        """Keystone ConnectionState must transition up -> down on a
        failing poll, stay down while it keeps failing, and return to up
        after the fault clears.
        """
        # up->down->up transition check
        openstack_driver = FakeExtensionManager.get_extension_objects(
            'vnc_cfg_api.resync')[0]
        proj_id = str(uuid.uuid4())
        proj_name = self.id()+'verify-active'
        test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
        proj_obj = self._vnc_lib.project_read(id=proj_id)
        # Pick the 'Keystone' entry out of the global connection map.
        conn_info = [ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))

        fake_list_invoked = []
        def fake_list(*args, **kwargs):
            # Record the call, then fail the poll to drive the state down.
            fake_list_invoked.append(True)
            raise Exception("Fake Keystone Projects List exception")

        with test_common.flexmocks([
            (openstack_driver._ks.tenants, 'list', fake_list)]):
            proj_id = str(uuid.uuid4())
            proj_name = self.id()+'verify-down'
            test_case.get_keystone_client().tenants.add_tenant(
                proj_id, proj_name)
            openstack_driver._ks = None # force to re-connect on next poll
            def verify_down():
                # Re-fetch the 'Keystone' entry and assert it reads down.
                conn_info = [ConnectionState._connection_map[x]
                    for x in ConnectionState._connection_map
                    if x[1] == 'Keystone'][0]
                self.assertThat(conn_info.status.lower(), Equals('down'))

            # verify up->down
            gevent.sleep(self.resync_interval)
            verify_down()
            self.assertThat(len(fake_list_invoked), Equals(1))
            # should remain down
            gevent.sleep(self.resync_interval)
            verify_down()
            self.assertThat(len(fake_list_invoked), Equals(2))

        # sleep for a retry and verify down->up
        gevent.sleep(self.resync_interval)
        conn_info = [ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))
    def test_connection_status_change(self):
        """Keystone ConnectionState up -> down -> up: a raising poll
        takes the state down; it stays down until the fault is removed,
        then recovers.
        """
        # up->down->up transition check
        proj_id = str(uuid.uuid4())
        proj_name = self.id()+'verify-active'
        test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
        proj_obj = self._vnc_lib.project_read(id=proj_id)
        # Pick the 'Keystone' entry out of the global connection map.
        conn_info = [ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))

        fake_list_invoked = list()
        def fake_list(*args, **kwargs):
            # Record the call, then fail the poll to drive the state down.
            fake_list_invoked.append(True)
            raise Exception("Fake Keystone Projects List exception")

        def verify_down():
            # Re-fetch the 'Keystone' entry and assert it reads down.
            conn_info = [ConnectionState._connection_map[x]
                for x in ConnectionState._connection_map
                if x[1] == 'Keystone'][0]
            self.assertThat(conn_info.status.lower(), Equals('down'))

        with test_common.flexmocks([(self.openstack_driver._ks.tenants, 'list', fake_list)]):
            # wait for tenants.list is invoked for 2*self.resync_interval max
            for x in range(10):
                if len(fake_list_invoked) >= 1:
                    break
                gevent.sleep(float(self.resync_interval)/5.0)
            # check that tenants.list was called once
            self.assertThat(len(fake_list_invoked), Equals(1))
            # wait for 1/10 of self.resync_interval to let code reach reset_connection in service
            gevent.sleep(float(self.resync_interval)/10.0)
            # verify up->down
            verify_down()
            # should remain down
            gevent.sleep(float(self.resync_interval)*1.05)
            verify_down()
            self.assertThat(len(fake_list_invoked), Equals(2))

        # sleep for a retry and verify down->up
        gevent.sleep(self.resync_interval)
        conn_info = [ConnectionState._connection_map[x]
            for x in ConnectionState._connection_map if x[1] == 'Keystone'][0]
        self.assertThat(conn_info.status.lower(), Equals('up'))
    def test_delete_synchronous_on_dup(self):
        """Re-creating a same-named keystone project right after deleting
        the old one must synchronously remove the stale VNC project so
        the new one can take its fq-name.
        """
        logger.info('Creating project in "keystone" and syncing')
        proj_id1 = str(uuid.uuid4())
        proj_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(proj_id1, proj_name)
        proj_obj = self._vnc_lib.project_read(id=proj_id1)
        self.assertThat(proj_obj.name, Equals(proj_name))

        logger.info('Deleting project in keystone and immediately re-creating')

        def stub(*args, **kwargs):
            # No-op replacement for the driver's project-delete hook.
            return

        # With _del_project_from_vnc stubbed out, the stale project can
        # only be removed by the synchronous dup-handling path
        # (presumably — confirm against the resync driver).
        with test_common.patch(self.openstack_driver, '_del_project_from_vnc',
                               stub):
            test_case.get_keystone_client().tenants.delete_tenant(proj_id1)
            proj_id2 = str(uuid.uuid4())
            test_case.get_keystone_client().tenants.add_tenant(
                proj_id2, proj_name)
            proj_obj = self._vnc_lib.project_read(id=proj_id2)
            self.assertThat(proj_obj.uuid, Equals(proj_id2))
            # The old project must already be gone from VNC.
            with ExpectedException(vnc_api.NoIdError):
                self._vnc_lib.project_read(id=proj_id1)

        self._vnc_lib.project_delete(id=proj_id2)
    def test_delete_synchronous_on_dup(self):
        """Delete + immediate same-name re-create in keystone: the stale
        VNC project must be removed synchronously so the new project can
        take its place.
        """
        openstack_driver = FakeExtensionManager.get_extension_objects(
            'vnc_cfg_api.resync')[0]

        logger.info('Creating project in "keystone" and syncing')
        proj_id1 = str(uuid.uuid4())
        proj_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(proj_id1, proj_name)
        proj_obj = self._vnc_lib.project_read(id=proj_id1)
        self.assertThat(proj_obj.name, Equals(proj_name))

        logger.info('Deleting project in keystone and immediately re-creating')
        def stub(*args, **kwargs):
            # No-op replacement for the driver's project-delete hook.
            return
        # With _del_project_from_vnc stubbed out, the stale project can
        # only be removed by the synchronous dup-handling path
        # (presumably — confirm against the resync driver).
        with test_common.patch(openstack_driver,
            '_del_project_from_vnc', stub):
            test_case.get_keystone_client().tenants.delete_tenant(proj_id1)
            proj_id2 = str(uuid.uuid4())
            test_case.get_keystone_client().tenants.add_tenant(
                proj_id2, proj_name)
            proj_obj = self._vnc_lib.project_read(id=proj_id2)
            self.assertThat(proj_obj.uuid, Equals(proj_id2))
            # The old project must already be gone from VNC.
            with ExpectedException(vnc_api.NoIdError):
                self._vnc_lib.project_read(id=proj_id1)

        self._vnc_lib.project_delete(id=proj_id2)
# Example #17
# 0
    def test_dup_project_new_unique_fqn(self):
        """A duplicate-named keystone project is synced into VNC under a
        uniquified fq-name while its display name keeps the original.
        """
        logger.info('Creating first project in "keystone"')
        p1_id = str(uuid.uuid4())
        dup_name = self.id()
        test_case.get_keystone_client().tenants.add_tenant(p1_id, dup_name)
        p1 = self._vnc_lib.project_read(id=p1_id)
        self.assertThat(p1.name, Equals(dup_name))
        # create a VN in it so old isn't removed (due to synchronous delete)
        # when new with same name is created
        vn = vnc_api.VirtualNetwork('vn-%s' % (self.id()), p1)
        self._vnc_lib.virtual_network_create(vn)

        logger.info('Creating second project with same name diff id in "keystone"')
        p2_id = str(uuid.uuid4())
        test_case.get_keystone_client().tenants.add_tenant(p2_id, dup_name)
        p2 = self._vnc_lib.project_read(id=p2_id)
        # renamed for uniqueness, display name preserved
        self.assertThat(p2.name, Not(Equals(dup_name)))
        self.assertThat(p2.name, Contains(dup_name))
        self.assertThat(p2.display_name, Equals(dup_name))

        # cleanup: VN first, then both projects
        self._vnc_lib.virtual_network_delete(id=vn.uuid)
        self._vnc_lib.project_delete(id=p1_id)
        self._vnc_lib.project_delete(id=p2_id)
    def test_floating_ip_list(self):
        proj_objs = []
        for i in range(3):
            proj_id = str(uuid.uuid4())
            proj_name = 'proj-%s-%s' %(self.id(), i)
            test_case.get_keystone_client().tenants.add_tenant(proj_id, proj_name)
            proj_objs.append(self._vnc_lib.project_read(id=proj_id))

        sg_q_list = [self.create_resource('security_group', proj_objs[i].uuid)
                     for i in range(3)]

        # public network on last project
        pub_net1_q = self.create_resource('network', proj_objs[-1].uuid,
            name='public-network-%s-1' %(self.id()),
            extra_res_fields={'router:external': True})
        self.create_resource('subnet', proj_objs[-1].uuid,
            name='public-subnet-%s-1' %(self.id()),
            extra_res_fields={
                'network_id': pub_net1_q['id'],
                'cidr': '10.1.1.0/24',
                'ip_version': 4,
            })
        pub_net2_q = self.create_resource('network', proj_objs[-1].uuid,
            name='public-network-%s-2' %(self.id()),
            extra_res_fields={'router:external': True})
        self.create_resource('subnet', proj_objs[-1].uuid,
            name='public-subnet-%s-2' %(self.id()),
            extra_res_fields={
                'network_id': pub_net2_q['id'],
                'cidr': '20.1.1.0/24',
                'ip_version': 4,
            })

        def create_net_subnet_port_assoc_fip(i, pub_net_q_list,
                                             has_routers=True):
            net_q_list = [self.create_resource('network', proj_objs[i].uuid,
                name='network-%s-%s-%s' %(self.id(), i, j)) for j in range(2)]
            subnet_q_list = [self.create_resource('subnet', proj_objs[i].uuid,
                name='subnet-%s-%s-%s' %(self.id(), i, j),
                extra_res_fields={
                    'network_id': net_q_list[j]['id'],
                    'cidr': '1.%s.%s.0/24' %(i, j),
                    'ip_version': 4,
                }) for j in range(2)]

            if has_routers:
                router_q_list = [self.create_resource('router', proj_objs[i].uuid,
                    name='router-%s-%s-%s' %(self.id(), i, j),
                    extra_res_fields={
                        'external_gateway_info': {
                            'network_id': pub_net_q_list[j]['id'],
                        }
                    }) for j in range(2)]
                [self.update_resource('router', router_q_list[j]['id'],
                    proj_objs[i].uuid, is_admin=True, operation='ADDINTERFACE',
                    extra_res_fields={'subnet_id': subnet_q_list[j]['id']})
                        for j in range(2)]
            else:
                router_q_list = None

            port_q_list = [self.create_resource('port', proj_objs[i].uuid,
                name='port-%s-%s-%s' %(self.id(), i, j),
                extra_res_fields={
                    'network_id': net_q_list[j]['id'],
                    'security_groups': [sg_q_list[i]['id']],
                }) for j in range(2)]

            fip_q_list = [self.create_resource('floatingip', proj_objs[i].uuid,
                name='fip-%s-%s-%s' %(self.id(), i, j),
                is_admin=True,
                extra_res_fields={'floating_network_id': pub_net_q_list[j]['id'],
                                  'port_id': port_q_list[j]['id']}) for j in range(2)]

            return {'network': net_q_list, 'subnet': subnet_q_list,
                    'ports': port_q_list, 'fips': fip_q_list,
                    'routers': router_q_list}
        # end create_net_subnet_port_assoc_fip

        created = []
        # without routers
        created.append(create_net_subnet_port_assoc_fip(
            0, [pub_net1_q, pub_net2_q], has_routers=False))

        # with routers
        created.append(create_net_subnet_port_assoc_fip(
            1, [pub_net1_q, pub_net2_q], has_routers=True))

        # 1. list as admin for all routers
        fip_dicts = self.list_resource('floatingip', is_admin=True)
        # convert list to dict by id
        fip_dicts = dict((fip['id'], fip) for fip in fip_dicts)
        # assert all floatingip we created recevied back
        for fip in created[0]['fips'] + created[1]['fips']:
            self.assertIn(fip['id'], fip_dicts.keys())

        # assert router-id present in fips of proj[1]
        self.assertEqual(created[1]['routers'][0]['id'],
            fip_dicts[created[1]['fips'][0]['id']]['router_id'])
        self.assertEqual(created[1]['routers'][1]['id'],
            fip_dicts[created[1]['fips'][1]['id']]['router_id'])

        # assert router-id not present in fips of proj[0]
        self.assertEqual(None,
            fip_dicts[created[0]['fips'][0]['id']]['router_id'])
        self.assertEqual(None,
            fip_dicts[created[0]['fips'][1]['id']]['router_id'])

        # 2. list routers within project
        fip_dicts = self.list_resource(
            'floatingip', proj_uuid=proj_objs[0].uuid)
        self.assertEqual(None,
            fip_dicts[0]['router_id'])
        self.assertEqual(None,
            fip_dicts[1]['router_id'])
        # convert list to dict by port-id
        fip_dicts = dict((fip['port_id'], fip) for fip in fip_dicts)
        # assert fips point to right port
        self.assertEqual(created[0]['ports'][0]['fixed_ips'][0]['ip_address'],
            fip_dicts[created[0]['ports'][0]['id']]['fixed_ip_address'])
        self.assertEqual(created[0]['ports'][1]['fixed_ips'][0]['ip_address'],
            fip_dicts[created[0]['ports'][1]['id']]['fixed_ip_address'])