def test_sync_nsx_failure_backoff(self):
     """Verify exponential backoff, capped at 64, while NSX keeps failing."""
     # Every request to the backend will time out
     self.mock_api.return_value.request.side_effect = api_exc.RequestTimeout
     # The chunk size is irrelevant here: no request ever succeeds
     sp = sync.SyncParameters(999)
     # Backoff doubles on each failed attempt: 1, 2, 4, ... capped at 64
     expected_backoffs = [min(64, 2 ** attempt) for attempt in range(10)]
     for expected in expected_backoffs:
         self.assertEqual(
             expected,
             self._plugin._synchronizer._synchronize_state(sp))
 def _test_sync(self,
                exp_net_status,
                exp_port_status,
                exp_router_status,
                action_callback=None,
                sp=None):
     """Run a state synchronization pass and verify resource statuses.

     Picks one fake logical switch, port and router from the fake NSX
     client, optionally invokes action_callback on their backend UUIDs
     (e.g. to delete or break them), synchronizes state, and asserts the
     corresponding Neutron resources end up in the expected statuses.

     :param exp_net_status: expected status for the Neutron network
     :param exp_port_status: expected status for the Neutron port
     :param exp_router_status: expected status for the Neutron router
     :param action_callback: optional callable(ls_uuid, lp_uuid, lr_uuid)
         run against the backend resources before synchronizing
     :param sp: optional SyncParameters; defaults to a chunk size of 100,
         large enough to read everything in a single pass
     """
     # NOTE: next(iter(d)) replaces d.keys()[0], which only works on
     # Python 2 (dict views are not subscriptable on Python 3).
     ls_uuid = next(iter(self.fc._fake_lswitch_dict))
     neutron_net_id = self._get_tag_dict(
         self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
     lp_uuid = next(iter(self.fc._fake_lswitch_lport_dict))
     neutron_port_id = self._get_tag_dict(
         self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id']
     lr_uuid = next(iter(self.fc._fake_lrouter_dict))
     neutron_rtr_id = self._get_tag_dict(
         self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
     if action_callback:
         action_callback(ls_uuid, lp_uuid, lr_uuid)
     # Make chunk big enough to read everything
     if not sp:
         sp = sync.SyncParameters(100)
     self._plugin._synchronizer._synchronize_state(sp)
     # Verify element is in expected status
     # TODO(salv-orlando): Verify status for all elements
     ctx = context.get_admin_context()
     neutron_net = self._plugin.get_network(ctx, neutron_net_id)
     neutron_port = self._plugin.get_port(ctx, neutron_port_id)
     neutron_rtr = self._plugin.get_router(ctx, neutron_rtr_id)
     self.assertEqual(exp_net_status, neutron_net['status'])
     self.assertEqual(exp_port_status, neutron_port['status'])
     self.assertEqual(exp_router_status, neutron_rtr['status'])
# --- Example #3 (snippet-site separator; stray "Exemple #3" / "0" artifact) ---
 def test_resync_with_resources_removed(self):
     """A resync must flag backend-deleted resources with the ERROR status."""
     ctx = context.get_admin_context()
     with self._populate_data(ctx):
         sync_params = sync.SyncParameters(100)
         # First full pass over the populated data
         self._plugin._synchronizer._synchronize_state(sync_params)
         # Flag the parameters so the synchronizer performs a resync
         sync_params.init_sync_performed = True
         self._test_sync(constants.NET_STATUS_ERROR,
                         constants.PORT_STATUS_ERROR,
                         constants.NET_STATUS_ERROR,
                         self._action_callback_del_resource,
                         sp=sync_params)
    def test_sync_multi_chunk(self):
        """Verify cursor/chunk bookkeeping when a sync spans multiple chunks.

        _fetch_data is mocked with a scripted queue of (results, cursor,
        total_size) tuples so that the 4 lswitches, 4 lrouters and 4 lports
        are consumed over two chunks of 6 resources each; after both chunks
        the cursors must be reset and the chunk size unchanged.
        """
        # The fake NSX API client cannot be used for this test
        ctx = context.get_admin_context()
        # Generate 4 networks, 1 port per network, and 4 routers
        with self._populate_data(ctx, net_size=4, port_size=1, router_size=4):
            fake_lswitches = json.loads(
                self.fc.handle_get('/ws.v1/lswitch'))['results']
            fake_lrouters = json.loads(
                self.fc.handle_get('/ws.v1/lrouter'))['results']
            fake_lswitchports = json.loads(
                self.fc.handle_get('/ws.v1/lswitch/*/lport'))['results']
            # Scripted _fetch_data replies, consumed strictly in order:
            # each tuple is (results, next-page cursor, total resource count).
            return_values = [
                # Chunk 0 - lswitches
                (fake_lswitches, None, 4),
                # Chunk 0 - lrouters
                (fake_lrouters[:2], 'xxx', 4),
                # Chunk 0 - lports (size only)
                ([], 'start', 4),
                # Chunk 1 - lrouters (2 more) (lswitches are skipped)
                (fake_lrouters[2:], None, None),
                # Chunk 1 - lports
                (fake_lswitchports, None, 4)
            ]

            def fake_fetch_data(*args, **kwargs):
                # Pop the next scripted reply; order matters
                return return_values.pop(0)

            # 2 Chunks, with 6 resources each.
            # 1st chunk lswitches and lrouters
            # 2nd chunk lrouters and lports
            # Mock _fetch_data
            with mock.patch.object(self._plugin._synchronizer,
                                   '_fetch_data',
                                   side_effect=fake_fetch_data):
                sp = sync.SyncParameters(6)

                def do_chunk(chunk_idx, ls_cursor, lr_cursor, lp_cursor):
                    # Run one chunk and verify the resulting sync-state
                    # bookkeeping (current chunk index and per-type cursors)
                    self._plugin._synchronizer._synchronize_state(sp)
                    self.assertEqual(chunk_idx, sp.current_chunk)
                    self.assertEqual(ls_cursor, sp.ls_cursor)
                    self.assertEqual(lr_cursor, sp.lr_cursor)
                    self.assertEqual(lp_cursor, sp.lp_cursor)

                # check 1st chunk
                do_chunk(1, None, 'xxx', 'start')
                # check 2nd chunk
                do_chunk(0, None, None, None)
                # Chunk size should have stayed the same
                self.assertEqual(sp.chunk_size, 6)
# --- Example #5 (snippet-site separator; stray "Exemple #5" / "0" artifact) ---
 def _test_sync_with_chunk_larger_maxpagesize(
     self, net_size, port_size, router_size, chunk_size, exp_calls):
     """Sync with MAX_PAGE_SIZE shrunk to 15 and count page-query calls.

     The real nsxlib.get_single_query_page is still executed (passed as
     the mock's side_effect) so the synchronization behaves normally while
     the number of page queries is recorded and compared to exp_calls.
     """
     ctx = context.get_admin_context()
     original_get_page = nsxlib.get_single_query_page
     sync_params = sync.SyncParameters(chunk_size)
     with self._populate_data(ctx, net_size=net_size,
                              port_size=port_size,
                              router_size=router_size):
         # The page-size patch and the call-counting mock are active only
         # around the sync itself; the mock forwards to the real function.
         with mock.patch.object(sync, 'MAX_PAGE_SIZE', 15), \
                 mock.patch.object(
                     nsxlib, 'get_single_query_page',
                     side_effect=original_get_page) as get_page_mock:
             self._test_sync(constants.NET_STATUS_ACTIVE,
                             constants.PORT_STATUS_ACTIVE,
                             constants.NET_STATUS_ACTIVE,
                             sp=sync_params)
     # As each resource type does not exceed the maximum page size,
     # the method should be called once for each resource type
     self.assertEqual(exp_calls, get_page_mock.call_count)