def _test_dependency_processing(
        self, test_operation, test_object, test_id, test_context,
        dep_operation, dep_object, dep_id, dep_context):
    """Verify that a journal row waits while its dependency is in-flight.

    A dependency row is created and pinned in 'processing'; the dependent
    test row is then added and one journal-thread pass is run.  The test
    row must be skipped (left 'pending') and nothing sent to ODL.
    """
    # Mock sendjson to verify that it never gets called.
    # NOTE(review): the patch is started but not stopped here — presumably
    # a test-base cleanup stops all active patches; confirm.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    db.create_pending_row(self.db_session, dep_object, dep_id,
                          dep_operation, dep_context)
    row = db.get_all_db_rows_by_state(self.db_session, 'pending')
    db.update_pending_db_row_processing(self.db_session, row[0])

    # Create test row with dependent ID.
    db.create_pending_row(self.db_session, test_object, test_id,
                          test_operation, test_context)

    # Call journal thread.
    with mock.patch.object(self.thread.event, 'wait',
                           return_value=False):
        self.thread.sync_pending_row(exit_after_run=True)

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session, 'processing')
    self.assertEqual(1, len(rows))
    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(1, len(rows))
    # Verify that _json_data was not called.
    self.assertFalse(mock_sendjson.call_count)
def delete_subnet_precommit(self, context):
    """Queue a journal 'delete' entry for the subnet being removed.

    The parent network's UUID is stored in the row's data field; it is
    required later for validation checking when parent objects are
    deleted.
    """
    parent_ids = [context.current['network_id']]
    session = context._plugin_context.session
    db.create_pending_row(session, 'subnet', context.current['id'],
                          'delete', parent_ids)
def delete_floatingip(self, context, floatingip_id):
    """Delete a floating IP and journal the deletion for ODL."""
    parent = super(OpenDaylightL3RouterPlugin, self)
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        parent.delete_floatingip(context, floatingip_id)
        db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
                              floatingip_id, odl_const.ODL_DELETE, None)
def _test_update_row_state(self, from_state, to_state, dry_flush=False):
    """Drive a row through two state updates, checking each one lands.

    With *dry_flush*, the session's flush is patched with a mock (which
    still flushes for real via its side effect) and the update is told
    not to flush itself.  Returns the flush mock for caller assertions.
    """
    # Seed the journal with one pending row.
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    mock_flush = mock.MagicMock(side_effect=self.db_context.session.flush)
    if dry_flush:
        patch_flush = mock.patch.object(self.db_context.session, 'flush',
                                        side_effect=mock_flush)
    row = db.get_all_db_rows(self.db_context)[0]
    for target_state in (from_state, to_state):
        if dry_flush:
            patch_flush.start()
        try:
            db.update_db_row_state(self.db_context, row, target_state,
                                   flush=not dry_flush)
        finally:
            if dry_flush:
                patch_flush.stop()
        # Re-read and validate the new state.
        row = db.get_all_db_rows(self.db_context)[0]
        self.assertEqual(target_state, row.state)
    return mock_flush
def update_port_precommit(self, context):
    """Journal an 'update' entry for the port in *context*.

    The recorded payload is a deep copy of the current port dict, with
    full security-group dicts substituted for the group IDs and the
    network_id added for later dependency validation.
    """
    port = context._plugin.get_port(context._plugin_context,
                                    context.current['id'])
    dbcontext = context._plugin_context
    new_context = copy.deepcopy(context.current)
    # Expand security-group IDs into their full dicts for the payload.
    groups = [context._plugin.get_security_group(dbcontext, sg)
              for sg in port['security_groups']]
    new_context['security_groups'] = groups
    # Add the network_id in for validation
    new_context['network_id'] = port['network_id']
    # NOTE(yamahata): work around for port creation for router
    # tenant_id=''(empty string) is passed when port is created
    # by l3 plugin internally for router.
    # On the other hand, ODL doesn't accept empty string for tenant_id.
    # In that case, deduce tenant_id from network_id for now.
    # Right fix: modify Neutron so that don't allow empty string
    # for tenant_id even for port for internal use.
    # TODO(yamahata): eliminate this work around when neutron side
    # is fixed
    # assert port['tenant_id'] != ''
    if ('tenant_id' not in context.current or
            context.current['tenant_id'] == ''):
        # NOTE(review): this writes the deduced tenant_id onto `port`,
        # but the journal row below records `new_context`, so the fix
        # never reaches the recorded payload — looks like it should
        # target new_context; confirm before changing.
        port['tenant_id'] = context._network_context._network['tenant_id']
    db.create_pending_row(context._plugin_context.session, 'port',
                          context.current['id'], 'update', new_context)
def _test_delete_row(self, by_row=False, by_row_id=False, dry_flush=False):
    """Delete one of two journal rows, addressed by row, seqnum or default.

    With *dry_flush*, db.delete_row is told not to flush and the
    session's flush is wrapped in a mock (returned to the caller) while
    still flushing for real afterwards.
    """
    for _ in range(2):
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    rows = db.get_all_db_rows(self.db_session)
    self.assertEqual(len(rows), 2)
    victim = rows[-1]
    kwargs = {'flush': not dry_flush}
    if by_row:
        kwargs['row'] = victim
    elif by_row_id:
        kwargs['row_id'] = victim.seqnum
    mock_flush = None
    if dry_flush:
        patch_flush = mock.patch.object(self.db_session, 'flush',
                                        side_effect=self.db_session.flush)
        mock_flush = patch_flush.start()
    try:
        db.delete_row(self.db_session, **kwargs)
    finally:
        if dry_flush:
            patch_flush.stop()
            self.db_session.flush()
    rows = db.get_all_db_rows(self.db_session)
    self.assertEqual(len(rows), 1)
    self.assertNotEqual(victim.seqnum, rows[0].seqnum)
    return mock_flush
def record(plugin_context, object_type, object_uuid, operation, data,
           ml2_context=None):
    """Insert a journal entry, enriching port payloads and dependencies.

    Port create/update payloads are passed through _enrich_port first.
    Raises RetryRequest if a calculated dependency row disappears before
    the new entry is recorded (surfaces as DBReferenceError).
    """
    is_port_write = (object_type == odl_const.ODL_PORT and
                     operation in (odl_const.ODL_CREATE,
                                   odl_const.ODL_UPDATE))
    if is_port_write:
        data = _enrich_port(plugin_context, ml2_context, object_type,
                            operation, data)

    # Journal entries this one must wait on before being processed.
    depending_on = dependency_validations.calculate(
        plugin_context.session, operation, object_type, object_uuid, data)

    # NOTE(mpeterson): Between the moment that a dependency is calculated
    # and the new entry is recorded in the journal, an operation can occur
    # that would make the dependency irrelevant. In that case we request a
    # retry. For more details, read the commit message that introduced
    # this comment.
    try:
        db.create_pending_row(plugin_context.session, object_type,
                              object_uuid, operation, data,
                              depending_on=depending_on)
    except exception.DBReferenceError as e:
        raise exception.RetryRequest(e)
def test_journal_recovery_retries_exceptions(self):
    """Recovery must retry when updating the failed row raises."""
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, 'id',
                          odl_const.ODL_DELETE, {})
    failed_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, failed_row, odl_const.FAILED)
    with mock.patch.object(db, 'update_db_row_state') as update_mock:
        self._test_retry_exceptions(recovery.journal_recovery, update_mock)
def _test_dependency_processing(self, test_operation, test_object,
                                test_id, test_context,
                                dep_operation, dep_object, dep_id,
                                dep_context):
    """Verify that a journal row waits while its dependency is in-flight.

    A dependency row is created and pinned in 'processing'; the dependent
    test row is then added and one journal-thread pass is run.  The test
    row must be skipped (left 'pending') and nothing sent to ODL.
    """
    # Mock sendjson to verify that it never gets called.
    # NOTE(review): the patch is started but not stopped here — presumably
    # a test-base cleanup stops all active patches; confirm.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    db.create_pending_row(self.db_session, dep_object, dep_id,
                          dep_operation, dep_context)
    row = db.get_all_db_rows_by_state(self.db_session, 'pending')
    db.update_pending_db_row_processing(self.db_session, row[0])

    # Create test row with dependent ID.
    db.create_pending_row(self.db_session, test_object, test_id,
                          test_operation, test_context)

    # Call journal thread.
    with mock.patch.object(self.thread.event, 'wait',
                           return_value=False):
        self.thread.sync_pending_row(exit_after_run=True)

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session, 'processing')
    self.assertEqual(1, len(rows))
    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(1, len(rows))
    # Verify that _json_data was not called.
    self.assertFalse(mock_sendjson.call_count)
def update_port_precommit(self, context):
    """Journal an 'update' entry for the port in *context*.

    The recorded payload is a deep copy of the current port dict, with
    full security-group dicts substituted for the group IDs and the
    network_id added for later dependency validation.
    """
    port = context._plugin.get_port(context._plugin_context,
                                    context.current['id'])
    dbcontext = context._plugin_context
    new_context = copy.deepcopy(context.current)
    # Expand security-group IDs into their full dicts for the payload.
    groups = [
        context._plugin.get_security_group(dbcontext, sg)
        for sg in port['security_groups']
    ]
    new_context['security_groups'] = groups
    # Add the network_id in for validation
    new_context['network_id'] = port['network_id']
    # NOTE(yamahata): work around for port creation for router
    # tenant_id=''(empty string) is passed when port is created
    # by l3 plugin internally for router.
    # On the other hand, ODL doesn't accept empty string for tenant_id.
    # In that case, deduce tenant_id from network_id for now.
    # Right fix: modify Neutron so that don't allow empty string
    # for tenant_id even for port for internal use.
    # TODO(yamahata): eliminate this work around when neutron side
    # is fixed
    # assert port['tenant_id'] != ''
    if ('tenant_id' not in context.current or
            context.current['tenant_id'] == ''):
        # NOTE(review): this writes the deduced tenant_id onto `port`,
        # but the journal row below records `new_context`, so the fix
        # never reaches the recorded payload — looks like it should
        # target new_context; confirm before changing.
        port['tenant_id'] = context._network_context._network['tenant_id']
    db.create_pending_row(context._plugin_context.session, 'port',
                          context.current['id'], 'update', new_context)
def _test_delete_row(self, by_row=False, by_row_id=False, dry_flush=False):
    """Delete one of two journal rows, addressed by row, seqnum or default.

    With *dry_flush*, db.delete_row is told not to flush and the
    session's flush is wrapped in a mock (returned to the caller) while
    still flushing for real afterwards.
    """
    for _ in range(2):
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(len(rows), 2)
    victim = rows[-1]
    kwargs = {'flush': not dry_flush}
    if by_row:
        kwargs['row'] = victim
    elif by_row_id:
        kwargs['row_id'] = victim.seqnum
    mock_flush = None
    if dry_flush:
        patch_flush = mock.patch.object(
            self.db_context.session, 'flush',
            side_effect=self.db_context.session.flush)
        mock_flush = patch_flush.start()
    try:
        db.delete_row(self.db_context, **kwargs)
    finally:
        if dry_flush:
            patch_flush.stop()
            self.db_context.session.flush()
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(len(rows), 1)
    self.assertNotEqual(victim.seqnum, rows[0].seqnum)
    return mock_flush
def delete_router(self, context, router_id):
    """Delete a router and journal the deletion for ODL."""
    parent = super(OpenDaylightL3RouterPlugin, self)
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        parent.delete_router(context, router_id)
        db.create_pending_row(context.session, odl_const.ODL_ROUTER,
                              router_id, odl_const.ODL_DELETE, None)
def _test_update_row_state(self, from_state, to_state, dry_flush=False):
    """Drive a row through two state updates, checking each one lands.

    With *dry_flush*, the session's flush is patched with a mock (which
    still flushes for real via its side effect) and the update is told
    not to flush itself.  Returns the flush mock for caller assertions.
    """
    # Seed the journal with one pending row.
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    mock_flush = mock.MagicMock(side_effect=self.db_session.flush)
    if dry_flush:
        patch_flush = mock.patch.object(self.db_session, 'flush',
                                        side_effect=mock_flush)
    row = db.get_all_db_rows(self.db_session)[0]
    for target_state in (from_state, to_state):
        if dry_flush:
            patch_flush.start()
        try:
            db.update_db_row_state(self.db_session, row, target_state,
                                   flush=not dry_flush)
        finally:
            if dry_flush:
                patch_flush.stop()
        # Re-read and validate the new state.
        row = db.get_all_db_rows(self.db_session)[0]
        self.assertEqual(target_state, row.state)
    return mock_flush
def _test_reset_processing_rows(self, session, last_retried, max_timedelta,
                                quantity, dry_reset=False):
    """Age a 'processing' row and verify reset_processing_rows behavior.

    The row's last_retried is pushed *last_retried* seconds into the
    past; rows older than *max_timedelta* flip back to 'pending' unless
    *dry_reset* expects none to change.
    """
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    target = db.get_all_db_rows(self.db_session)[-1]
    target.state = odl_const.PROCESSING
    target.last_retried = target.last_retried - timedelta(
        seconds=last_retried)
    self._update_row(target)

    expected_state = (odl_const.PROCESSING if dry_reset
                      else odl_const.PENDING)
    reset = db.reset_processing_rows(self.db_session, max_timedelta)
    self.assertIsInstance(reset, int)
    self.assertEqual(reset, quantity)

    rows = db.get_all_db_rows_by_state(self.db_session, expected_state)
    self.assertEqual(len(rows), quantity)
    for reset_row in rows:
        self.assertEqual(reset_row.state, expected_state)
def _sync_resources(session, plugin, dbcontext, object_type,
                    collection_name):
    """Journal a 'create' entry for every existing resource of a type."""
    getter = getattr(plugin, 'get_%s' % collection_name)
    for resource in getter(dbcontext):
        db.create_pending_row(session, object_type, resource['id'],
                              odl_const.ODL_CREATE, resource)
def test_get_oldest_pending_row_returns_parent_when_dep_pending(self):
    """The locked row must be the parent, not its pending dependent."""
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    parent = db.get_all_db_rows(self.db_context)[0]
    db.create_pending_row(self.db_context, *self.UPDATE_ROW,
                          depending_on=[parent])
    picked = db.get_oldest_pending_db_row_with_lock(self.db_context)
    self.assertEqual(parent, picked)
def update_router(self, context, router_id, router):
    """Update a router and journal the new state for ODL."""
    parent = super(OpenDaylightL3RouterPlugin, self)
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        router_dict = parent.update_router(context, router_id, router)
        db.create_pending_row(context.session, odl_const.ODL_ROUTER,
                              router_id, odl_const.ODL_UPDATE, router_dict)
    return router_dict
def _test_get_oldest_pending_row_none(self, state):
    """No row should be returned when the only row is in *state*."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    only_row = db.get_all_db_rows(self.db_session)[0]
    only_row.state = state
    self._update_row(only_row)
    self.assertIsNone(
        db.get_oldest_pending_db_row_with_lock(self.db_session))
def record(db_session, object_type, object_uuid, operation, data,
           context=None):
    """Insert a journal entry for *operation* on the given object.

    Port create/update entries are first passed through _enrich_port.

    NOTE(review): the return value of _enrich_port is discarded here,
    unlike sibling record() implementations that rebind ``data`` — this
    presumably relies on _enrich_port mutating ``data`` in place;
    confirm before changing.
    """
    if (object_type == odl_const.ODL_PORT
            and operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
        _enrich_port(db_session, context, object_type, operation, data)
    db.create_pending_row(db_session, object_type, object_uuid, operation,
                          data)
def test_get_oldest_pending_row_returns_parent_when_dep_pending(self):
    """Locking must pick the parent row, never its pending dependent."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    parent = db.get_all_db_rows(self.db_session)[0]
    db.create_pending_row(self.db_session, *self.UPDATE_ROW,
                          depending_on=[parent])
    picked = db.get_oldest_pending_db_row_with_lock(self.db_session)
    self.assertEqual(parent, picked)
def test_full_sync_removes_pending_rows(self):
    """full_sync must clear any pre-existing pending journal rows."""
    self._mock_canary_missing()
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK, "uuid",
                          odl_const.ODL_CREATE, {'foo': 'bar'})
    full_sync.full_sync(self.db_session)
    remaining = self._assert_canary_created()
    self._assert_no_journal_rows(remaining)
def _test_get_oldest_pending_row_none(self, state):
    """No pending row should be returned when the only row is *state*."""
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    only_row = db.get_all_db_rows(self.db_context)[0]
    only_row.state = state
    self._update_row(only_row)
    self.assertIsNone(
        db.get_oldest_pending_db_row_with_lock(self.db_context))
def _test_entry_complete(self, retention, expected_length):
    """Complete an entry under *retention* and count surviving rows."""
    self.cfg.config(completed_rows_retention=retention, group='ml2_odl')
    db.create_pending_row(self.db_session, *test_db.DbTestCase.UPDATE_ROW)
    entry = db.get_all_db_rows(self.db_session)[-1]
    journal.entry_complete(self.db_context, entry)
    remaining = db.get_all_db_rows(self.db_session)
    self.assertEqual(expected_length, len(remaining))
    self.assertTrue(
        all(r.state == odl_const.COMPLETED for r in remaining))
def sync_from_callback(self, operation, res_type_uri, res_id,
                       resource_dict):
    """Journal an operation reported via callback.

    The object type is derived from the resource URI (dashes become
    underscores, trailing 's' stripped).  For creates the UUID comes
    from the payload; otherwise the supplied res_id is used.  When
    present, the payload is unwrapped from its type-keyed envelope.
    """
    object_type = res_type_uri.replace('-', '_')[:-1]
    if operation == 'create':
        object_uuid = resource_dict[object_type]['id']
    else:
        object_uuid = res_id
    if resource_dict is not None:
        resource_dict = resource_dict[object_type]
    db.create_pending_row(db_api.get_session(), object_type, object_uuid,
                          operation, resource_dict)
def test_get_oldest_pending_row_order(self):
    """The row retried longest ago wins, regardless of insertion order."""
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    stale = db.get_all_db_rows(self.db_context)[0]
    stale.last_retried -= timedelta(minutes=1)
    self._update_row(stale)
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    picked = db.get_oldest_pending_db_row_with_lock(self.db_context)
    self.assertEqual(stale, picked)
def update_router(self, context, router_id, router):
    """Apply a router update and journal the resulting dict for ODL."""
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        router_dict = super(OpenDaylightL3RouterPlugin,
                            self).update_router(context, router_id, router)
        db.create_pending_row(context.session, odl_const.ODL_ROUTER,
                              router_id, odl_const.ODL_UPDATE, router_dict)
    return router_dict
def _test_entry_complete(self, retention, expected_length):
    """Complete an entry under the given retention and count survivors."""
    self.cfg.config(completed_rows_retention=retention, group='ml2_odl')
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    entry = db.get_all_db_rows(self.db_context)[-1]
    journal.entry_complete(self.db_context, entry)
    remaining = db.get_all_db_rows(self.db_context)
    self.assertEqual(expected_length, len(remaining))
    self.assertTrue(
        all(r.state == odl_const.COMPLETED for r in remaining))
def record(plugin_context, ml2_context, object_type, object_uuid,
           operation, data):
    """Insert a journal entry, enriching port create/update payloads."""
    port_write = (object_type == odl_const.ODL_PORT and
                  operation in (odl_const.ODL_CREATE,
                                odl_const.ODL_UPDATE))
    if port_write:
        data = _enrich_port(plugin_context, ml2_context, object_type,
                            operation, data)
    db.create_pending_row(plugin_context.session, object_type, object_uuid,
                          operation, data)
def test_get_oldest_pending_row_order(self):
    """The least-recently retried row wins, not the first inserted."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    stale = db.get_all_db_rows(self.db_session)[0]
    stale.last_retried -= timedelta(minutes=1)
    self._update_row(stale)
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    picked = db.get_oldest_pending_db_row_with_lock(self.db_session)
    self.assertEqual(stale, picked)
def test_dependency(self):
    """Each second-entry payload yields the expected dependency count."""
    db.create_pending_row(
        self.db_context, self.first_type, self.first_id,
        self.first_operation,
        get_data(self.first_type, self.first_operation))
    for payload in get_data(self.second_type, self.second_operation):
        deps = dependency_validations.calculate(
            self.db_context, self.second_operation, self.second_type,
            self.second_id, payload)
        self.assertEqual(self.expected, len(deps))
def delete_router(self, context, router_id):
    """Delete a router, journaling its gateway port as a dependency."""
    db_session = db_api.get_session()
    # Record the gateway port UUID so dependency validation can use it
    # when parent objects are deleted.
    dependency_list = [self.get_router(context, router_id)['gw_port_id']]
    with db_session.begin(subtransactions=True):
        super(OpenDaylightL3RouterPlugin, self).delete_router(context,
                                                              router_id)
        db.create_pending_row(context.session, odl_const.ODL_ROUTER,
                              router_id, odl_const.ODL_DELETE,
                              dependency_list)
def test_dependency(self):
    """Dependency calculation yields the expected count per payload."""
    first_payload = get_data(self.first_type, self.first_operation)
    db.create_pending_row(self.db_context, self.first_type, self.first_id,
                          self.first_operation, first_payload)
    for payload in get_data(self.second_type, self.second_operation):
        deps = dependency_validations.calculate(
            self.db_context, self.second_operation, self.second_type,
            self.second_id, payload)
        self.assertEqual(self.expected, len(deps))
def create_floatingip(self, context, floatingip,
                      initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
    """Create a floating IP and journal the creation for ODL."""
    parent = super(OpenDaylightL3RouterPlugin, self)
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        fip_dict = parent.create_floatingip(context, floatingip,
                                            initial_status)
        db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
                              fip_dict['id'], odl_const.ODL_CREATE,
                              fip_dict)
    return fip_dict
def test_get_oldest_pending_row_when_deadlock(self):
    """A deadlock on the first state update must be retried once."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    # First call deadlocks, second proceeds; mocking forces the deadlock
    # regardless of which DB backend runs the tests.
    update_mock = mock.MagicMock(side_effect=(DBDeadlock, mock.DEFAULT))
    with mock.patch.object(db, 'update_db_row_state', new=update_mock):
        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
    self.assertIsNotNone(row)
    self.assertEqual(2, update_mock.call_count)
def _test_validate_updates(self, first_entry, second_entry, expected_deps,
                           state=None):
    """Check whether a second entry would depend on an existing first."""
    db.create_pending_row(self.db_context, *first_entry)
    if state:
        first_row = db.get_all_db_rows(self.db_context)[0]
        first_row.state = state
        self._update_row(first_row)
    deps = db.get_pending_or_processing_ops(
        self.db_context, second_entry[1], second_entry[2])
    self.assertEqual(expected_deps, len(deps) > 0)
def _test_get_oldest_pending_row_with_dep(self, dep_state):
    """Fetch the oldest pending row while its parent is in *dep_state*.

    Returns the fetched row (possibly None) so callers can assert on it;
    whatever is returned must never be the parent itself.
    """
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    parent = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, parent, dep_state)
    db.create_pending_row(self.db_context, *self.UPDATE_ROW,
                          depending_on=[parent])
    picked = db.get_oldest_pending_db_row_with_lock(self.db_context)
    if picked is not None:
        self.assertNotEqual(parent.seqnum, picked.seqnum)
    return picked
def delete_floatingip(self, context, floatingip_id):
    """Delete a floating IP, journaling router/network UUIDs as deps."""
    db_session = db_api.get_session()
    fip = self.get_floatingip(context, floatingip_id)
    # Parent UUIDs are needed for validation when parents are deleted.
    dependency_list = [fip['router_id'], fip['floating_network_id']]
    with db_session.begin(subtransactions=True):
        super(OpenDaylightL3RouterPlugin, self).delete_floatingip(
            context, floatingip_id)
        db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
                              floatingip_id, odl_const.ODL_DELETE,
                              dependency_list)
def delete_floatingip(self, context, floatingip_id):
    """Delete a floating IP, journaling parent UUIDs for validation."""
    db_session = db_api.get_session()
    fip_dict = self.get_floatingip(context, floatingip_id)
    # Router and network UUIDs are checked when those parents get
    # deleted.
    parents = [fip_dict['router_id'], fip_dict['floating_network_id']]
    with db_session.begin(subtransactions=True):
        super(OpenDaylightL3RouterPlugin,
              self).delete_floatingip(context, floatingip_id)
        db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
                              floatingip_id, odl_const.ODL_DELETE, parents)
def test_entry_complete_with_retention_deletes_dependencies(self):
    """Completing a retained entry must sever its dependency links."""
    self.cfg.config(completed_rows_retention=1, group='ml2_odl')
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    parent = db.get_all_db_rows(self.db_context)[-1]
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW,
                          depending_on=[parent])
    child = db.get_all_db_rows(self.db_context)[-1]

    journal.entry_complete(self.db_context, parent)

    rows = db.get_all_db_rows(self.db_context)
    self.assertIn(parent, rows)
    self.assertEqual([], parent.dependencies)
    self.assertEqual([], child.depending_on)
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a router interface and journal the removal for ODL."""
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        new_router = super(
            OpenDaylightL3RouterPlugin, self).remove_router_interface(
                context, router_id, interface_info)
        router_dict = self._generate_router_dict(router_id, interface_info,
                                                 new_router)
        db.create_pending_row(context.session, odl_const.ODL_ROUTER_INTF,
                              odl_const.ODL_UUID_NOT_USED,
                              odl_const.ODL_REMOVE, router_dict)
    return new_router
def _test_update_row_state(self, from_state, to_state):
    """Transition a row through two states, validating after each."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    row = db.get_all_db_rows(self.db_session)[0]
    for target_state in (from_state, to_state):
        db.update_db_row_state(self.db_session, row, target_state)
        # Re-read the row to confirm the state was persisted.
        row = db.get_all_db_rows(self.db_session)[0]
        self.assertEqual(target_state, row.state)
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a router interface, journaling the removal for ODL."""
    parent = super(OpenDaylightL3RouterPlugin, self)
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        new_router = parent.remove_router_interface(context, router_id,
                                                    interface_info)
        payload = self._generate_router_dict(router_id, interface_info,
                                             new_router)
        db.create_pending_row(context.session, odl_const.ODL_ROUTER_INTF,
                              odl_const.ODL_UUID_NOT_USED,
                              odl_const.ODL_REMOVE, payload)
    return new_router
def _test_journal_recovery(self, operation, odl_resource, expected_state):
    """Run journal recovery on a failed row and check its final state."""
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK, 'id',
                          operation, {})
    failed_row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, failed_row, odl_const.FAILED)
    # What ODL reports for the resource drives the recovery decision.
    self._CLIENT.get_resource.return_value = odl_resource

    recovery.journal_recovery(self.db_session)

    recovered = db.get_all_db_rows(self.db_session)[0]
    self.assertEqual(expected_state, recovered['state'])
def _test_get_oldest_pending_row_with_dep(self, dep_state):
    """Fetch the oldest pending row while its parent is in *dep_state*.

    Returns the fetched row (possibly None); it must never be the
    parent itself.
    """
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    parent = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, parent, dep_state)
    db.create_pending_row(self.db_session, *self.UPDATE_ROW,
                          depending_on=[parent])
    picked = db.get_oldest_pending_db_row_with_lock(self.db_session)
    if picked is not None:
        self.assertNotEqual(parent.seqnum, picked.seqnum)
    return picked
def test_entry_reset(self):
    """entry_reset must return a processing entry to pending."""
    for _ in range(2):
        db.create_pending_row(self.db_session,
                              *test_db.DbTestCase.UPDATE_ROW)
    entry = db.get_all_db_rows(self.db_session)[-1]
    entry.state = odl_const.PROCESSING
    self.db_session.merge(entry)
    self.db_session.flush()
    entry = db.get_all_db_rows(self.db_session)[-1]
    self.assertEqual(entry.state, odl_const.PROCESSING)

    journal.entry_reset(self.db_context, entry)

    rows = db.get_all_db_rows(self.db_session)
    self.assertEqual(2, len(rows))
    self.assertTrue(all(r.state == odl_const.PENDING for r in rows))
def _test_no_full_sync_when_canary_in_journal(self, state):
    """No resource sync should happen while a canary row is in *state*."""
    self._mock_canary_missing()
    self._mock_plugin_resources()
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    canary_row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, canary_row, state)

    full_sync.full_sync(self.db_session)

    self._assert_no_journal_rows(db.get_all_db_rows(self.db_session))
def test_entry_reset(self):
    """entry_reset must flip a processing entry back to pending."""
    for _ in range(2):
        db.create_pending_row(self.db_context,
                              *test_db.DbTestCase.UPDATE_ROW)
    entry = db.get_all_db_rows(self.db_context)[-1]
    entry.state = odl_const.PROCESSING
    self.db_context.session.merge(entry)
    self.db_context.session.flush()
    entry = db.get_all_db_rows(self.db_context)[-1]
    self.assertEqual(entry.state, odl_const.PROCESSING)

    journal.entry_reset(self.db_context, entry)

    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(2, len(rows))
    self.assertTrue(all(r.state == odl_const.PENDING for r in rows))
def create_floatingip(self, context, floatingip,
                      initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
    """Create a floating IP and journal a 'create' entry for it."""
    db_session = db_api.get_session()
    with db_session.begin(subtransactions=True):
        fip_dict = super(OpenDaylightL3RouterPlugin,
                         self).create_floatingip(context, floatingip,
                                                 initial_status)
        db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
                              fip_dict['id'], odl_const.ODL_CREATE,
                              fip_dict)
    return fip_dict
def _test_no_full_sync_when_canary_in_journal(self, state):
    """full_sync must not re-sync when a canary row exists in *state*."""
    self._mock_canary_missing()
    self._mock_l2_resources()
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    canary = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, canary, state)

    full_sync.full_sync(self.db_context)

    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual([], self._filter_out_canary(rows))
def _test_no_full_sync_when_canary_in_journal(self, state):
    """full_sync must skip syncing while a canary row exists in *state*."""
    self._mock_canary_missing()
    self._mock_l2_resources()
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    canary = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, canary, state)

    full_sync.full_sync(self.db_session)

    rows = db.get_all_db_rows(self.db_session)
    self.assertEqual([], self._filter_out_canary(rows))
def _test_retry_count(self, retry_num, max_retry, expected_retry_count,
                      expected_state):
    """Bump a row's retry counter and verify the resulting state/count."""
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    row = db.get_all_db_rows(self.db_context)[0]
    # Pre-set the counter so the retry below lands on retry_num.
    row.retry_count = retry_num - 1
    db.update_pending_db_row_retry(self.db_context, row, max_retry)
    row = db.get_all_db_rows(self.db_context)[0]
    self.assertEqual(expected_state, row.state)
    self.assertEqual(expected_retry_count, row.retry_count)