def _test_update_row_state(self, from_state, to_state, dry_flush=False):
    """Move a pending row through ``from_state`` then ``to_state``.

    :param from_state: first state to set on the row.
    :param to_state: second state to set on the row.
    :param dry_flush: when True, ``session.flush`` is patched with a mock
        (which still delegates to the real flush) and
        ``update_db_row_state`` is called with ``flush=False``.
    :returns: the flush mock so callers can assert on flush invocations.
        NOTE(review): when ``dry_flush`` is False the returned mock was
        never installed, so it records no calls — confirm callers only
        inspect it in the dry-flush case.
    """
    # add new pending row
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    # Capture the real flush before patching so the mock can delegate.
    mock_flush = mock.MagicMock(
        side_effect=self.db_context.session.flush)
    if dry_flush:
        patch_flush = mock.patch.object(self.db_context.session,
                                        'flush',
                                        side_effect=mock_flush)
    row = db.get_all_db_rows(self.db_context)[0]
    for state in [from_state, to_state]:
        if dry_flush:
            patch_flush.start()
        try:
            # update the row state
            db.update_db_row_state(self.db_context, row, state,
                                   flush=not dry_flush)
        finally:
            # Always undo the patch so a failing update can't leak it
            # into other tests.
            if dry_flush:
                patch_flush.stop()
        # validate the new state
        row = db.get_all_db_rows(self.db_context)[0]
        self.assertEqual(state, row.state)
    return mock_flush
def _test_delete_row(self, by_row=False, by_row_id=False, dry_flush=False):
    """Delete one of two journal rows and verify only it is gone.

    :param by_row: pass the row object itself to ``delete_row``.
    :param by_row_id: pass the row's ``seqnum`` instead.
    :param dry_flush: patch ``session.flush`` with a mock (delegating to
        the real flush) and ask ``delete_row`` not to flush itself.
    :returns: the started flush mock when ``dry_flush`` is True,
        otherwise None.
    """
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(len(rows), 2)
    # Target the newest row so seqnum comparison below is meaningful.
    row = rows[-1]
    params = {'flush': not dry_flush}
    if by_row:
        params['row'] = row
    elif by_row_id:
        params['row_id'] = row.seqnum
    mock_flush = None
    if dry_flush:
        patch_flush = mock.patch.object(
            self.db_context.session, 'flush',
            side_effect=self.db_context.session.flush
        )
        mock_flush = patch_flush.start()
    try:
        db.delete_row(self.db_context, **params)
    finally:
        if dry_flush:
            patch_flush.stop()
            # Flush for real now so the assertions below observe the
            # delete even though delete_row itself did not flush.
            self.db_context.session.flush()
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(len(rows), 1)
    self.assertNotEqual(row.seqnum, rows[0].seqnum)
    return mock_flush
def _test_entry_complete(self, retention, expected_length):
    """Complete one entry and check the retained row count and states.

    :param retention: value for the ``completed_rows_retention`` option.
    :param expected_length: rows expected to remain in the journal.
    """
    self.cfg.config(completed_rows_retention=retention, group='ml2_odl')
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    entry = db.get_all_db_rows(self.db_context)[-1]
    journal.entry_complete(self.db_context, entry)
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(expected_length, len(rows))
    # Whatever is retained must be in the COMPLETED state.
    self.assertTrue(
        all(row.state == odl_const.COMPLETED for row in rows))
def test_entry_complete_with_retention_deletes_dependencies(self):
    """Completing an entry severs dependency links even when retained."""
    self.cfg.config(completed_rows_retention=1, group='ml2_odl')
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    entry = db.get_all_db_rows(self.db_context)[-1]
    # Second row depends on the first one.
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW,
                          depending_on=[entry])
    dependant = db.get_all_db_rows(self.db_context)[-1]
    journal.entry_complete(self.db_context, entry)
    rows = db.get_all_db_rows(self.db_context)
    # Retention keeps the completed entry itself in the journal...
    self.assertIn(entry, rows)
    # ...but both directions of the dependency link must be cleared.
    self.assertEqual([], entry.dependencies)
    self.assertEqual([], dependant.depending_on)
def _test_update_row_state(self, from_state, to_state):
    """Create one pending row and step it through the two given states."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    row = db.get_all_db_rows(self.db_session)[0]
    for target_state in [from_state, to_state]:
        # Apply the transition, then re-read the row to confirm it stuck.
        db.update_db_row_state(self.db_session, row, target_state)
        row = db.get_all_db_rows(self.db_session)[0]
        self.assertEqual(target_state, row.state)
def test_entry_reset(self):
    """entry_reset returns a PROCESSING row (and peers) to PENDING."""
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    entry = db.get_all_db_rows(self.db_context)[-1]
    # Force the row into PROCESSING directly via the session.
    entry.state = odl_const.PROCESSING
    self.db_context.session.merge(entry)
    self.db_context.session.flush()
    # Sanity check the setup took effect before exercising entry_reset.
    entry = db.get_all_db_rows(self.db_context)[-1]
    self.assertEqual(entry.state, odl_const.PROCESSING)
    journal.entry_reset(self.db_context, entry)
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(2, len(rows))
    self.assertTrue(all(row.state == odl_const.PENDING for row in rows))
def _test_no_full_sync_when_canary_in_journal(self, state):
    """Full sync adds no rows while a canary row exists in ``state``."""
    self._mock_canary_missing()
    self._mock_plugin_resources()
    # Seed a canary-network journal entry in the requested state.
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, row, state)
    full_sync.full_sync(self.db_session)
    rows = db.get_all_db_rows(self.db_session)
    self._assert_no_journal_rows(rows)
def _test_no_full_sync_when_canary_in_journal(self, state):
    """Full sync adds no rows while a canary row exists in ``state``."""
    self._mock_canary_missing()
    self._mock_l2_resources()
    # Seed a canary-network journal entry in the requested state.
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, row, state)
    full_sync.full_sync(self.db_context)
    rows = db.get_all_db_rows(self.db_context)
    # Only the canary row itself may remain.
    self.assertEqual([], self._filter_out_canary(rows))
def _test_retry_count(self, retry_num, max_retry, expected_retry_count,
                      expected_state):
    """Exercise retry bookkeeping against a configured maximum.

    :param retry_num: retry attempt being simulated (row starts at
        ``retry_num - 1`` so the update performs attempt ``retry_num``).
    :param max_retry: maximum retries before the row is failed.
    :param expected_retry_count: retry_count expected afterwards.
    :param expected_state: state expected afterwards.
    """
    # add new pending row
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    # update the row with the requested retry_num
    row = db.get_all_db_rows(self.db_context)[0]
    row.retry_count = retry_num - 1
    db.update_pending_db_row_retry(self.db_context, row, max_retry)
    # validate the state and the retry_count of the row
    row = db.get_all_db_rows(self.db_context)[0]
    self.assertEqual(expected_state, row.state)
    self.assertEqual(expected_retry_count, row.retry_count)
def test_sync_multiple_updates(self):
    """A PROCESSING head row blocks the sync thread from calling ODL."""
    # add 2 updates
    for i in range(2):
        self._call_operation_object(odl_const.ODL_UPDATE,
                                    odl_const.ODL_NETWORK)
    # get the last update row
    rows = db.get_all_db_rows(self.db_context)
    rows.sort(key=operator.attrgetter("seqnum"))
    first_row = rows[0]
    # change the state to processing
    db.update_db_row_state(self.db_context, first_row,
                           odl_const.PROCESSING)
    # create 1 more operation to trigger the sync thread
    # verify that there are no calls to ODL controller, because the
    # first row was processing (exit_after_run = true)
    self._test_thread_processing(odl_const.ODL_UPDATE,
                                 odl_const.ODL_NETWORK,
                                 expected_calls=0)
    # validate that all the pending rows stays in 'pending' state
    # first row should be 'processing' because it was not processed
    processing = db.get_all_db_rows_by_state(self.db_context,
                                             'processing')
    self.assertEqual(1, len(processing))
    rows = db.get_all_db_rows_by_state(self.db_context, 'pending')
    self.assertEqual(2, len(rows))
def test_get_oldest_pending_row_returns_parent_when_dep_pending(self):
    """A row with a still-pending dependency yields to its parent row."""
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    parent = db.get_all_db_rows(self.db_context)[0]
    # The second row must wait on the first, so the first is selected.
    db.create_pending_row(self.db_context, *self.UPDATE_ROW,
                          depending_on=[parent])
    selected = db.get_oldest_pending_db_row_with_lock(self.db_context)
    self.assertEqual(parent, selected)
def _test_delete_rows_by_state_and_time(self, last_retried, row_retention, state, expected_rows): db.create_pending_row(self.db_session, *self.UPDATE_ROW) # update state and last retried row = db.get_all_db_rows(self.db_session)[0] row.state = state row.last_retried = row.last_retried - timedelta(seconds=last_retried) self._update_row(row) db.delete_rows_by_state_and_time(self.db_session, odl_const.COMPLETED, timedelta(seconds=row_retention)) # validate the number of rows in the journal rows = db.get_all_db_rows(self.db_session) self.assertEqual(expected_rows, len(rows))
def _test_get_oldest_pending_row_none(self, state):
    """No row is selectable once the only journal row is in ``state``."""
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    only_row = db.get_all_db_rows(self.db_context)[0]
    only_row.state = state
    self._update_row(only_row)
    selected = db.get_oldest_pending_db_row_with_lock(self.db_context)
    self.assertIsNone(selected)
def test_journal_recovery_retries_exceptions(self):
    """journal_recovery retries when state updates raise exceptions."""
    # Seed a FAILED delete entry for recovery to pick up.
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, 'id',
                          odl_const.ODL_DELETE, {})
    created_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, created_row, odl_const.FAILED)
    # Patch the state update so the shared retry helper can inject
    # exceptions into journal_recovery's processing.
    with mock.patch.object(db, 'update_db_row_state') as m:
        self._test_retry_exceptions(recovery.journal_recovery, m)
def test_get_oldest_pending_row_order(self):
    """The row with the oldest last_retried timestamp is picked first."""
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    aged_row = db.get_all_db_rows(self.db_context)[0]
    # Backdate the first row so it is strictly older than the second.
    aged_row.last_retried -= timedelta(minutes=1)
    self._update_row(aged_row)
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    picked = db.get_oldest_pending_db_row_with_lock(self.db_context)
    self.assertEqual(aged_row, picked)
def _test_db_results(self, object_id, operation, object_type):
    """Assert the journal holds exactly one row matching the arguments."""
    journal_rows = db.get_all_db_rows(self.db_session)
    self.assertEqual(1, len(journal_rows))
    entry = journal_rows[0]
    self.assertEqual(operation, entry['operation'])
    self.assertEqual(object_type, entry['object_type'])
    self.assertEqual(object_id, entry['object_uuid'])
    # Leave the journal empty for the next check.
    self._db_cleanup()
def test_get_oldest_pending_row_order(self):
    """The row with the oldest last_retried timestamp is picked first."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    older_row = db.get_all_db_rows(self.db_session)[0]
    # Backdate the first row so it is strictly older than the second.
    older_row.last_retried -= timedelta(minutes=1)
    self._update_row(older_row)
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    row = db.get_oldest_pending_db_row_with_lock(self.db_session)
    self.assertEqual(older_row, row)
def test_sync_resources(self):
    """sync_resources journals exactly the plugin's resources."""
    self._register_resources()
    plugin = helper.TestPlugin()
    self.add_plugin(helper.TEST_PLUGIN, plugin)
    resources = plugin.get_test_resource1s(self.db_context)
    full_sync.sync_resources(self.db_context, helper.TEST_RESOURCE1)
    entries = [entry.data
               for entry in db.get_all_db_rows(self.db_context)]
    # Every plugin resource is journaled, and nothing else is.
    for resource in resources:
        self.assertIn(resource, entries)
    self.assertEqual(len(resources), len(entries))
def test_plugin_not_registered(self): self._register_resources() # NOTE(rajivk): workaround, as we don't have delete method for plugin plugin = directory.get_plugin(helper.TEST_PLUGIN) directory.add_plugin(helper.TEST_PLUGIN, None) self.addCleanup(self.add_plugin, helper.TEST_PLUGIN, plugin) self.assertRaises(exceptions.PluginMethodNotFound, full_sync.sync_resources, self.db_context, helper.TEST_RESOURCE1) self.assertEqual([], db.get_all_db_rows(self.db_context))
def _test_validate_updates(self, first_entry, second_entry, expected_deps,
                           state=None):
    """Check whether ``second_entry`` would depend on ``first_entry``.

    :param first_entry: args for the row seeded into the journal.
    :param second_entry: prospective entry; index 1 is the object type
        and index 2 the object uuid passed to the dependency query.
    :param expected_deps: whether a dependency is expected to be found.
    :param state: optional state to place the seeded row in first.
    """
    db.create_pending_row(self.db_context, *first_entry)
    if state:
        row = db.get_all_db_rows(self.db_context)[0]
        row.state = state
        self._update_row(row)
    deps = db.get_pending_or_processing_ops(
        self.db_context, second_entry[1], second_entry[2])
    self.assertEqual(expected_deps, len(deps) != 0)
def _test_get_oldest_pending_row_with_dep(self, dep_state):
    """Select the oldest row when its parent dependency is in ``dep_state``.

    :param dep_state: state to place the parent row in.
    :returns: the selected row, or None when nothing was selectable.
    """
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    parent_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, parent_row, dep_state)
    db.create_pending_row(self.db_context, *self.UPDATE_ROW,
                          depending_on=[parent_row])
    row = db.get_oldest_pending_db_row_with_lock(self.db_context)
    # The parent itself must never be re-selected once in dep_state.
    if row is not None:
        self.assertNotEqual(parent_row.seqnum, row.seqnum)
    return row
def test_entry_set_retry_count(self):
    """Retries bump retry_count until the entry is marked FAILED.

    NOTE(review): rows are created via ``self.db_session`` but the
    journal calls use ``self.db_context`` — presumably both refer to the
    same underlying session; confirm against the test base class.
    """
    db.create_pending_row(self.db_session, *test_db.DbTestCase.UPDATE_ROW)
    entry_baseline = db.get_all_db_rows(self.db_session)[-1]
    db.create_pending_row(self.db_session, *test_db.DbTestCase.UPDATE_ROW)
    entry_target = db.get_all_db_rows(self.db_session)[-1]
    # Both entries start identical: zero retries, same state.
    self.assertEqual(entry_target.retry_count, 0)
    self.assertEqual(entry_target.retry_count, entry_baseline.retry_count)
    self.assertEqual(entry_target.state, entry_baseline.state)
    # First retry (max 1): still PENDING.
    journal.entry_update_state_by_retry_count(self.db_context,
                                              entry_target, 1)
    self.assertEqual(entry_target.retry_count, 1)
    self.assertEqual(entry_target.state, odl_const.PENDING)
    # Second retry exceeds the max: entry becomes FAILED.
    journal.entry_update_state_by_retry_count(self.db_context,
                                              entry_target, 1)
    self.assertEqual(entry_target.retry_count, 1)
    self.assertEqual(entry_target.state, odl_const.FAILED)
    # The untouched baseline entry must be unaffected.
    self.assertNotEqual(entry_target.state, entry_baseline.state)
    self.assertNotEqual(entry_target.retry_count,
                        entry_baseline.retry_count)
def test_entry_set_retry_count(self):
    """Retries bump retry_count until the entry is marked FAILED."""
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    entry_baseline = db.get_all_db_rows(self.db_context)[-1]
    db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
    entry_target = db.get_all_db_rows(self.db_context)[-1]
    # Both entries start identical: zero retries, same state.
    self.assertEqual(entry_target.retry_count, 0)
    self.assertEqual(entry_target.retry_count, entry_baseline.retry_count)
    self.assertEqual(entry_target.state, entry_baseline.state)
    # First retry (max 1): still PENDING.
    journal.entry_update_state_by_retry_count(
        self.db_context, entry_target, 1)
    self.assertEqual(entry_target.retry_count, 1)
    self.assertEqual(entry_target.state, odl_const.PENDING)
    # Second retry exceeds the max: entry becomes FAILED.
    journal.entry_update_state_by_retry_count(
        self.db_context, entry_target, 1)
    self.assertEqual(entry_target.retry_count, 1)
    self.assertEqual(entry_target.state, odl_const.FAILED)
    # The untouched baseline entry must be unaffected.
    self.assertNotEqual(entry_target.state, entry_baseline.state)
    self.assertNotEqual(entry_target.retry_count,
                        entry_baseline.retry_count)
def _test_get_oldest_pending_row_with_dep(self, dep_state):
    """Select the oldest row when its parent dependency is in ``dep_state``.

    :param dep_state: state to place the parent row in.
    :returns: the selected row, or None when nothing was selectable.
    """
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    parent_row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, parent_row, dep_state)
    db.create_pending_row(self.db_session, *self.UPDATE_ROW,
                          depending_on=[parent_row])
    row = db.get_oldest_pending_db_row_with_lock(self.db_session)
    # The parent itself must never be re-selected once in dep_state.
    if row is not None:
        self.assertNotEqual(parent_row.seqnum, row.seqnum)
    return row
def test_get_resources_failed(self, mock_get_resources):
    """A failing resource fetch propagates and journals nothing."""
    self._register_resources()
    mock_get_resources.side_effect = exceptions.UnsupportedResourceType()
    resource_name = helper.TEST_RESOURCE1
    self.assertRaises(exceptions.UnsupportedResourceType,
                      full_sync.sync_resources, self.db_context,
                      resource_name)
    mock_get_resources.assert_called_once_with(self.db_context,
                                               resource_name)
    # The failure must not leave partial journal entries behind.
    self.assertEqual([], db.get_all_db_rows(self.db_context))
def _test_recovery(self, operation, odl_resource, expected_state):
    """Run journal_recovery over a FAILED row and check its new state.

    :param operation: journal operation for the seeded row.
    :param odl_resource: resource the mocked ODL client returns.
    :param expected_state: state the row should end up in.
    :returns: the originally created row.
    """
    db.create_pending_row(
        self.db_session, odl_const.ODL_NETWORK, 'id', operation, {})
    created_row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, created_row, odl_const.FAILED)
    self._CLIENT.get_resource.return_value = odl_resource
    recovery.journal_recovery(self.db_session)
    # The same row (matched by seqnum) must now be in expected_state.
    row = db.get_all_db_rows_by_state(self.db_session, expected_state)[0]
    self.assertEqual(created_row['seqnum'], row['seqnum'])
    return created_row
def _test_delete_rows_by_state_and_time(self, last_retried, row_retention, state, expected_rows, dry_delete=False): db.create_pending_row(self.db_session, *self.UPDATE_ROW) # update state and last retried row = db.get_all_db_rows(self.db_session)[-1] row.state = state row.last_retried = row.last_retried - timedelta(seconds=last_retried) self._update_row(row) if not dry_delete: db.delete_rows_by_state_and_time(self.db_session, odl_const.COMPLETED, timedelta(seconds=row_retention)) # validate the number of rows in the journal rows = db.get_all_db_rows(self.db_session) self.assertEqual(expected_rows, len(rows))
def _test_validate_updates(self, first_entry, second_entry, expected_deps,
                           state=None):
    """Check whether ``second_entry`` would depend on ``first_entry``.

    :param first_entry: args for the row seeded into the journal.
    :param second_entry: prospective entry; index 1 is the object type
        and index 2 the object uuid passed to the dependency query.
    :param expected_deps: whether a dependency is expected to be found.
    :param state: optional state to place the seeded row in first.
    """
    db.create_pending_row(self.db_session, *first_entry)
    if state:
        row = db.get_all_db_rows(self.db_session)[0]
        row.state = state
        self._update_row(row)
    deps = db.get_pending_or_processing_ops(self.db_session,
                                            second_entry[1],
                                            second_entry[2])
    self.assertEqual(expected_deps, len(deps) != 0)
def _test_validate_updates(self, rows, time_deltas, expected_validations):
    """Backdate rows by the given hour offsets and validate ordering.

    :param rows: argument tuples used to create journal rows.
    :param time_deltas: hours to subtract from each row's created_at.
    :param expected_validations: per-row expected validity (no older op).
    """
    for row in rows:
        db.create_pending_row(self.db_session, *row)
    # update row created_at
    rows = db.get_all_db_rows(self.db_session)
    now = datetime.now()
    for row, time_delta in zip(rows, time_deltas):
        row.created_at = now - timedelta(hours=time_delta)
        self._update_row(row)
    # validate if there are older rows
    for row, expected_valid in zip(rows, expected_validations):
        valid = not db.check_for_older_ops(self.db_session, row)
        self.assertEqual(expected_valid, valid)
def _test_validate_updates(self, rows, expected_validations, states=None):
    """Set per-row states and validate older-operation detection.

    :param rows: argument tuples used to create journal rows.
    :param expected_validations: per-row expected validity (no older op).
    :param states: optional per-row states to apply, in seqnum order.
    """
    states = states or []
    for row in rows:
        db.create_pending_row(self.db_session, *row)
    # update row created_at
    rows = db.get_all_db_rows(self.db_session)
    rows.sort(key=operator.attrgetter("seqnum"))
    for row, state in zip(rows, states):
        row.state = state
        self._update_row(row)
    # validate if there are older rows
    for row, expected_valid in zip(rows, expected_validations):
        valid = not db.check_for_older_ops(self.db_session, row)
        self.assertEqual(expected_valid, valid)
def test_dependency(self):
    """Validate dependency results for the scenario's two operations."""
    db.create_pending_row(self.db_session, self.first_type, self.first_id,
                          self.first_operation,
                          get_data(self.first_type, self.first_operation))
    db.create_pending_row(
        self.db_session, self.second_type, self.second_id,
        self.second_operation,
        get_data(self.second_type, self.second_operation))
    # Walk the rows in creation (seqnum) order; a None expectation
    # means that row's validation result is not checked.
    for idx, row in enumerate(
            sorted(db.get_all_db_rows(self.db_session),
                   key=lambda x: x.seqnum)):
        if self.expected[idx] is not None:
            self.assertEqual(
                self.expected[idx],
                dependency_validations.validate(self.db_session, row))
def _test_recovery(self, operation, odl_resource, expected_state):
    """Run journal_recovery over a FAILED row and check its new state.

    :param operation: journal operation for the seeded row.
    :param odl_resource: resource the mocked ODL client returns.
    :param expected_state: state the row should end up in, or None to
        assert that no row was moved to COMPLETED.
    :returns: the originally created row.
    """
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, 'id',
                          operation, {})
    created_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, created_row, odl_const.FAILED)
    self._CLIENT.get_resource.return_value = odl_resource
    recovery.journal_recovery(self.db_context)
    if expected_state is None:
        completed_rows = db.get_all_db_rows_by_state(
            self.db_context, odl_const.COMPLETED)
        self.assertEqual([], completed_rows)
    else:
        # The same row (matched by seqnum) must now be in expected_state.
        row = db.get_all_db_rows_by_state(self.db_context,
                                          expected_state)[0]
        self.assertEqual(created_row['seqnum'], row['seqnum'])
    return created_row
def _test_reset_processing_rows(self, last_retried, max_timedelta,
                                quantity, dry_reset=False):
    """Age a PROCESSING row and optionally reset it back to PENDING.

    :param last_retried: seconds to backdate the row's last_retried.
    :param max_timedelta: age threshold passed to reset_processing_rows.
    :param quantity: rows expected in the final state (and, when the
        reset runs, the reset call's expected return value).
    :param dry_reset: when True, skip the reset so rows stay PROCESSING.
    """
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    expected_state = odl_const.PROCESSING
    row = db.get_all_db_rows(self.db_context)[-1]
    row.state = expected_state
    row.last_retried = row.last_retried - timedelta(seconds=last_retried)
    self._update_row(row)
    if not dry_reset:
        expected_state = odl_const.PENDING
        reset = db.reset_processing_rows(self.db_context, max_timedelta)
        self.assertIsInstance(reset, int)
        self.assertEqual(reset, quantity)
    rows = db.get_all_db_rows_by_state(self.db_context, expected_state)
    self.assertEqual(len(rows), quantity)
    for row in rows:
        self.assertEqual(row.state, expected_state)
def test_sync_multiple_updates(self):
    """An invalid (backdated) row stops the sync thread from calling ODL."""
    # add 2 updates
    for i in range(2):
        self._call_operation_object(odl_const.ODL_UPDATE,
                                    odl_const.ODL_NETWORK)
    # get the last update row
    last_row = db.get_all_db_rows(self.db_session)[-1]
    # change the last update created time
    self._decrease_row_created_time(last_row)
    # create 1 more operation to trigger the sync thread
    # verify that there are no calls to ODL controller, because the
    # first row was not valid (exit_after_run = true)
    self._test_thread_processing(odl_const.ODL_UPDATE,
                                 odl_const.ODL_NETWORK,
                                 expected_calls=0)
    # validate that all the rows are in 'pending' state
    # first row should be set back to 'pending' because it was not valid
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(3, len(rows))
def test_no_full_sync_when_canary_exists(self):
    """Full sync is a no-op when the canary network is already present."""
    full_sync.full_sync(self.db_session)
    journal_rows = db.get_all_db_rows(self.db_session)
    self.assertEqual([], journal_rows)
def test_create_pending_row(self):
    """create_pending_row returns the new row and persists it."""
    row = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    self.assertIsNotNone(row)
    rows = db.get_all_db_rows(self.db_context)
    # assertIn reports the container contents on failure, unlike the
    # original assertTrue(row in rows), which only says "False is not true".
    self.assertIn(row, rows)
def test_no_full_sync_when_canary_exists(self):
    """Full sync is a no-op when the canary network is already present."""
    full_sync.full_sync(self.db_context)
    journal_rows = db.get_all_db_rows(self.db_context)
    self.assertEqual([], journal_rows)
def _db_cleanup(self):
    """Remove every journal row so the next check starts from empty."""
    session = self.db_session
    for journal_row in db.get_all_db_rows(session):
        db.delete_row(session, row=journal_row)
def test_object_not_registered(self):
    """Unknown resource types raise and leave the journal empty."""
    self.assertRaises(exceptions.ResourceNotRegistered,
                      full_sync.sync_resources, self.db_context,
                      'test-object-type')
    self.assertEqual([], db.get_all_db_rows(self.db_context))
def test_create_pending_row(self):
    """create_pending_row returns the new row and persists it."""
    row = db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    self.assertIsNotNone(row)
    rows = db.get_all_db_rows(self.db_session)
    # assertIn reports the container contents on failure, unlike the
    # original assertTrue(row in rows), which only says "False is not true".
    self.assertIn(row, rows)
def _create_row(self):
    """Create a single pending journal row and return it."""
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    journal_rows = db.get_all_db_rows(self.db_session)
    self.assertEqual(1, len(journal_rows))
    return journal_rows[0]
def _assert_canary_created(self):
    """Assert the full-sync canary network row exists; return all rows."""
    journal_rows = db.get_all_db_rows(self.db_session)
    canary_present = any(
        entry['object_uuid'] == full_sync._CANARY_NETWORK_ID
        for entry in journal_rows)
    self.assertTrue(canary_present)
    return journal_rows