def _handle_existing_resource(session, row):
    if row.operation == odl_const.ODL_CREATE:
        db.update_db_row_state(session, row, odl_const.COMPLETED)
    elif row.operation == odl_const.ODL_DELETE:
        db.update_db_row_state(session, row, odl_const.PENDING)
    else:
        _sync_resource_to_odl(session, row, odl_const.ODL_UPDATE, True)
def _test_update_row_state(self, from_state, to_state, dry_flush=False):
    # add new pending row
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)

    mock_flush = mock.MagicMock(
        side_effect=self.db_context.session.flush)
    if dry_flush:
        patch_flush = mock.patch.object(self.db_context.session, 'flush',
                                        side_effect=mock_flush)

    row = db.get_all_db_rows(self.db_context)[0]
    for state in [from_state, to_state]:
        if dry_flush:
            patch_flush.start()
        try:
            # update the row state
            db.update_db_row_state(self.db_context, row, state,
                                   flush=not dry_flush)
        finally:
            if dry_flush:
                patch_flush.stop()

        # validate the new state
        row = db.get_all_db_rows(self.db_context)[0]
        self.assertEqual(state, row.state)

    return mock_flush
def test_sync_multiple_updates(self):
    # add 2 updates
    for i in range(2):
        self._call_operation_object(odl_const.ODL_UPDATE,
                                    odl_const.ODL_NETWORK)

    # get the first update row
    rows = db.get_all_db_rows(self.db_context)
    rows.sort(key=operator.attrgetter("seqnum"))
    first_row = rows[0]

    # change the state to processing
    db.update_db_row_state(self.db_context, first_row,
                           odl_const.PROCESSING)

    # create 1 more operation to trigger the sync thread
    # verify that there are no calls to ODL controller, because the
    # first row was processing (exit_after_run = true)
    self._test_thread_processing(odl_const.ODL_UPDATE,
                                 odl_const.ODL_NETWORK, expected_calls=0)

    # validate that all the pending rows stay in 'pending' state
    # first row should be 'processing' because it was not processed
    processing = db.get_all_db_rows_by_state(self.db_context, 'processing')
    self.assertEqual(1, len(processing))
    rows = db.get_all_db_rows_by_state(self.db_context, 'pending')
    self.assertEqual(2, len(rows))
def _handle_existing_resource(context, row):
    if row.operation == odl_const.ODL_CREATE:
        journal.entry_complete(context, row)
    elif row.operation == odl_const.ODL_DELETE:
        db.update_db_row_state(context.session, row, odl_const.PENDING)
    else:
        _sync_resource_to_odl(context, row, odl_const.ODL_UPDATE, True)
def _test_dependency_processing(self, test_operation, test_object,
                                test_id, test_context,
                                dep_operation, dep_object,
                                dep_id, dep_context):
    # Mock sendjson to verify that it never gets called.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    db.create_pending_row(self.db_session, dep_object, dep_id,
                          dep_operation, dep_context)
    row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)

    # Create test row with dependent ID.
    db.create_pending_row(self.db_session, test_object, test_id,
                          test_operation, test_context)

    # Call journal thread.
    with mock.patch.object(self.thread.event, 'wait',
                           return_value=False):
        self.thread.run_sync_thread(exit_after_run=True)

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # Verify that sendjson was never called.
    self.assertFalse(mock_sendjson.call_count)
def _test_dependency_processing(
        self, test_operation, test_object, test_id, test_data,
        dep_operation, dep_object, dep_id, dep_data):
    # Mock sendjson to verify that it never gets called.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    ctxt = self.db_context
    journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data)
    row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
    db.update_db_row_state(self.db_context, row[0], odl_const.PROCESSING)

    # Create test row with dependent ID.
    journal.record(ctxt, test_object, test_id, test_operation, test_data)

    # Call journal thread.
    self.thread.sync_pending_entries()

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # Verify that sendjson was never called.
    self.assertFalse(mock_sendjson.call_count)
def test_journal_recovery_retries_exceptions(self):
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, 'id',
                          odl_const.ODL_DELETE, {})
    created_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, created_row, odl_const.FAILED)
    with mock.patch.object(db, 'update_db_row_state') as m:
        self._test_retry_exceptions(recovery.journal_recovery, m)
def _test_update_row_state(self, from_state, to_state, dry_flush=False):
    # add new pending row
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)

    mock_flush = mock.MagicMock(side_effect=self.db_session.flush)
    if dry_flush:
        patch_flush = mock.patch.object(self.db_session, 'flush',
                                        side_effect=mock_flush)

    row = db.get_all_db_rows(self.db_session)[0]
    for state in [from_state, to_state]:
        if dry_flush:
            patch_flush.start()
        try:
            # update the row state
            db.update_db_row_state(self.db_session, row, state,
                                   flush=not dry_flush)
        finally:
            if dry_flush:
                patch_flush.stop()

        # validate the new state
        row = db.get_all_db_rows(self.db_session)[0]
        self.assertEqual(state, row.state)

    return mock_flush
def _handle_existing_resource(context, row):
    if row.operation == odl_const.ODL_CREATE:
        journal.entry_complete(context, row)
    elif row.operation == odl_const.ODL_DELETE:
        db.update_db_row_state(context, row, odl_const.PENDING)
    else:
        _sync_resource_to_odl(context, row, odl_const.ODL_UPDATE, True)
def _test_dependency_processing(self, test_operation, test_object,
                                test_id, test_data,
                                dep_operation, dep_object,
                                dep_id, dep_data):
    # Mock sendjson to verify that it never gets called.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    ctxt = self.db_context
    journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data)
    row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)

    # Create test row with dependent ID.
    journal.record(ctxt, test_object, test_id, test_operation, test_data)

    # Call journal thread.
    self.thread.sync_pending_entries()

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # Verify that sendjson was never called.
    self.assertFalse(mock_sendjson.call_count)
def entry_complete(context, entry):
    session = context.session
    with db_api.autonested_transaction(session):
        if cfg.CONF.ml2_odl.completed_rows_retention == 0:
            db.delete_row(session, entry)
        else:
            db.update_db_row_state(session, entry, odl_const.COMPLETED)
        db.delete_dependency(session, entry)
def _test_get_oldest_pending_row_with_dep(self, dep_state):
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    parent_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, parent_row, dep_state)
    db.create_pending_row(self.db_context, *self.UPDATE_ROW,
                          depending_on=[parent_row])

    row = db.get_oldest_pending_db_row_with_lock(self.db_context)
    if row is not None:
        self.assertNotEqual(parent_row.seqnum, row.seqnum)

    return row
def _test_journal_recovery(self, operation, odl_resource, expected_state):
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK, 'id',
                          operation, {})
    row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, row, odl_const.FAILED)

    self._CLIENT.get_resource.return_value = odl_resource

    recovery.journal_recovery(self.db_session)

    row = db.get_all_db_rows(self.db_session)[0]
    self.assertEqual(expected_state, row['state'])
def _test_get_oldest_pending_row_with_dep(self, dep_state):
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    parent_row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, parent_row, dep_state)
    db.create_pending_row(self.db_session, *self.UPDATE_ROW,
                          depending_on=[parent_row])

    row = db.get_oldest_pending_db_row_with_lock(self.db_session)
    if row is not None:
        self.assertNotEqual(parent_row.seqnum, row.seqnum)

    return row
def _test_update_row_state(self, from_state, to_state):
    # add new pending row
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)

    row = db.get_all_db_rows(self.db_session)[0]
    for state in [from_state, to_state]:
        # update the row state
        db.update_db_row_state(self.db_session, row, state)

        # validate the new state
        row = db.get_all_db_rows(self.db_session)[0]
        self.assertEqual(state, row.state)
def _test_no_full_sync_when_canary_in_journal(self, state):
    self._mock_canary_missing()
    self._mock_l2_resources()
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, row, state)

    full_sync.full_sync(self.db_context)

    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual([], self._filter_out_canary(rows))
def _test_no_full_sync_when_canary_in_journal(self, state):
    self._mock_canary_missing()
    self._mock_plugin_resources()
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, row, state)

    full_sync.full_sync(self.db_session)

    rows = db.get_all_db_rows(self.db_session)
    self._assert_no_journal_rows(rows)
def _test_no_full_sync_when_canary_in_journal(self, state):
    self._mock_canary_missing()
    self._mock_l2_resources()
    db.create_pending_row(self.db_session, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, row, state)

    full_sync.full_sync(self.db_session)

    rows = db.get_all_db_rows(self.db_session)
    self.assertEqual([], self._filter_out_canary(rows))
def _sync_resource_to_odl(context, row, operation_type, exists_on_odl):
    resource = None
    try:
        resource = _get_latest_resource(context, row)
    except nexc.NotFound:
        if exists_on_odl:
            journal.record(context, row.object_type, row.object_uuid,
                           odl_const.ODL_DELETE, [])
    else:
        journal.record(context, row.object_type, row.object_uuid,
                       operation_type, resource)

    db.update_db_row_state(context.session, row, odl_const.COMPLETED)
def _test_cleanup_processing_rows(self, last_retried, expected_state):
    # Create a dummy network (creates db row in pending state).
    self._call_operation_object(odl_const.ODL_CREATE,
                                odl_const.ODL_NETWORK)

    # Get pending row and mark as processing and update
    # the last_retried time
    row = db.get_all_db_rows_by_state(self.db_session,
                                      odl_const.PENDING)[0]
    row.last_retried = last_retried
    db.update_db_row_state(self.db_session, row, odl_const.PROCESSING)

    # Test if the cleanup marks this in the desired state
    # based on the last_retried timestamp
    cleanup.JournalCleanup().cleanup_processing_rows(self.db_session)

    # Verify that the Db row is in the desired state
    rows = db.get_all_db_rows_by_state(self.db_session, expected_state)
    self.assertEqual(1, len(rows))
def _test_cleanup_processing_rows(self, last_retried, expected_state):
    # Create a dummy network (creates db row in pending state).
    self._call_operation_object(odl_const.ODL_CREATE,
                                odl_const.ODL_NETWORK)

    # Get pending row and mark as processing and update
    # the last_retried time
    row = db.get_all_db_rows_by_state(self.db_context,
                                      odl_const.PENDING)[0]
    row.last_retried = last_retried
    db.update_db_row_state(self.db_context, row, odl_const.PROCESSING)

    # Test if the cleanup marks this in the desired state
    # based on the last_retried timestamp
    cleanup.cleanup_processing_rows(self.db_context)

    # Verify that the Db row is in the desired state
    rows = db.get_all_db_rows_by_state(self.db_context, expected_state)
    self.assertEqual(1, len(rows))
def _test_recovery(self, operation, odl_resource, expected_state):
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, 'id',
                          operation, {})
    created_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, created_row, odl_const.FAILED)

    self._CLIENT.get_resource.return_value = odl_resource

    recovery.journal_recovery(self.db_context)

    if expected_state is None:
        completed_rows = db.get_all_db_rows_by_state(
            self.db_context, odl_const.COMPLETED)
        self.assertEqual([], completed_rows)
    else:
        row = db.get_all_db_rows_by_state(self.db_context,
                                          expected_state)[0]
        self.assertEqual(created_row['seqnum'], row['seqnum'])

    return created_row
def _test_object_operation_pending_another_object_operation(
        self, object_type, operation, pending_type, pending_operation):
    # Create the object_type (creates db row in pending state).
    self._call_operation_object(pending_operation, pending_type)

    # Get pending row and mark as processing so that
    # this row will not be processed by journal thread.
    row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
    db.update_db_row_state(self.db_context, row[0], odl_const.PROCESSING)

    # Create the object_type database row and process.
    # Verify that object request is not processed because the
    # dependent object operation has not been marked as 'completed'.
    self._test_thread_processing(operation, object_type,
                                 expected_calls=0)

    # Verify that all rows are still in the database.
    rows = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))
    rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
    self.assertEqual(1, len(rows))
def _sync_pending_rows(self, session, exit_after_run): while True: LOG.debug("Thread walking database") row = db.get_oldest_pending_db_row_with_lock(session) if not row: LOG.debug("No rows to sync") break # Validate the operation validate_func = ( dependency_validations.VALIDATION_MAP[row.object_type]) valid = validate_func(session, row) if not valid: LOG.info( _LI("%(operation)s %(type)s %(uuid)s is not a " "valid operation yet, skipping for now"), { 'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid }) # Set row back to pending. db.update_db_row_state(session, row, odl_const.PENDING) if exit_after_run: break continue LOG.info( _LI("Syncing %(operation)s %(type)s %(uuid)s"), { 'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid }) # Add code to sync this to ODL method, urlpath, to_send = self._json_data(row) try: self.client.sendjson(method, urlpath, to_send) db.update_db_row_state(session, row, odl_const.COMPLETED) except exceptions.ConnectionError as e: # Don't raise the retry count, just log an error LOG.error(_LE("Cannot connect to the Opendaylight Controller")) # Set row back to pending db.update_db_row_state(session, row, odl_const.PENDING) # Break our of the loop and retry with the next # timer interval break except Exception as e: LOG.error( _LE("Error syncing %(type)s %(operation)s," " id %(uuid)s Error: %(error)s"), { 'type': row.object_type, 'uuid': row.object_uuid, 'operation': row.operation, 'error': e.message }) db.update_pending_db_row_retry(session, row, self._row_retry_count)
def _sync_entry(self, context, entry):
    log_dict = {'op': entry.operation, 'type': entry.object_type,
                'id': entry.object_uuid}
    LOG.info("Processing - %(op)s %(type)s %(id)s", log_dict)
    method, urlpath, to_send = self._json_data(entry)

    session = context.session
    try:
        self.client.sendjson(method, urlpath, to_send)
        registry.notify(entry.object_type, odl_const.BEFORE_COMPLETE,
                        self, context=context, operation=entry.operation,
                        row=entry)
        with session.begin():
            db.update_db_row_state(session, entry, odl_const.COMPLETED)
            db.delete_dependency(session, entry)
        self._retry_reset()
    except exceptions.ConnectionError:
        # Don't raise the retry count, just log an error & break
        db.update_db_row_state(session, entry, odl_const.PENDING)
        LOG.error("Cannot connect to the OpenDaylight Controller,"
                  " will not process additional entries")
        self._retry_sleep()
        return True
    except Exception:
        LOG.error("Error while processing %(op)s %(type)s %(id)s",
                  log_dict, exc_info=True)
        db.update_pending_db_row_retry(session, entry,
                                       self._max_retry_count)
        self._retry_sleep()
        return False
def _test_object_operation_pending_another_object_operation(
        self, object_type, operation, pending_type, pending_operation):
    # Create the object_type (creates db row in pending state).
    self._call_operation_object(pending_operation, pending_type)

    # Get pending row and mark as processing so that
    # this row will not be processed by journal thread.
    row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)

    # Create the object_type database row and process.
    # Verify that object request is not processed because the
    # dependent object operation has not been marked as 'completed'.
    self._test_thread_processing(operation, object_type,
                                 expected_calls=0)

    # Verify that all rows are still in the database.
    rows = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))
    rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    self.assertEqual(1, len(rows))
def _test_dependency_processing(
        self, test_operation, test_object, test_id, test_context,
        dep_operation, dep_object, dep_id, dep_context):
    # Mock sendjson to verify that it never gets called.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    db.create_pending_row(self.db_session, dep_object, dep_id,
                          dep_operation, dep_context)
    row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)

    # Create test row with dependent ID.
    db.create_pending_row(self.db_session, test_object, test_id,
                          test_operation, test_context)

    # Call journal thread.
    with mock.patch.object(self.thread.event, 'wait',
                           return_value=False):
        self.thread.run_sync_thread(exit_after_run=True)

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # Verify that sendjson was never called.
    self.assertFalse(mock_sendjson.call_count)
def _sync_pending_rows(self, session, exit_after_run): while True: LOG.debug("Thread walking database") row = db.get_oldest_pending_db_row_with_lock(session) if not row: LOG.debug("No rows to sync") break # Validate the operation validate_func = (dependency_validations. VALIDATION_MAP[row.object_type]) valid = validate_func(session, row) if not valid: LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a " "valid operation yet, skipping for now"), {'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid}) # Set row back to pending. db.update_db_row_state(session, row, odl_const.PENDING) if exit_after_run: break continue LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"), {'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid}) # Add code to sync this to ODL method, urlpath, to_send = self._json_data(row) try: self.client.sendjson(method, urlpath, to_send) db.update_db_row_state(session, row, odl_const.COMPLETED) except exceptions.ConnectionError as e: # Don't raise the retry count, just log an error LOG.error(_LE("Cannot connect to the Opendaylight Controller")) # Set row back to pending db.update_db_row_state(session, row, odl_const.PENDING) # Break our of the loop and retry with the next # timer interval break except Exception as e: LOG.error(_LE("Error syncing %(type)s %(operation)s," " id %(uuid)s Error: %(error)s"), {'type': row.object_type, 'uuid': row.object_uuid, 'operation': row.operation, 'error': e.message}) db.update_pending_db_row_retry(session, row, self._row_retry_count)
def _sync_pending_entries(self, session, exit_after_run): LOG.debug("Start processing journal entries") entry = db.get_oldest_pending_db_row_with_lock(session) if entry is None: LOG.debug("No journal entries to process") return while entry is not None: log_dict = { 'op': entry.operation, 'type': entry.object_type, 'id': entry.object_uuid } valid = dependency_validations.validate(session, entry) if not valid: db.update_db_row_state(session, entry, odl_const.PENDING) LOG.info( "Skipping %(op)s %(type)s %(id)s due to " "unprocessed dependencies.", log_dict) if exit_after_run: break continue LOG.info("Processing - %(op)s %(type)s %(id)s", log_dict) method, urlpath, to_send = self._json_data(entry) try: self.client.sendjson(method, urlpath, to_send) db.update_db_row_state(session, entry, odl_const.COMPLETED) except exceptions.ConnectionError as e: # Don't raise the retry count, just log an error & break db.update_db_row_state(session, entry, odl_const.PENDING) LOG.error("Cannot connect to the OpenDaylight Controller," " will not process additional entries") break except Exception as e: log_dict['error'] = e.message LOG.error( "Error while processing %(op)s %(type)s %(id)s;" " Error: %(error)s", log_dict) db.update_pending_db_row_retry(session, entry, self._max_retry_count) entry = db.get_oldest_pending_db_row_with_lock(session) LOG.debug("Finished processing journal entries")
def _handle_non_existing_resource(context, row):
    if row.operation == odl_const.ODL_DELETE:
        db.update_db_row_state(context.session, row, odl_const.COMPLETED)
    else:
        _sync_resource_to_odl(context, row, odl_const.ODL_CREATE, False)
def entry_reset(context, entry):
    session = context.session
    with db_api.autonested_transaction(session):
        db.update_db_row_state(session, entry, odl_const.PENDING)
def entry_complete(context, entry):
    if cfg.CONF.ml2_odl.completed_rows_retention == 0:
        db.delete_row(context, entry)
    else:
        db.update_db_row_state(context, entry, odl_const.COMPLETED)
    db.delete_dependency(context, entry)
def _handle_non_existing_resource(session, row):
    if row.operation in _DELETE_OPS:
        db.update_db_row_state(session, row, odl_const.COMPLETED)
def entry_reset(context, entry):
    db.update_db_row_state(context, entry, odl_const.PENDING)
def sync_pending_row(self, exit_after_run=False): # Block until all pending rows are processed session = neutron_db_api.get_session() while not self.event.is_set(): self.event.wait() # Clear the event and go back to waiting after # the sync block exits self.event.clear() while True: LOG.debug("Thread walking database") row = db.get_oldest_pending_db_row_with_lock(session) if not row: LOG.debug("No rows to sync") break # Validate the operation validate_func = (dependency_validations. VALIDATION_MAP[row.object_type]) valid = validate_func(session, row) if not valid: LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a " "valid operation yet, skipping for now"), {'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid}) # Set row back to pending. db.update_db_row_state(session, row, odl_const.PENDING) if exit_after_run: break continue LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"), {'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid}) # Add code to sync this to ODL method, urlpath, to_send = self._json_data(row) try: self.client.sendjson(method, urlpath, to_send) db.update_db_row_state(session, row, odl_const.COMPLETED) except exceptions.ConnectionError as e: # Don't raise the retry count, just log an error LOG.error(_LE("Cannot connect to the Opendaylight " "Controller")) # Set row back to pending db.update_db_row_state(session, row, odl_const.PENDING) # Break our of the loop and retry with the next # timer interval break except Exception as e: LOG.error(_LE("Error syncing %(type)s %(operation)s," " id %(uuid)s Error: %(error)s"), {'type': row.object_type, 'uuid': row.object_uuid, 'operation': row.operation, 'error': e.message}) db.update_pending_db_row_retry(session, row, self._row_retry_count) LOG.debug("Clearing sync thread event") if exit_after_run: # Permanently waiting thread model breaks unit tests # Adding this arg to exit here only for unit tests break
def sync_pending_row(self, exit_after_run=False): # Block until all pending rows are processed session = neutron_db_api.get_session() while not self.event.is_set(): self.event.wait() # Clear the event and go back to waiting after # the sync block exits self.event.clear() while True: LOG.debug("Thread walking database") row = db.get_oldest_pending_db_row_with_lock(session) if not row: LOG.debug("No rows to sync") break # Validate the operation validate_func = ( dependency_validations.VALIDATION_MAP[row.object_type]) valid = validate_func(session, row) if not valid: LOG.info( _LI("%(operation)s %(type)s %(uuid)s is not a " "valid operation yet, skipping for now"), { 'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid }) # Set row back to pending. db.update_db_row_state(session, row, odl_const.PENDING) if exit_after_run: break continue LOG.info( _LI("Syncing %(operation)s %(type)s %(uuid)s"), { 'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid }) # Add code to sync this to ODL method, urlpath, to_send = self._json_data(row) try: self.client.sendjson(method, urlpath, to_send) db.update_db_row_state(session, row, odl_const.COMPLETED) except exceptions.ConnectionError as e: # Don't raise the retry count, just log an error LOG.error( _LE("Cannot connect to the Opendaylight " "Controller")) # Set row back to pending db.update_db_row_state(session, row, odl_const.PENDING) # Break our of the loop and retry with the next # timer interval break except Exception as e: LOG.error( _LE("Error syncing %(type)s %(operation)s," " id %(uuid)s Error: %(error)s"), { 'type': row.object_type, 'uuid': row.object_uuid, 'operation': row.operation, 'error': e.message }) db.update_pending_db_row_retry(session, row, self._row_retry_count) LOG.debug("Clearing sync thread event") if exit_after_run: # Permanently waiting thread model breaks unit tests # Adding this arg to exit here only for unit tests break