def _test_object_type_processing_network(self, object_type):
    """Verify an object create is deferred while its network is processing.

    The network dependency row is pinned in 'processing' so the journal
    thread cannot complete it; the dependent object must then be skipped
    and returned to 'pending'.
    """
    # Queue a network create; this leaves one journal row in 'pending'.
    self._call_operation_object(odl_const.ODL_CREATE,
                                odl_const.ODL_NETWORK)

    # Pin that row in 'processing' so the journal thread skips it.
    pending = db.get_all_db_rows_by_state(self.db_session, 'pending')
    db.update_pending_db_row_processing(self.db_session, pending[0])

    # Run the thread over the object_type row: zero ODL calls expected
    # because its network dependency never reached 'completed'.
    self._test_thread_processing(odl_const.ODL_CREATE, object_type,
                                 expected_calls=0)

    # The dependency row must still be 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session, 'processing')
    self.assertEqual(1, len(rows))

    # The dependent row must have been put back to 'pending' so it is
    # retried on a later pass.
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(1, len(rows))
def _test_dependency_processing(
        self, test_operation, test_object, test_id, test_context,
        dep_operation, dep_object, dep_id, dep_context):
    """Check that an entry with an unmet dependency is never sent to ODL."""
    # Patch sendjson so we can assert it is never invoked.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Insert the dependency row and move it to 'processing' so the
    # journal thread leaves it alone.
    db.create_pending_row(self.db_session, dep_object, dep_id,
                          dep_operation, dep_context)
    pending = db.get_all_db_rows_by_state(self.db_session, 'pending')
    db.update_pending_db_row_processing(self.db_session, pending[0])

    # Insert the entry under test, which depends on the row above.
    db.create_pending_row(self.db_session, test_object, test_id,
                          test_operation, test_context)

    # Run a single pass of the journal thread.
    with mock.patch.object(self.thread.event, 'wait',
                           return_value=False):
        self.thread.sync_pending_row(exit_after_run=True)

    # The dependency row must still be 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session, 'processing')
    self.assertEqual(1, len(rows))

    # The entry under test must have gone back to 'pending' for retry.
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(1, len(rows))

    # Nothing may have been sent to the controller.
    self.assertFalse(mock_sendjson.call_count)
def test_sync_multiple_updates(self):
    """A 'processing' row at the head of the queue blocks later entries.

    Two updates are queued, the earliest (lowest seqnum) is marked
    'processing', and a third update is issued to wake the sync thread;
    no calls may reach the ODL controller and all rows must keep their
    states (exit_after_run=True limits the thread to one pass).
    """
    # Queue two network updates. The loop index is irrelevant, so use
    # the conventional throwaway name.
    for _ in range(2):
        self._call_operation_object(odl_const.ODL_UPDATE,
                                    odl_const.ODL_NETWORK)

    # Pick the earliest row by sequence number directly instead of
    # sorting the whole list just to take its first element.
    rows = db.get_all_db_rows(self.db_context)
    first_row = min(rows, key=operator.attrgetter("seqnum"))

    # Mark it as being processed so the thread must not touch it.
    db.update_db_row_state(self.db_context, first_row,
                           odl_const.PROCESSING)

    # Trigger the sync thread with one more operation; expect zero ODL
    # calls because the head row is still 'processing'.
    self._test_thread_processing(odl_const.ODL_UPDATE,
                                 odl_const.ODL_NETWORK,
                                 expected_calls=0)

    # The head row stays 'processing'; the other two stay 'pending'.
    processing = db.get_all_db_rows_by_state(self.db_context,
                                             'processing')
    self.assertEqual(1, len(processing))
    rows = db.get_all_db_rows_by_state(self.db_context, 'pending')
    self.assertEqual(2, len(rows))
def _test_dependency_processing(self, test_operation, test_object,
                                test_id, test_context,
                                dep_operation, dep_object,
                                dep_id, dep_context):
    """Check that an entry with an unmet dependency is never sent to ODL."""
    # Patch sendjson to later assert it was never called.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create the dependency row, then flip it to 'processing' so the
    # journal thread skips it.
    db.create_pending_row(self.db_session, dep_object, dep_id,
                          dep_operation, dep_context)
    pending = db.get_all_db_rows_by_state(self.db_session, 'pending')
    db.update_pending_db_row_processing(self.db_session, pending[0])

    # Create the row under test, which references the dependency.
    db.create_pending_row(self.db_session, test_object, test_id,
                          test_operation, test_context)

    # Drive one iteration of the journal thread.
    with mock.patch.object(self.thread.event, 'wait',
                           return_value=False):
        self.thread.sync_pending_row(exit_after_run=True)

    # Dependency row is untouched: still 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session, 'processing')
    self.assertEqual(1, len(rows))

    # The test row bounced back to 'pending' for a later retry.
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(1, len(rows))

    # The REST client must never have been used.
    self.assertFalse(mock_sendjson.call_count)
def _test_dependency_processing(
        self, test_operation, test_object, test_id, test_data,
        dep_operation, dep_object, dep_id, dep_data):
    """Check that an entry with an unmet dependency is never sent to ODL."""
    # Patch sendjson so we can verify it is never invoked.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Record the dependency and pin it in 'processing' so the journal
    # thread leaves it alone.
    ctxt = self.db_context
    journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data)
    pending = db.get_all_db_rows_by_state(self.db_context,
                                          odl_const.PENDING)
    db.update_db_row_state(self.db_context, pending[0],
                           odl_const.PROCESSING)

    # Record the entry under test, which depends on the row above.
    journal.record(ctxt, test_object, test_id, test_operation, test_data)

    # Drive the journal thread once.
    self.thread.sync_pending_entries()

    # Dependency row is untouched: still 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # The entry under test went back to 'pending' for a later retry.
    rows = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # The REST client must never have been used.
    self.assertFalse(mock_sendjson.call_count)
def test_router_intf_threading(self):
    """End-to-end journal processing of router-interface add/remove."""
    # Build an external network, a subnet, and a router to attach.
    kwargs = {'arg_list': (external_net.EXTERNAL,),
              external_net.EXTERNAL: True}
    with self.network(**kwargs) as network:
        with self.subnet(cidr='10.0.0.0/24') as subnet:
            router_context, router_dict = (
                self._get_mock_router_operation_info(network, None))
            new_router_dict = self.driver.create_router(router_context,
                                                        router_dict)
            router_id = new_router_dict['id']
            object_type = odl_const.ODL_ROUTER_INTF

            # Queue and process the interface 'add'. Two calls are
            # expected because the pending create_router entry is
            # flushed in the same pass.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_ADD, network, subnet,
                router_id, expected_calls=2)
            done = db.get_all_db_rows_by_state(self.db_session,
                                               odl_const.COMPLETED)
            self.assertEqual(2, len(done))

            # Queue and process the interface 'remove'.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_REMOVE, network, subnet,
                router_id)
            done = db.get_all_db_rows_by_state(self.db_session,
                                               odl_const.COMPLETED)
            self.assertEqual(3, len(done))
def _test_thread_processing(self, object_type):
    """Run create/update/delete for object_type through the journal.

    After each operation the count of 'completed' journal rows must grow
    by one (1, then 2, then 3).
    """
    # An external network plus subnet provides the context objects the
    # operations need.
    kwargs = {
        'arg_list': (external_net.EXTERNAL, ),
        external_net.EXTERNAL: True
    }
    with self.network(**kwargs) as network:
        with self.subnet(network=network, cidr='10.0.0.0/24'):
            # Create: yields the new object's id for the later steps.
            new_object_dict = self._test_operation_thread_processing(
                object_type, odl_const.ODL_CREATE, network, None, None)
            object_id = new_object_dict['id']
            completed = db.get_all_db_rows_by_state(
                self.db_session, odl_const.COMPLETED)
            self.assertEqual(1, len(completed))

            # Update the same object.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_UPDATE, network, None,
                object_id)
            completed = db.get_all_db_rows_by_state(
                self.db_session, odl_const.COMPLETED)
            self.assertEqual(2, len(completed))

            # Delete the same object.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_DELETE, network, None,
                object_id)
            completed = db.get_all_db_rows_by_state(
                self.db_session, odl_const.COMPLETED)
            self.assertEqual(3, len(completed))
def _test_dependency_processing(self, test_operation, test_object,
                                test_id, test_data,
                                dep_operation, dep_object,
                                dep_id, dep_data):
    """Check that an entry with an unmet dependency is never sent to ODL."""
    # Patch sendjson so we can verify it is never invoked.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Record the dependency, then pin it in 'processing' so the journal
    # thread leaves it alone.
    # NOTE(review): rows are recorded via db_context but queried via
    # db_session — presumably both refer to the same session; confirm.
    ctxt = self.db_context
    journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data)
    pending = db.get_all_db_rows_by_state(self.db_session,
                                          odl_const.PENDING)
    db.update_db_row_state(self.db_session, pending[0],
                           odl_const.PROCESSING)

    # Record the entry under test, which depends on the row above.
    journal.record(ctxt, test_object, test_id, test_operation, test_data)

    # Drive the journal thread once.
    self.thread.sync_pending_entries()

    # Dependency row is untouched: still 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # The entry under test went back to 'pending' for a later retry.
    rows = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # The REST client must never have been used.
    self.assertFalse(mock_sendjson.call_count)
def _test_thread_processing(self, object_type):
    """Run create/update/delete for object_type through the journal.

    After each operation the count of 'completed' journal rows must grow
    by one (1, then 2, then 3).
    """
    # An external network plus subnet provides the context objects the
    # operations need.
    kwargs = {'arg_list': (external_net.EXTERNAL,),
              external_net.EXTERNAL: True}
    with self.network(**kwargs) as network:
        with self.subnet(network=network, cidr='10.0.0.0/24'):
            # Create: yields the new object's id for the later steps.
            new_object_dict = self._test_operation_thread_processing(
                object_type, odl_const.ODL_CREATE, network, None, None)
            object_id = new_object_dict['id']
            completed = db.get_all_db_rows_by_state(
                self.db_session, odl_const.COMPLETED)
            self.assertEqual(1, len(completed))

            # Update the same object.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_UPDATE, network, None,
                object_id)
            completed = db.get_all_db_rows_by_state(
                self.db_session, odl_const.COMPLETED)
            self.assertEqual(2, len(completed))

            # Delete the same object.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_DELETE, network, None,
                object_id)
            completed = db.get_all_db_rows_by_state(
                self.db_session, odl_const.COMPLETED)
            self.assertEqual(3, len(completed))
def _test_object_type(self, object_type):
    """Process create, update and delete for object_type via the journal.

    Each step must add exactly one row to the 'completed' state.
    """
    # Create request.
    self._test_thread_processing(odl_const.ODL_CREATE, object_type)
    done = db.get_all_db_rows_by_state(self.db_session, 'completed')
    self.assertEqual(1, len(done))

    # Update request.
    self._test_thread_processing(odl_const.ODL_UPDATE, object_type)
    done = db.get_all_db_rows_by_state(self.db_session, 'completed')
    self.assertEqual(2, len(done))

    # Delete request.
    self._test_thread_processing(odl_const.ODL_DELETE, object_type)
    done = db.get_all_db_rows_by_state(self.db_session, 'completed')
    self.assertEqual(3, len(done))
def _test_reset_processing_rows(self, session, last_retried,
                                max_timedelta, quantity, dry_reset=False):
    """Exercise db.reset_processing_rows and verify the resulting state.

    A row is forced into 'processing' with its last_retried timestamp
    aged by ``last_retried`` seconds, then reset_processing_rows is
    called with ``max_timedelta``; ``quantity`` rows are expected to be
    reset (to 'pending' unless ``dry_reset`` keeps them 'processing').

    NOTE(review): the ``session`` parameter is unused — the method
    operates on self.db_session; kept for signature compatibility.
    """
    db.create_pending_row(self.db_session, *self.UPDATE_ROW)
    expected_state = odl_const.PROCESSING

    # Age the newest row and push it into 'processing'.
    row = db.get_all_db_rows(self.db_session)[-1]
    row.state = expected_state
    row.last_retried = row.last_retried - timedelta(seconds=last_retried)
    self._update_row(row)

    if not dry_reset:
        expected_state = odl_const.PENDING

    reset = db.reset_processing_rows(self.db_session, max_timedelta)
    self.assertIsInstance(reset, int)
    # assertEqual takes (expected, actual); keep that order so failure
    # messages read correctly, consistent with the rest of this file.
    self.assertEqual(quantity, reset)

    rows = db.get_all_db_rows_by_state(self.db_session, expected_state)
    self.assertEqual(quantity, len(rows))
    for row in rows:
        self.assertEqual(expected_state, row.state)
def _test_object_type(self, object_type, delete_expected_calls=1):
    """Process create, update and delete for object_type via the journal.

    Create and update each add one 'completed' row; delete is expected
    to add ``delete_expected_calls`` rows.
    """
    # Create request.
    self._test_thread_processing(odl_const.ODL_CREATE, object_type)
    done = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.COMPLETED)
    self.assertEqual(1, len(done))

    # Update request.
    self._test_thread_processing(odl_const.ODL_UPDATE, object_type)
    done = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.COMPLETED)
    self.assertEqual(2, len(done))

    # Delete request.
    self._test_thread_processing(odl_const.ODL_DELETE, object_type,
                                 delete_expected_calls)
    done = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.COMPLETED)
    self.assertEqual(2 + delete_expected_calls, len(done))
def _test_cleanup_processing_rows(self, last_retried, expected_state):
    """Verify cleanup_processing_rows moves an aged 'processing' row.

    A row is forced into 'processing' with the supplied last_retried
    timestamp; after cleanup exactly one row must be in expected_state.
    """
    # Queue a dummy network create (one 'pending' row).
    self._call_operation_object(odl_const.ODL_CREATE,
                                odl_const.ODL_NETWORK)

    # Age the row and flip it to 'processing'.
    row = db.get_all_db_rows_by_state(self.db_context,
                                      odl_const.PENDING)[0]
    row.last_retried = last_retried
    db.update_db_row_state(self.db_context, row, odl_const.PROCESSING)

    # Cleanup decides based on the last_retried timestamp.
    cleanup.cleanup_processing_rows(self.db_context)

    # Exactly one row must now be in the expected state.
    rows = db.get_all_db_rows_by_state(self.db_context, expected_state)
    self.assertEqual(1, len(rows))
def _test_cleanup_processing_rows(self, last_retried, expected_state):
    """Verify JournalCleanup moves an aged 'processing' row.

    A row is forced into 'processing' with the supplied last_retried
    timestamp; after cleanup exactly one row must be in expected_state.
    """
    # Queue a dummy network create (one 'pending' row).
    self._call_operation_object(odl_const.ODL_CREATE,
                                odl_const.ODL_NETWORK)

    # Age the row and flip it to 'processing'.
    row = db.get_all_db_rows_by_state(self.db_session,
                                      odl_const.PENDING)[0]
    row.last_retried = last_retried
    db.update_db_row_state(self.db_session, row, odl_const.PROCESSING)

    # Cleanup decides based on the last_retried timestamp.
    cleanup.JournalCleanup().cleanup_processing_rows(self.db_session)

    # Exactly one row must now be in the expected state.
    rows = db.get_all_db_rows_by_state(self.db_session, expected_state)
    self.assertEqual(1, len(rows))
def _assert_op(self, operation, object_type, data, precommit=True):
    """Assert the journal state after a precommit/postcommit hook.

    precommit=True: the oldest pending row matches (operation,
    object_type, data['id']). precommit=False: no pending rows exist.
    """
    pending = sorted(
        db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING),
        key=lambda entry: entry.seqnum)
    if precommit:
        oldest = pending[0]
        self.assertEqual(operation, oldest['operation'])
        self.assertEqual(object_type, oldest['object_type'])
        self.assertEqual(data['id'], oldest['object_uuid'])
    else:
        self.assertEqual([], pending)
def _test_parent_delete_pending_child_delete(self, parent, child):
    """Parent delete must wait until the child delete has completed."""
    # Queue a child delete (one 'pending' row).
    self._call_operation_object(odl_const.ODL_DELETE, child)

    # Pin the child delete in 'processing' so the journal thread cannot
    # complete it.
    pending = db.get_all_db_rows_by_state(self.db_session, 'pending')
    db.update_pending_db_row_processing(self.db_session, pending[0])

    # The parent delete must not be sent: its child dependency never
    # reached 'completed'.
    self._test_thread_processing(odl_const.ODL_DELETE, parent,
                                 expected_calls=0)

    # Both rows remain: the child in 'processing', the parent back in
    # 'pending'.
    rows = db.get_all_db_rows_by_state(self.db_session, 'processing')
    self.assertEqual(1, len(rows))
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(1, len(rows))
def _test_recovery(self, operation, odl_resource, expected_state):
    """Run journal_recovery against a FAILED row and check its state.

    If expected_state is None, no row may reach 'completed'; otherwise
    the recovered row (same seqnum) must be in expected_state.
    Returns the originally created row.
    """
    # Seed a journal entry and force it into FAILED.
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
                          'id', operation, {})
    created_row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, created_row, odl_const.FAILED)

    # Recovery consults the controller for the resource's existence.
    self._CLIENT.get_resource.return_value = odl_resource
    recovery.journal_recovery(self.db_context)

    if expected_state is None:
        completed_rows = db.get_all_db_rows_by_state(
            self.db_context, odl_const.COMPLETED)
        self.assertEqual([], completed_rows)
    else:
        recovered = db.get_all_db_rows_by_state(self.db_context,
                                                expected_state)[0]
        self.assertEqual(created_row['seqnum'], recovered['seqnum'])

    return created_row
def _assert_op(self, operation, object_type, data, precommit=True):
    """Assert the journal state after a precommit/postcommit hook.

    precommit=True: the session is flushed and the oldest pending row
    must match (operation, object_type, data['id']). precommit=False:
    no pending rows may exist.
    """
    pending = sorted(
        db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING),
        key=lambda entry: entry.seqnum)
    if precommit:
        # Flush so pending writes are visible before inspecting rows.
        self.db_context.session.flush()
        oldest = pending[0]
        self.assertEqual(operation, oldest['operation'])
        self.assertEqual(object_type, oldest['object_type'])
        self.assertEqual(data['id'], oldest['object_uuid'])
    else:
        self.assertEqual([], pending)
def _test_object_operation_pending_object_operation(
        self, object_type, operation, pending_operation):
    """An operation must wait on an earlier uncompleted one.

    The pending_operation row is pinned in 'processing'; the follow-up
    operation on the same object_type must then be skipped.
    """
    # Queue the first operation (one 'pending' row).
    self._call_operation_object(pending_operation, object_type)

    # Pin it in 'processing' so the journal thread skips it.
    pending = db.get_all_db_rows_by_state(self.db_session, 'pending')
    db.update_pending_db_row_processing(self.db_session, pending[0])

    # The follow-up operation must not be sent: its predecessor never
    # reached 'completed'.
    self._test_thread_processing(operation, object_type,
                                 expected_calls=0)

    # Both rows remain in the database with unchanged states.
    rows = db.get_all_db_rows_by_state(self.db_session, 'processing')
    self.assertEqual(1, len(rows))
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(1, len(rows))
def test_port_precommit_no_tenant(self):
    """A port with an empty tenant_id inherits its network's tenant."""
    # Build a port context with tenant_id blanked out.
    context = self._get_mock_operation_context(odl_const.ODL_PORT)
    context.current['tenant_id'] = ''

    # Invoke the precommit hook and flush the session so the journal
    # row is visible.
    method = getattr(self.mech, 'create_port_precommit')
    method(context)
    self.db_context.session.flush()

    # Exactly one pending row, carrying the network's tenant_id.
    rows = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.PENDING)
    self.assertEqual(1, len(rows))
    _network = self._get_mock_network_operation_context().current
    self.assertEqual(_network['tenant_id'], rows[0]['data']['tenant_id'])
def journal_recovery(session):
    """Attempt to recover every FAILED journal entry.

    For each failed row, ask the ODL controller whether the resource
    exists and dispatch to the matching handler. Failures are logged and
    recovery continues with the next row.
    """
    for row in db.get_all_db_rows_by_state(session, odl_const.FAILED):
        try:
            LOG.debug("Attempting recovery of journal entry %s.", row)
            odl_resource = _CLIENT.get_client().get_resource(
                row.object_type, row.object_uuid)
            if odl_resource is None:
                _handle_non_existing_resource(session, row)
            else:
                _handle_existing_resource(session, row)
        except Exception:
            # Best-effort: one bad row must not abort recovery of the rest.
            LOG.exception(
                _LE("Failure while recovering journal entry %s."), row)
def _test_object_type_pending_network(self, object_type):
    """A pending network and its dependent object complete together."""
    # Queue a network create (one 'pending' row).
    self._call_operation_object(odl_const.ODL_CREATE,
                                odl_const.ODL_NETWORK)

    # Processing the object_type row flushes the network row too,
    # hence two expected calls.
    self._test_thread_processing(odl_const.ODL_CREATE, object_type,
                                 expected_calls=2)

    # Both rows end up 'completed'.
    done = db.get_all_db_rows_by_state(self.db_session, 'completed')
    self.assertEqual(2, len(done))
def _test_recovery(self, operation, odl_resource, expected_state):
    """Run journal_recovery against a FAILED row and check its state.

    The recovered row (matched by seqnum) must end up in expected_state.
    Returns the originally created row.
    """
    # Seed a journal entry and force it into FAILED.
    db.create_pending_row(
        self.db_session, odl_const.ODL_NETWORK, 'id', operation, {})
    created_row = db.get_all_db_rows(self.db_session)[0]
    db.update_db_row_state(self.db_session, created_row, odl_const.FAILED)

    # Recovery consults the controller for the resource's existence.
    self._CLIENT.get_resource.return_value = odl_resource
    recovery.journal_recovery(self.db_session)

    recovered = db.get_all_db_rows_by_state(self.db_session,
                                            expected_state)[0]
    self.assertEqual(created_row['seqnum'], recovered['seqnum'])
    return created_row
def _test_object_type_pending_network(self, object_type):
    """A pending network and its dependent object complete together."""
    # Queue a network create (one 'pending' row).
    self._call_operation_object(odl_const.ODL_CREATE,
                                odl_const.ODL_NETWORK)

    # Processing the object_type row flushes the network row too,
    # hence two expected calls.
    self._test_thread_processing(odl_const.ODL_CREATE, object_type,
                                 expected_calls=2)

    # Both rows end up 'completed'.
    done = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.COMPLETED)
    self.assertEqual(2, len(done))
def journal_recovery(context):
    """Attempt to recover every FAILED journal entry.

    For each failed row, ask the ODL controller whether the resource
    exists and dispatch to the matching handler. Unsupported resource
    types are warned about; any other failure is logged and recovery
    continues with the next row.
    """
    for row in db.get_all_db_rows_by_state(context.session,
                                           odl_const.FAILED):
        try:
            LOG.debug("Attempting recovery of journal entry %s.", row)
            odl_resource = _CLIENT.get_client().get_resource(
                row.object_type, row.object_uuid)
            if odl_resource is None:
                _handle_non_existing_resource(context, row)
            else:
                _handle_existing_resource(context, row)
        except UnsupportedResourceType:
            LOG.warning('Unsupported resource %s', row.object_type)
        except Exception:
            # Best-effort: one bad row must not abort recovery of the rest.
            LOG.exception("Failure while recovering journal entry %s.",
                          row)
def test_router_intf_threading(self):
    """End-to-end journal processing of router-interface add/remove."""
    # Build an external network, a subnet, and a router to attach.
    kwargs = {
        'arg_list': (external_net.EXTERNAL, ),
        external_net.EXTERNAL: True
    }
    with self.network(**kwargs) as network:
        with self.subnet(cidr='10.0.0.0/24') as subnet:
            router_context, router_dict = (
                self._get_mock_router_operation_info(network, None))
            new_router_dict = self.driver.create_router(
                router_context, router_dict)
            router_id = new_router_dict['id']
            object_type = odl_const.ODL_ROUTER_INTF

            # Queue and process the interface 'add'. Two calls are
            # expected because the pending create_router entry is
            # flushed in the same pass.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_ADD, network, subnet,
                router_id, expected_calls=2)
            done = db.get_all_db_rows_by_state(self.db_session,
                                               'completed')
            self.assertEqual(2, len(done))

            # Queue and process the interface 'remove'.
            self._test_operation_thread_processing(
                object_type, odl_const.ODL_REMOVE, network, subnet,
                router_id)
            done = db.get_all_db_rows_by_state(self.db_session,
                                               'completed')
            self.assertEqual(3, len(done))
def journal_recovery(context):
    """Attempt to recover every FAILED journal entry.

    For each failed row, ask the ODL controller whether the resource
    exists; on success the matching handler runs inside a DB savepoint.
    Unsupported resource types are warned about; any other lookup
    failure is logged and recovery continues with the next row.
    """
    for row in db.get_all_db_rows_by_state(context, odl_const.FAILED):
        LOG.debug("Attempting recovery of journal entry %s.", row)
        try:
            odl_resource = _CLIENT.get_client().get_resource(
                row.object_type, row.object_uuid)
        except exceptions.UnsupportedResourceType:
            LOG.warning('Unsupported resource %s', row.object_type)
        except Exception:
            # Best-effort: one bad row must not abort recovery of the rest.
            LOG.exception("Failure while recovering journal entry %s.",
                          row)
        else:
            # Handle inside a savepoint so a handler failure rolls back
            # only this row's changes.
            with db_api.CONTEXT_WRITER.savepoint.using(context):
                if odl_resource is None:
                    _handle_non_existing_resource(context, row)
                else:
                    _handle_existing_resource(context, row)
def _test_recovery_creates_operation(
        self, operation, resource, odl_resource, expected_operation,
        recovery_mock):
    """Recovery of a failed row must queue a new pending operation.

    The Neutron-side resource lookup is mocked (NotFound when resource
    is None); the resulting pending row must carry expected_operation
    and the original row's object type and uuid.
    """
    # Stub the Neutron resource lookup.
    if resource is None:
        recovery_mock.side_effect = nexc.NotFound
    else:
        recovery_mock.return_value = resource

    # Drive recovery; the failed row itself ends up 'completed'.
    original_row = self._test_recovery(
        operation, odl_resource, odl_const.COMPLETED)

    # A new pending row must describe the follow-up operation.
    pending_row = db.get_all_db_rows_by_state(
        self.db_session, odl_const.PENDING)[0]
    self.assertEqual(expected_operation, pending_row['operation'])
    self.assertEqual(original_row['object_type'],
                     pending_row['object_type'])
    self.assertEqual(original_row['object_uuid'],
                     pending_row['object_uuid'])
def _test_recovery_creates_operation(
        self, operation, resource, odl_resource, expected_operation,
        recovery_mock):
    """Recovery of a failed row must queue a new pending operation.

    The Neutron-side resource lookup is mocked (NotFound when resource
    is None); the resulting pending row must carry expected_operation
    and the original row's object type and uuid.
    """
    # Stub the Neutron resource lookup.
    if resource is None:
        recovery_mock.side_effect = nexc.NotFound
    else:
        recovery_mock.return_value = resource

    # Drive recovery; the failed row itself ends up 'completed'.
    original_row = self._test_recovery(
        operation, odl_resource, odl_const.COMPLETED)

    # A new pending row must describe the follow-up operation.
    pending_row = db.get_all_db_rows_by_state(
        self.db_context, odl_const.PENDING)[0]
    self.assertEqual(expected_operation, pending_row['operation'])
    self.assertEqual(original_row['object_type'],
                     pending_row['object_type'])
    self.assertEqual(original_row['object_uuid'],
                     pending_row['object_uuid'])
def _test_reset_processing_rows(self, last_retried, max_timedelta,
                                quantity, dry_reset=False):
    """Exercise db.reset_processing_rows and verify the resulting state.

    A row is forced into 'processing' with its last_retried timestamp
    aged by ``last_retried`` seconds, then reset_processing_rows is
    called with ``max_timedelta``; ``quantity`` rows are expected to be
    reset (to 'pending' unless ``dry_reset`` keeps them 'processing').
    """
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    expected_state = odl_const.PROCESSING

    # Age the newest row and push it into 'processing'.
    row = db.get_all_db_rows(self.db_context)[-1]
    row.state = expected_state
    row.last_retried = row.last_retried - timedelta(seconds=last_retried)
    self._update_row(row)

    if not dry_reset:
        expected_state = odl_const.PENDING

    reset = db.reset_processing_rows(self.db_context, max_timedelta)
    self.assertIsInstance(reset, int)
    # assertEqual takes (expected, actual); keep that order so failure
    # messages read correctly, consistent with the rest of this file.
    self.assertEqual(quantity, reset)

    rows = db.get_all_db_rows_by_state(self.db_context, expected_state)
    self.assertEqual(quantity, len(rows))
    for row in rows:
        self.assertEqual(expected_state, row.state)
def test_sync_multiple_updates(self):
    """An invalidated row blocks the sync thread from sending anything.

    Two updates are queued and the last row's created time is decreased
    to invalidate it; after one more operation wakes the sync thread,
    no ODL calls may be made and all three rows must be 'pending'
    (exit_after_run=True limits the thread to one pass).
    """
    # Queue two network updates. The loop index is irrelevant, so use
    # the conventional throwaway name.
    for _ in range(2):
        self._call_operation_object(odl_const.ODL_UPDATE,
                                    odl_const.ODL_NETWORK)

    # Invalidate the most recently created row by aging its timestamp.
    last_row = db.get_all_db_rows(self.db_session)[-1]
    self._decrease_row_created_time(last_row)

    # Trigger the sync thread with one more operation; expect zero ODL
    # calls because the head row is not valid.
    self._test_thread_processing(odl_const.ODL_UPDATE,
                                 odl_const.ODL_NETWORK,
                                 expected_calls=0)

    # All rows (including the invalidated one, reset by the thread)
    # must be in 'pending'.
    rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
    self.assertEqual(3, len(rows))
def no_journal_rows():
    """Return True when no journal rows are pending or processing."""
    pending_rows = db.get_all_db_rows_by_state(
        self.db_context, odl_const.PENDING)
    processing_rows = db.get_all_db_rows_by_state(
        self.db_context, odl_const.PROCESSING)
    # Empty lists are falsy, so this is equivalent to comparing both
    # lengths against zero.
    return not pending_rows and not processing_rows
def no_journal_rows():
    """Return True when no journal rows are pending or processing."""
    pending_rows = db.get_all_db_rows_by_state(
        self.db_session, odl_const.PENDING)
    processing_rows = db.get_all_db_rows_by_state(
        self.db_session, odl_const.PROCESSING)
    # Empty lists are falsy, so this is equivalent to comparing both
    # lengths against zero.
    return not pending_rows and not processing_rows