def test_production(self):
    """Exercise DataSrcInfo against a real sqlite3 data source.

    Instead of relying on mock classes, build a genuine
    DataSrcClientsMgr configuration to confirm nothing was missed.
    """
    config = MockConfigData({
        "_generation_id": 1,
        "classes": {
            "IN": [{
                "type": "sqlite3",
                "cache-enable": True,
                "cache-type": "mapped",
                "cache-zones": [],
                "params": {"database_file": self.__sqlite3_dbfile},
            }],
        },
    })
    clients_mgr = DataSrcClientsMgr(use_cache=True)
    clients_mgr.reconfigure({}, config)
    generation, cmap = clients_mgr.get_clients_map()
    info = DataSrcInfo(generation, cmap, self.__mgr_config)

    self.assertEqual(1, info.gen_id)
    self.assertEqual(cmap, info.clients_map)
    self.assertEqual(1, len(info.segment_info_map))
    segment = info.segment_info_map[(RRClass.IN, 'sqlite3')]
    # The mapped-type segment should expose a reader reset parameter.
    self.assertIsNotNone(segment.get_reset_param(SegmentInfo.READER))
def test_production(self):
    """Exercise DataSrcInfo against a real sqlite3 data source.

    Instead of relying on mock classes, build a genuine
    DataSrcClientsMgr configuration to confirm nothing was missed.
    """
    config = MockConfigData({
        "classes": {
            "IN": [{
                "type": "sqlite3",
                "cache-enable": True,
                "cache-type": "mapped",
                "cache-zones": [],
                "params": {"database_file": self.__sqlite3_dbfile},
            }],
        },
    })
    clients_mgr = DataSrcClientsMgr(use_cache=True)
    clients_mgr.reconfigure({}, config)
    generation, cmap = clients_mgr.get_clients_map()
    info = DataSrcInfo(generation, cmap, self.__mgr_config)

    self.assertEqual(1, info.gen_id)
    self.assertEqual(cmap, info.clients_map)
    self.assertEqual(1, len(info.segment_info_map))
    segment = info.segment_info_map[(RRClass.IN, 'sqlite3')]
    # No reader reset parameter is expected for this configuration.
    self.assertIsNone(segment.get_reset_param(SegmentInfo.READER))
def test_load(self):
    """Test the "load" command round trip through the builder thread.

    Starts the builder thread, sends a "load" command for the
    example.com zone, waits for the completion notification on the
    master socket, verifies the response and the loaded data, then
    shuts the thread down cleanly.
    """
    mapped_file_dir = os.environ["TESTDATA_WRITE_PATH"]
    mgr_config = {"mapped_file_dir": mapped_file_dir}
    cfg_data = MockConfigData(
        {
            "classes": {
                "IN": [
                    {
                        "type": "MasterFiles",
                        "params": {"example.com": TESTDATA_PATH + "example.com.zone"},
                        "cache-enable": True,
                        "cache-type": "mapped",
                    }
                ]
            }
        }
    )
    cmgr = DataSrcClientsMgr(use_cache=True)
    cmgr.reconfigure({}, cfg_data)
    genid, clients_map = cmgr.get_clients_map()
    datasrc_info = DataSrcInfo(genid, clients_map, mgr_config)
    self.assertEqual(1, datasrc_info.gen_id)
    self.assertEqual(clients_map, datasrc_info.clients_map)
    self.assertEqual(1, len(datasrc_info.segment_info_map))
    sgmt_info = datasrc_info.segment_info_map[(RRClass.IN, "MasterFiles")]
    self.assertIsNone(sgmt_info.get_reset_param(SegmentInfo.READER))
    self.assertIsNotNone(sgmt_info.get_reset_param(SegmentInfo.WRITER))
    param = sgmt_info.get_reset_param(SegmentInfo.WRITER)
    # Remember the mapped file path so it can be cleaned up later.
    self.__mapped_file_path = param["mapped-file"]

    self._builder_thread.start()

    # Now that the builder thread is running, send it the "load"
    # command. We should be notified when the load operation is
    # complete.
    with self._builder_cv:
        self._builder_command_queue.append(
            ("load", bundy.dns.Name("example.com"), datasrc_info,
             RRClass.IN, "MasterFiles")
        )
        self._builder_cv.notify_all()

    # Wait 60 seconds to receive a notification on the socket from
    # the builder.
    (reads, _, _) = select.select([self._master_sock], [], [], 60)
    self.assertTrue(self._master_sock in reads)

    # Reading 1 byte should not block us here, especially as the
    # socket is ready to read. It's a hack, but this is just a
    # testcase.
    got = self._master_sock.recv(1)
    self.assertEqual(got, b"x")

    with self._builder_lock:
        # The command queue must be cleared, and the response queue
        # must contain the "load-completed" notification for our
        # request.  (The earlier comment claiming a "bad command"
        # response was a stale copy-paste from another test.)
        self.assertEqual(len(self._builder_command_queue), 0)
        self.assertEqual(len(self._builder_response_queue), 1)
        response = self._builder_response_queue[0]
        self.assertTrue(isinstance(response, tuple))
        self.assertTupleEqual(
            response,
            ("load-completed", datasrc_info, RRClass.IN, "MasterFiles"))
        del self._builder_response_queue[:]

    # Now try looking for some loaded data
    clist = datasrc_info.clients_map[RRClass.IN]
    dsrc, finder, exact = clist.find(bundy.dns.Name("example.com"))
    self.assertIsNotNone(dsrc)
    self.assertTrue(isinstance(dsrc, bundy.datasrc.DataSourceClient))
    self.assertIsNotNone(finder)
    self.assertTrue(isinstance(finder, bundy.datasrc.ZoneFinder))
    self.assertTrue(exact)

    # Send the builder thread the "shutdown" command. The thread
    # should exit its main loop and be joinable.
    with self._builder_cv:
        self._builder_command_queue.append(("shutdown",))
        self._builder_cv.notify_all()

    # Wait 5 seconds at most for the main loop of the builder to
    # exit.
    self._builder_thread.join(5)
    # Use is_alive(): the camelCase isAlive() alias was deprecated and
    # removed in Python 3.9.
    self.assertFalse(self._builder_thread.is_alive())

    # The command queue must be cleared, and the response queue must
    # be untouched (we don't use it in this test). The thread is no
    # longer running, so we can use the queues without a lock.
    self.assertEqual(len(self._builder_command_queue), 0)
    self.assertEqual(len(self._builder_response_queue), 0)
def test_load(self):
    """Test the "load" command round trip through the builder thread.

    Starts the builder thread, sends a "load" command for the
    example.com zone, waits for the completion notification on the
    master socket, verifies the response and the loaded data, then
    shuts the thread down cleanly.
    """
    mapped_file_dir = os.environ['TESTDATA_WRITE_PATH']
    mgr_config = {'mapped_file_dir': mapped_file_dir}
    cfg_data = MockConfigData(
        {
            "_generation_id": 1,
            "classes": {
                "IN": [
                    {
                        "type": "MasterFiles",
                        "params": {"example.com": TESTDATA_PATH + "example.com.zone"},
                        "cache-enable": True,
                        "cache-type": "mapped",
                    }
                ]
            }
        }
    )
    cmgr = DataSrcClientsMgr(use_cache=True)
    cmgr.reconfigure({}, cfg_data)
    genid, clients_map = cmgr.get_clients_map()
    datasrc_info = DataSrcInfo(genid, clients_map, mgr_config)
    self.assertEqual(1, datasrc_info.gen_id)
    self.assertEqual(clients_map, datasrc_info.clients_map)
    self.assertEqual(1, len(datasrc_info.segment_info_map))
    sgmt_info = datasrc_info.segment_info_map[(RRClass.IN, 'MasterFiles')]
    self.assertIsNotNone(sgmt_info.get_reset_param(SegmentInfo.READER))
    self.assertIsNotNone(sgmt_info.get_reset_param(SegmentInfo.WRITER))
    param = sgmt_info.get_reset_param(SegmentInfo.WRITER)
    # Remember the mapped file path so it can be cleaned up later.
    self.__mapped_file_path = param['mapped-file']

    self._builder_thread.start()

    # Now that the builder thread is running, send it the "load"
    # command. We should be notified when the load operation is
    # complete.
    with self._builder_cv:
        self._builder_command_queue.append(
            ('load', bundy.dns.Name("example.com"), datasrc_info,
             RRClass.IN, 'MasterFiles')
        )
        self._builder_cv.notify_all()

    # Wait 60 seconds to receive a notification on the socket from
    # the builder.
    (reads, _, _) = select.select([self._master_sock], [], [], 60)
    self.assertTrue(self._master_sock in reads)

    # Reading 1 byte should not block us here, especially as the
    # socket is ready to read. It's a hack, but this is just a
    # testcase.
    got = self._master_sock.recv(1)
    self.assertEqual(got, b'x')

    with self._builder_lock:
        # The command queue must be cleared, and the response queue
        # must contain the "load-completed" notification for our
        # request.  (The earlier comment claiming a "bad command"
        # response was a stale copy-paste from another test.)
        self.assertEqual(len(self._builder_command_queue), 0)
        self.assertEqual(len(self._builder_response_queue), 1)
        response = self._builder_response_queue[0]
        self.assertTrue(isinstance(response, tuple))
        self.assertTupleEqual(
            response,
            ('load-completed', datasrc_info, RRClass.IN, 'MasterFiles',
             True))
        del self._builder_response_queue[:]

    # Now try looking for some loaded data
    clist = datasrc_info.clients_map[RRClass.IN]
    dsrc, finder, exact = clist.find(bundy.dns.Name("example.com"))
    self.assertIsNotNone(dsrc)
    self.assertTrue(isinstance(dsrc, bundy.datasrc.DataSourceClient))
    self.assertIsNotNone(finder)
    self.assertTrue(isinstance(finder, bundy.datasrc.ZoneFinder))
    self.assertTrue(exact)

    # Send the builder thread the "shutdown" command. The thread
    # should exit its main loop and be joinable.
    with self._builder_cv:
        self._builder_command_queue.append(('shutdown',))
        self._builder_cv.notify_all()

    # Wait 5 seconds at most for the main loop of the builder to
    # exit.
    self._builder_thread.join(5)
    # Use is_alive(): the camelCase isAlive() alias was deprecated and
    # removed in Python 3.9.
    self.assertFalse(self._builder_thread.is_alive())

    # The command queue must be cleared, and the response queue must
    # be untouched (we don't use it in this test). The thread is no
    # longer running, so we can use the queues without a lock.
    self.assertEqual(len(self._builder_command_queue), 0)
    self.assertEqual(len(self._builder_response_queue), 0)