def clear():
    """Wipe the Layer2 catalog and commit the transaction."""
    catalog = CatalogAPI(zport)
    catalog.clear()
    commit()
class TestCheckWorkingPath(BaseTestCase):
    """Path-liveness checks over small synthetic router topologies.

    Each test builds a tiny topology, stubs ``get_status`` so that chosen
    routers appear down, and asserts ``check_working_path`` fails when no
    all-up route remains between the endpoints.
    """

    def afterSetUp(self):
        super(TestCheckWorkingPath, self).afterSetUp()
        self.dmd.Devices.createOrganizer('/Network/Router/Cisco')
        self.cat = CatalogAPI(self.dmd.zport, name='test_l2')
        self.cat.clear()
        zcml.load_config('testing.zcml', Products.ZenTestCase)
        zcml.load_config('configure.zcml', ZenPacks.zenoss.Layer2)

    def topology(self, topology):
        # Build devices from the edge-list description and index each one.
        for node in create_topology(topology, self.dmd, False):
            self.cat.add_node(node)

    def test_check_nearest_down(self):
        # a - b - c, with b down: a cannot reach c.
        self.topology('''
            a b
            b c
        ''')
        self.cat.get_status = lambda node: node != router('b')
        self.assertFalse(
            self.cat.check_working_path(router('a'), router('c')))

    def test_check_next_nearest_down(self):
        # a - b - c - d, with c down: a cannot reach d.
        self.topology('''
            a b
            b c
            c d
        ''')
        self.cat.get_status = lambda node: node != router('c')
        self.assertFalse(
            self.cat.check_working_path(router('a'), router('d')))

    def test_check_one_way_down(self):
        # Only path to d goes through c, which is down.
        self.topology('''
            a b
            a c
            c d
        ''')
        self.cat.get_status = lambda node: node != router('c')
        self.assertFalse(
            self.cat.check_working_path(router('a'), router('d')))

    def test_check_two_ways_down(self):
        # Both endpoints themselves are down, so no working path exists.
        self.topology('''
            a b
            a c
            b d
            c d
        ''')
        self.cat.get_status = lambda node: node in (router('a'), router('d'))
        self.assertFalse(
            self.cat.check_working_path(router('a'), router('d')))

    def test_get_bsf_connected(self):
        # Breadth-first search from a, depth 6, over direct connections.
        self.topology('''
            a b
            b c
            b d
            c d
        ''')
        self.assertItemsEqual(
            self.cat.get_bfs_connected(
                router('a'), self.cat.get_directly_connected, 6),
            [router('c'), router('d')])
class TestCatalogAPI(BaseTestCase):
    """CRUD and query behaviour of the Layer2 CatalogAPI.

    A fake connection yields three catalog records (one per layer entry),
    which the counts below rely on.
    """

    def afterSetUp(self):
        super(TestCatalogAPI, self).afterSetUp()
        self.cat = CatalogAPI(self.app.zport, name="test_l2")
        self.cat.clear()
        self.connection = fake_connection('test_id')

    def test_catalog_is_empty(self):
        self.assertEqual(len(self.cat.search()), 0)

    def test_connection_is_added_to_catalog(self):
        self.cat.add_connection(self.connection)
        brains = self.cat.search()
        self.assertEqual(len(brains), 3)
        self.assertTrue('test_id' in [brain.entity_id for brain in brains])
        self.assertEqual(brains[0].layers, ('layer1', 'layer2', 'layer1'))

    def test_remove_connection(self):
        self.cat.add_connection(self.connection)
        self.cat.remove_connection(self.connection)
        self.assertEqual(len(self.cat.search()), 0)

    def test_validate_connection(self):
        # A valid connection is returned unchanged.
        self.assertEqual(
            self.cat.validate_connection(self.connection), self.connection)

    def test_search(self):
        self.cat.add_connection(self.connection)
        self.assertEqual(len(self.cat.search(entity_id='test_id')), 1)
        self.assertEqual(len(self.cat.search(entity_id='incorrect_id')), 0)

    def test_add_remove_node(self):
        provider = fake_connections_provider(self.dmd)
        self.assertEqual(len(self.cat.search()), 0)
        self.cat.add_node(provider)
        self.assertEqual(len(self.cat.search(entity_id='connection_id')), 1)
        self.cat.remove_node(provider)
        self.assertEqual(len(self.cat.search(entity_id='connection_id')), 0)

    def test_get_directly_connected(self):
        self.cat.add_node(fake_connections_provider(self.dmd))
        connected = list(self.cat.get_directly_connected('connection_id'))
        self.assertTrue(connected == ['connected_to1', 'connected_to2'])

    def test_get_reverse_connected(self):
        self.cat.add_node(fake_connections_provider(self.dmd))
        reverse = list(self.cat.get_reverse_connected('connected_to1'))
        self.assertTrue(reverse == ['connection_id'])

    def test_get_existing_layers(self):
        self.cat.add_node(fake_connections_provider(self.dmd))
        self.assertEqual(len(self.cat.get_existing_layers()), 2)

    def test_clear(self):
        for node_id in ('con_id1', 'con_id2', 'con_id3', 'con_id4',
                        'con_id5'):
            self.cat.add_node(fake_connections_provider(self.dmd, node_id))
        self.assertEqual(len(self.cat.search()), 15)  # 5*3
        self.cat.clear()
        self.assertEqual(len(self.cat.search()), 0)
class ZenMapper(CyclingDaemon):
    """Daemon that keeps the Layer2 connections catalog up to date.

    In cycling mode it fans the node list out to worker subprocesses;
    a worker instance (``--worker``) indexes only its ``offset``/``chunk``
    slice of the sorted node list.
    """

    name = 'zenmapper'
    mname = name

    def __init__(self, noopts=0, app=None, keeproot=False):
        super(ZenMapper, self).__init__(noopts, app, keeproot)
        # worker_id -> multiprocessing.Process of spawned indexer.
        self._workers = {}

    def buildOptions(self):
        # NOTE(review): passing CyclingDaemon (not ZenMapper) to super()
        # skips CyclingDaemon.buildOptions entirely -- presumably to avoid
        # it registering conflicting options; confirm this is intentional.
        super(CyclingDaemon, self).buildOptions()
        self.parser.add_option(
            '--cycletime',
            dest='cycletime',
            default=300,
            type="int",
            help="update connections every CYCLETIME seconds. 300 by default"
        )
        self.parser.add_option(
            "--monitor",
            dest="monitor",
            default=DEFAULT_MONITOR,
            help="Name of monitor instance to use for heartbeat "
                 " events. Default is %s." % DEFAULT_MONITOR
        )
        self.parser.add_option(
            "--clear",
            dest="clear",
            action="store_true",
            help="Clear MACs catalog"
        )
        self.parser.add_option(
            "--force",
            dest="force",
            action="store_true",
            help="Force reindex"
        )
        self.parser.add_option(
            '-d', '--device',
            dest='device',
            help="Fully qualified device name ie www.confmon.com"
        )
        self.parser.add_option(
            '--redis-url',
            dest='redis_url',
            type='string',
            help='redis connection string: redis://[hostname]:[port]/[db]'
        )
        self.parser.add_option(
            '--workers',
            dest='workers',
            default=2,
            type="int",
            help='Workers number'
        )
        self.parser.add_option(
            "--worker",
            dest="worker",
            action="store_true",
            help="Run as worker"
        )
        self.parser.add_option(
            '--offset',
            dest='offset',
            type="int",
            help='Start point to process in worker'
        )
        self.parser.add_option(
            '--chunk',
            dest='chunk',
            type="int",
            help='Chunk size to process in worker'
        )

    def get_nodes_list(self, sort=False):
        """
        Returns list of devices and networks to index
        """
        if self.options.device:
            # Single-device mode: index just the named device, if it exists.
            device = self.dmd.Devices.findDevice(self.options.device)
            if not device:
                log.error(
                    "Device with id %s was not found", self.options.device
                )
                return []
            log.info(
                "Updating connections for device %s", self.options.device
            )
            return [device]

        nodes = chain.from_iterable([
            self.dmd.Devices.getSubDevicesGen(),
            self.dmd.Networks.getSubNetworks()])
        if sort:
            # Deterministic GUID order so workers slice consistently.
            return sorted(
                nodes, key=lambda node: IGlobalIdentifier(node).getGUID())
        return list(nodes)

    def start_worker(self, worker_id, chunk):
        """
        Creates new process of zenmapper with a task
        to process chunk of nodes
        """
        existing = self._workers.get(worker_id)
        if existing is not None and existing.is_alive():
            log.info('Worker %i still running.' % worker_id)
            return
        log.info('Starting worker %i with chunk %i' % (worker_id, chunk))
        proc = multiprocessing.Process(
            target=exec_worker,
            args=(worker_id, chunk)
        )
        proc.daemon = True
        proc.start()
        self._workers[worker_id] = proc

    def _do_job(self, offset, chunk):
        """
        Do actual indexing of nodes into L2 catalog
        """
        if not chunk:
            # No chunking: index every node in one pass.
            log.info('Updating catalog.')
            for node in self.get_nodes_list(sort=True):
                self.cat.add_node(node, force=self.options.force)
                # Drop the node from the ZODB cache to bound memory use.
                node._p_invalidate()
            return

        log.info('Worker %i: updating catalog' % offset)
        start = offset * chunk
        for node in self.get_nodes_list(sort=True)[start:start + chunk]:
            self.cat.add_node(node, force=self.options.force)
            node._p_invalidate()
        log.info('Worker %i: finished job.' % offset)

    def _compact_catalog(self):
        """
        Removes records for deleted devices.
        """
        # Don't compact while workers are indexing or in single-device mode.
        for worker in self._workers.values():
            if worker.is_alive():
                return
        if self.options.device:
            return
        guids = [IGlobalIdentifier(node).getGUID()
                 for node in self.get_nodes_list()]
        log.info('Compacting catalog')
        self.cat.compact_catalog(guids)

    def main_loop(self):
        """
        zenmapper main loop
        """
        self.cat = CatalogAPI(
            self.dmd.zport, redis_url=self.options.redis_url)

        if self.options.clear:
            log.info('Clearing catalog')
            self.cat.clear()
        elif self.options.cycle and self.options.workers > 0:
            # Master in cycling mode: compact, then fan out to workers.
            self._compact_catalog()
            chunk = len(self.get_nodes_list()) / self.options.workers + 1
            for worker_id in xrange(self.options.workers):
                self.start_worker(worker_id, chunk)
        elif self.options.worker:
            # Worker subprocess: index only our slice.
            self._do_job(self.options.offset, self.options.chunk)
        else:
            # One-shot run in this process.
            self._compact_catalog()
            self._do_job(offset=0, chunk=0)