def setUp(self):
    """Instantiate the checks and launch a local elasticsearch server.

    Leaves self.process as None if the server cannot be started; the
    failure is logged so individual tests fail with a clearer error.
    """
    self.c = ElasticSearch(logging.getLogger())
    self.d = ElasticSearchClusterStatus(logging.getLogger())
    self.process = None
    try:
        # Start elasticsearch in the foreground (-f) so tearDown's
        # terminate() kills the real server, not a launcher script.
        self.process = subprocess.Popen(
            ["elasticsearch", "-f", "elasticsearch"],
            executable="elasticsearch",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Wait for it to really start before any test hits it.
        self._wait("http://localhost:%s" % PORT)
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.
        logging.getLogger().exception("Cannot instantiate elasticsearch")
def setUp(self):
    """Start the checks under test plus a throwaway elasticsearch node."""
    self.c = ElasticSearch(logging.getLogger())
    self.d = ElasticSearchClusterStatus(logging.getLogger())
    self.process = None
    try:
        # Run in foreground mode (-f) so the Popen handle owns the
        # actual server process and terminate() in tearDown works.
        self.process = subprocess.Popen(
            ["elasticsearch", "-f", "elasticsearch"],
            executable="elasticsearch",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Block until the HTTP endpoint responds (or _wait gives up).
        self._wait("http://localhost:%s" % PORT)
    except Exception:
        # Narrowed from a bare `except:` which would also trap
        # KeyboardInterrupt/SystemExit.
        logging.getLogger().exception("Cannot instantiate elasticsearch")
class TestElastic(unittest.TestCase):
    """Integration tests for the ElasticSearch check against a live node."""

    # Every metric name the check is expected to emit exactly once.
    EXPECTED_METRICS = (
        "elasticsearch.get.total",
        "elasticsearch.search.fetch.total",
        "jvm.gc.collection_time",
        "jvm.mem.heap_committed",
        "jvm.mem.heap_used",
        "jvm.threads.count",
        "jvm.threads.peak_count",
        "elasticsearch.transport.rx_count",
        "elasticsearch.transport.tx_size",
        "elasticsearch.transport.server_open",
        "elasticsearch.thread_pool.snapshot.queue",
    )

    def _wait(self, url):
        """Poll url every 0.5s until it answers or MAX_WAIT tries elapse."""
        loop = 0
        while True:
            try:
                req = urllib2.Request(url, None)
                urllib2.urlopen(req)  # unused return value dropped
                break
            except Exception:
                # Narrowed from a bare `except:`.
                time.sleep(0.5)
                loop += 1
                if loop >= MAX_WAIT:
                    break

    def setUp(self):
        """Launch a local elasticsearch node for the tests to query."""
        self.c = ElasticSearch(logging.getLogger())
        self.process = None
        try:
            # Foreground mode so terminate() hits the real server process.
            self.process = subprocess.Popen(
                ["elasticsearch", "-f", "elasticsearch"],
                executable="elasticsearch",
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            # Wait for it to really start
            self._wait("http://localhost:%s" % PORT)
        except Exception:
            logging.getLogger().exception("Cannot instantiate elasticsearch")

    def tearDown(self):
        """Kill the elasticsearch node if setUp managed to start one."""
        if self.process is not None:
            self.process.terminate()

    def testCheck(self):
        """The check emits each expected metric exactly once, for both a
        full stats URL and a bare host URL, and returns falsy on a bad URL."""
        agentConfig = {
            "elasticsearch":
                "http://localhost:%s/_cluster/nodes/stats?all=true" % PORT,
            "version": "0.1",
            "apiKey": "toto",
        }
        r = self.c.check(agentConfig)

        def _check(slf, r):
            # isinstance replaces the non-idiomatic type(r) == type([]).
            slf.assertTrue(isinstance(r, list))
            slf.assertTrue(len(r) > 0)
            # Collapsed eleven copy-pasted assertions into one loop.
            for metric in slf.EXPECTED_METRICS:
                slf.assertEquals(
                    len([t for t in r if t[0] == metric]), 1, r)

        _check(self, r)

        # Same check, only given hostname
        agentConfig = {"elasticsearch": "http://localhost:%s" % PORT,
                       "version": "0.1",
                       "apiKey": "toto"}
        r = self.c.check(agentConfig)
        _check(self, r)

        # A wrong URL must yield no results.
        agentConfig = {"elasticsearch":
                           "http://localhost:%s/wrong_url" % PORT,
                       "version": "0.1",
                       "apiKey": "toto"}
        r = self.c.check(agentConfig)
        self.assertFalse(r)
class TestElastic(unittest.TestCase):
    """Integration tests for the ElasticSearch metric and cluster-status
    checks, run against a locally spawned elasticsearch node."""

    # Metric names testCheck expects to see exactly once each.
    EXPECTED_METRICS = (
        "elasticsearch.get.total",
        "elasticsearch.search.fetch.total",
        "jvm.gc.collection_time",
        "jvm.mem.heap_committed",
        "jvm.mem.heap_used",
        "jvm.threads.count",
        "jvm.threads.peak_count",
        "elasticsearch.transport.rx_count",
        "elasticsearch.transport.tx_size",
        "elasticsearch.transport.server_open",
        "elasticsearch.thread_pool.snapshot.queue",
    )

    def _wait(self, url):
        """Poll url every 0.5s until it responds or MAX_WAIT tries elapse."""
        loop = 0
        while True:
            try:
                req = urllib2.Request(url, None)
                urllib2.urlopen(req)  # unused `request` local removed
                break
            except Exception:
                # Narrowed from a bare `except:`.
                time.sleep(0.5)
                loop += 1
                if loop >= MAX_WAIT:
                    break

    def setUp(self):
        """Instantiate both checks and start a local elasticsearch node."""
        self.c = ElasticSearch(logging.getLogger())
        self.d = ElasticSearchClusterStatus(logging.getLogger())
        self.process = None
        try:
            # Foreground mode so tearDown's terminate() kills the server.
            self.process = subprocess.Popen(
                ["elasticsearch", "-f", "elasticsearch"],
                executable="elasticsearch",
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            # Wait for it to really start
            self._wait("http://localhost:%s" % PORT)
        except Exception:
            logging.getLogger().exception("Cannot instantiate elasticsearch")

    def tearDown(self):
        """Stop the elasticsearch node if setUp started one."""
        if self.process is not None:
            self.process.terminate()

    def testEvent(self):
        """A green cluster status produces no events; red produces one."""
        agentConfig = {
            'elasticsearch':
                'http://localhost:%s/_cluster/nodes/stats?all=true' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        url = urlparse.urljoin(agentConfig['elasticsearch'], HEALTH_URL)
        data = _get_data(agentConfig, url)
        self.assertEquals(len(data), 10, data)

        data['status'] = "green"
        events = self.d.check(logging.getLogger(), agentConfig, data)
        self.assertEquals(len(events), 0, events)

        data = _get_data(agentConfig, url)
        data['status'] = "red"
        # BUG FIX: the original passed logging.getLogger (the function
        # object, parentheses missing) instead of a logger instance,
        # unlike the green-status call above.
        events = self.d.check(logging.getLogger(), agentConfig, data)
        self.assertEquals(len(events), 1, events)

    def testCheck(self):
        """The check emits each expected metric exactly once for both URL
        forms, and returns falsy for a wrong URL."""
        agentConfig = {
            'elasticsearch':
                'http://localhost:%s/_cluster/nodes/stats?all=true' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        r = self.c.check(agentConfig)

        def _check(slf, r):
            # isinstance replaces type(r) == type([]).
            slf.assertTrue(isinstance(r, list))
            slf.assertTrue(len(r) > 0)
            # One loop instead of eleven duplicated assertEquals calls.
            for metric in slf.EXPECTED_METRICS:
                slf.assertEquals(
                    len([t for t in r if t[0] == metric]), 1, r)

        _check(self, r)

        # Same check, only given hostname
        agentConfig = {
            'elasticsearch': 'http://localhost:%s' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        r = self.c.check(agentConfig)
        _check(self, r)

        # A wrong URL must yield no results.
        agentConfig = {
            'elasticsearch': 'http://localhost:%s/wrong_url' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        r = self.c.check(agentConfig)
        self.assertFalse(r)
def __init__(self, agentConfig, emitters, systemStats):
    """Build the collector: store config, wire up every system and
    metric check instance, and load any user-configured custom checks.

    agentConfig -- dict of agent settings; 'system_stats' is injected
                   below so checks can reach it through the config.
    emitters    -- sequence of emitter callables (stored, not called here).
    systemStats -- output of config.get_system_stats (see comment below).
    """
    self.agentConfig = agentConfig
    # system stats is generated by config.get_system_stats
    self.agentConfig['system_stats'] = systemStats
    # agent config is used during checks, system_stats can be accessed through the config
    self.os = getOS()
    self.plugins = None
    self.emitters = emitters
    # Metadata is re-sent every metadata_interval seconds (default 10 min).
    self.metadata_interval = int(
        agentConfig.get('metadata_interval', 10 * 60))
    self.metadata_start = time.time()
    # Global socket timeout so a hung endpoint can't stall a whole run.
    socket.setdefaulttimeout(15)
    self.run_count = 0
    self.continue_running = True
    # Unix System Checks
    self._unix_system_checks = {
        'disk': u.Disk(checks_logger),
        'io': u.IO(),
        'load': u.Load(checks_logger),
        'memory': u.Memory(checks_logger),
        'network': u.Network(checks_logger),
        'processes': u.Processes(),
        'cpu': u.Cpu(checks_logger)
    }
    # Win32 System Checks
    self._win32_system_checks = {
        'disk': w32.Disk(checks_logger),
        'io': w32.IO(checks_logger),
        'proc': w32.Processes(checks_logger),
        'memory': w32.Memory(checks_logger),
        'network': w32.Network(checks_logger),
        'cpu': w32.Cpu(checks_logger)
    }
    # Old-style metric checks
    self._couchdb = CouchDb(checks_logger)
    self._mongodb = MongoDb(checks_logger)
    self._mysql = MySql(checks_logger)
    self._rabbitmq = RabbitMq()
    self._ganglia = Ganglia(checks_logger)
    self._cassandra = Cassandra()
    self._dogstream = Dogstreams.init(checks_logger, self.agentConfig)
    self._ddforwarder = DdForwarder(checks_logger, self.agentConfig)
    self._ec2 = EC2(checks_logger)
    # Metric Checks
    self._metrics_checks = [
        ElasticSearch(checks_logger),
        Jvm(checks_logger),
        Tomcat(checks_logger),
        ActiveMQ(checks_logger),
        Solr(checks_logger),
        WMICheck(checks_logger),
        Memcache(checks_logger),
    ]
    # Custom metric checks: 'custom_checks' is a comma-separated list of
    # module specs; each must expose a 'Check' class taking a logger.
    for module_spec in [
            s.strip()
            for s in self.agentConfig.get('custom_checks', '').split(',')
    ]:
        if len(module_spec) == 0:
            continue
        try:
            self._metrics_checks.append(
                modules.load(module_spec, 'Check')(checks_logger))
            logger.info("Registered custom check %s" % module_spec)
        except Exception, e:
            # A broken custom check is logged and skipped, never fatal.
            logger.exception(
                'Unable to load custom check module %s' % module_spec)
class TestElastic(unittest.TestCase):
    """End-to-end tests for the ElasticSearch check and the cluster-status
    event check against a locally started elasticsearch server."""

    # Metric names that must each appear exactly once in check output.
    EXPECTED_METRICS = (
        "elasticsearch.get.total",
        "elasticsearch.search.fetch.total",
        "jvm.gc.collection_time",
        "jvm.mem.heap_committed",
        "jvm.mem.heap_used",
        "jvm.threads.count",
        "jvm.threads.peak_count",
        "elasticsearch.transport.rx_count",
        "elasticsearch.transport.tx_size",
        "elasticsearch.transport.server_open",
        "elasticsearch.thread_pool.snapshot.queue",
    )

    def _wait(self, url):
        """Retry url every 0.5s until it answers or MAX_WAIT tries pass."""
        loop = 0
        while True:
            try:
                req = urllib2.Request(url, None)
                urllib2.urlopen(req)  # response object was never used
                break
            except Exception:
                # Narrowed from a bare `except:`.
                time.sleep(0.5)
                loop += 1
                if loop >= MAX_WAIT:
                    break

    def setUp(self):
        """Create both checks and spawn a local elasticsearch node."""
        self.c = ElasticSearch(logging.getLogger())
        self.d = ElasticSearchClusterStatus(logging.getLogger())
        self.process = None
        try:
            # -f keeps the server in the foreground so terminate() works.
            self.process = subprocess.Popen(
                ["elasticsearch", "-f", "elasticsearch"],
                executable="elasticsearch",
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            # Wait for it to really start
            self._wait("http://localhost:%s" % PORT)
        except Exception:
            logging.getLogger().exception("Cannot instantiate elasticsearch")

    def tearDown(self):
        """Shut down the elasticsearch node started in setUp, if any."""
        if self.process is not None:
            self.process.terminate()

    def testEvent(self):
        """Green cluster status yields no events; red yields exactly one."""
        agentConfig = {
            'elasticsearch':
                'http://localhost:%s/_cluster/nodes/stats?all=true' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        url = urlparse.urljoin(agentConfig['elasticsearch'], HEALTH_URL)
        data = _get_data(agentConfig, url)
        self.assertEquals(len(data), 10, data)

        data['status'] = "green"
        events = self.d.check(logging.getLogger(), agentConfig, data)
        self.assertEquals(len(events), 0, events)

        data = _get_data(agentConfig, url)
        data['status'] = "red"
        # BUG FIX: original passed logging.getLogger without calling it,
        # handing the check a function instead of a logger instance.
        events = self.d.check(logging.getLogger(), agentConfig, data)
        self.assertEquals(len(events), 1, events)

    def testCheck(self):
        """Each expected metric appears exactly once for both URL forms;
        a wrong URL returns a falsy result."""
        agentConfig = {
            'elasticsearch':
                'http://localhost:%s/_cluster/nodes/stats?all=true' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        r = self.c.check(agentConfig)

        def _check(slf, r):
            # isinstance replaces the non-idiomatic type(r) == type([]).
            slf.assertTrue(isinstance(r, list))
            slf.assertTrue(len(r) > 0)
            # Single loop replaces eleven duplicated assertions.
            for metric in slf.EXPECTED_METRICS:
                slf.assertEquals(
                    len([t for t in r if t[0] == metric]), 1, r)

        _check(self, r)

        # Same check, only given hostname
        agentConfig = {
            'elasticsearch': 'http://localhost:%s' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        r = self.c.check(agentConfig)
        _check(self, r)

        # A wrong URL must yield no results.
        agentConfig = {
            'elasticsearch': 'http://localhost:%s/wrong_url' % PORT,
            'version': '0.1',
            'api_key': 'toto'
        }
        r = self.c.check(agentConfig)
        self.assertFalse(r)