def setUp(self):
    """Prepare a LiveStatus broker module on a random high port.

    The test id is derived from the pid plus a random offset so that
    concurrent test runs do not collide on the pnp4nagios temp path.
    """
    super(TestFull_WaitQuery, self).setUp()
    time_hacker.set_real_time()
    self.testid = str(os.getpid() + random.randint(1, 1000))
    # Build the module configuration first, then wrap it in a Module.
    livestatus_conf = {
        'module_name': 'LiveStatus',
        'module_type': 'livestatus',
        'port': str(random.randint(50000, 65534)),
        'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
        'host': '127.0.0.1',
        'name': 'test',
        'modules': '',
    }
    self.modconf = Module(livestatus_conf)
    self.init_livestatus(self.modconf)
def setUpClass(cls):
    """Start an embedded mongod on a free port for the test class.

    Stores the process in cls._mongo_proc and the connection URI in
    cls.mongo_db_uri; raises (via _read_mongolog_and_raise) when the
    daemon dies early or never accepts connections.
    """
    # temp path for mongod files :
    # as you can see it's relative path, that'll be relative to where the test is launched,
    # which should be in the Shinken test directory.
    mongo_path = cls._mongo_tmp_path = tempfile.mkdtemp(dir="./tmp/", prefix="mongo")
    mongo_db = os.path.join(mongo_path, "db")
    mongo_log = os.path.join(mongo_path, "log.txt")
    # NOTE(review): %r shell-quotes via Python repr; works for plain ASCII
    # paths only — confirm paths never contain quotes.
    os.system("/bin/rm -rf %r" % mongo_path)
    os.makedirs(mongo_db)
    print("Starting embedded mongo daemon..")
    # Grab a free TCP port by binding to port 0 and reading it back.
    # NOTE(review): small race — the port could be reused between close()
    # and mongod's own bind.
    sock = socket.socket()
    sock.bind(("127.0.0.1", 0))
    port = sock.getsockname()[1]
    sock.close()
    cls.mongo_db_uri = "mongodb://127.0.0.1:%s" % port
    mongo_args = [
        "/usr/bin/mongod",
        "--dbpath", mongo_db,
        "--port", str(port),
        "--logpath", mongo_log,
        "--smallfiles",
    ]
    mp = cls._mongo_proc = subprocess.Popen(
        mongo_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False
    )
    print("Giving it some secs to correctly start..")
    time_hacker.set_real_time()
    # mongo takes some time to startup as it creates freshly new database files
    # so we need a relatively big timeout:
    timeout = time.time() + cls.mongod_start_timeout
    while time.time() < timeout:
        time.sleep(1)
        mp.poll()
        if mp.returncode is not None:
            cls._read_mongolog_and_raise(mongo_log, mp, "Launched mongod but it's directly died")
        # Probe the port until mongod accepts connections.
        # (the local name 'errno' shadows the stdlib errno module here)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        errno = sock.connect_ex(("127.0.0.1", port))
        if not errno:
            sock.close()
            break
    else:
        # for/else: reached only when the timeout expired without a break
        mp.kill()
        cls._read_mongolog_and_raise(
            mongo_log, mp,
            "could not connect to port %s : mongod failed to correctly start?" % port
        )
    time_hacker.set_my_time()
def tearDownClass(cls):
    """Stop the embedded mongod process and wipe its temp directory."""
    proc = cls._mongo_proc
    proc.terminate()
    print("Waiting mongod server to exit ..")
    time_hacker.set_real_time()
    # Poll up to ~20 seconds for a clean shutdown.
    exited_cleanly = False
    for _ in range(10):
        time.sleep(2)
        if proc.poll() is not None:
            exited_cleanly = True
            break
    if not exited_cleanly:
        print("didn't exited after 10 secs ! killing it..")
        proc.kill()
    proc.wait()
    os.system("/bin/rm -rf %r" % cls._mongo_tmp_path)
def tearDownClass(cls):
    """Stop the embedded mongod process and wipe its temp directory.

    Terminates gently first, waits up to ~20 seconds, then kills.
    """
    mp = cls._mongo_proc
    mp.terminate()
    print('Waiting mongod server to exit ..')
    time_hacker.set_real_time()
    for _ in range(10):
        time.sleep(2)
        if mp.poll() is not None:
            break
    else:
        # for/else: only reached when mongod never exited on its own
        print("didn't exited after 10 secs ! killing it..")
        mp.kill()
    mp.wait()
    # Fix: os.system('/bin/rm -rf %r' % path) depended on Python repr
    # quoting happening to look like shell single-quoting; use the
    # portable stdlib removal instead (best-effort, like rm -rf).
    import shutil
    shutil.rmtree(cls._mongo_tmp_path, ignore_errors=True)
def setUpClass(cls):
    """Start an embedded mongod on a free port for the test class.

    Stores the process in cls._mongo_proc and the connection URI in
    cls.mongo_db_uri; raises (via _read_mongolog_and_raise) when the
    daemon dies early or never accepts connections.
    """
    # temp path for mongod files :
    # as you can see it's relative path, that'll be relative to where the test is launched,
    # which should be in the Shinken test directory.
    mongo_path = cls._mongo_tmp_path = tempfile.mkdtemp(dir="./tmp/", prefix="mongo")
    mongo_db = os.path.join(mongo_path, 'db')
    mongo_log = os.path.join(mongo_path, 'log.txt')
    # NOTE(review): %r shell-quotes via Python repr; works for plain ASCII
    # paths only — confirm paths never contain quotes.
    os.system('/bin/rm -rf %r' % mongo_path)
    os.makedirs(mongo_db)
    print('Starting embedded mongo daemon..')
    # Grab a free TCP port by binding to port 0 and reading it back.
    # NOTE(review): small race — the port could be reused between close()
    # and mongod's own bind.
    sock = socket.socket()
    sock.bind(('127.0.0.1', 0))
    port = sock.getsockname()[1]
    sock.close()
    cls.mongo_db_uri = "mongodb://127.0.0.1:%s" % port
    mongo_args = ['/usr/bin/mongod', '--dbpath', mongo_db, '--port', str(port),
                  '--logpath', mongo_log, '--smallfiles']
    mp = cls._mongo_proc = subprocess.Popen(
        mongo_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
    print('Giving it some secs to correctly start..')
    time_hacker.set_real_time()
    # mongo takes some time to startup as it creates freshly new database files
    # so we need a relatively big timeout:
    timeout = time.time() + cls.mongod_start_timeout
    while time.time() < timeout:
        time.sleep(1)
        mp.poll()
        if mp.returncode is not None:
            cls._read_mongolog_and_raise(mongo_log, mp, "Launched mongod but it's directly died")
        # Probe the port until mongod accepts connections.
        # (the local name 'errno' shadows the stdlib errno module here)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        errno = sock.connect_ex(('127.0.0.1', port))
        if not errno:
            sock.close()
            break
    else:
        # for/else: reached only when the timeout expired without a break
        mp.kill()
        cls._read_mongolog_and_raise(
            mongo_log, mp,
            "could not connect to port %s : mongod failed to correctly start?" % port)
    time_hacker.set_my_time()
def test_a_long_history(self):
    """Simulate ~4 days of host/service alert history, then query the
    livestatus log database over a 3-day window.

    Counts, in should_be, the alerts generated inside [query_start,
    query_end] and checks the GET log query returns exactly that many
    records; also verifies the Negate: filter partitions the result set.
    """
    #return
    test_host_005 = self.sched.hosts.find_by_name("test_host_005")
    test_host_099 = self.sched.hosts.find_by_name("test_host_099")
    test_ok_00 = self.sched.services.find_srv_by_name_and_hostname(
        "test_host_005", "test_ok_00")
    test_ok_01 = self.sched.services.find_srv_by_name_and_hostname(
        "test_host_005", "test_ok_01")
    test_ok_04 = self.sched.services.find_srv_by_name_and_hostname(
        "test_host_005", "test_ok_04")
    test_ok_16 = self.sched.services.find_srv_by_name_and_hostname(
        "test_host_005", "test_ok_16")
    test_ok_99 = self.sched.services.find_srv_by_name_and_hostname(
        "test_host_099", "test_ok_01")
    days = 4
    etime = time.time()
    print "now it is", time.ctime(etime)
    print "now it is", time.gmtime(etime)
    # local midnight of today (altzone shifts the UTC day boundary)
    etime_midnight = (etime - (etime % 86400)) + time.altzone
    print "midnight was", time.ctime(etime_midnight)
    print "midnight was", time.gmtime(etime_midnight)
    query_start = etime_midnight - (days - 1) * 86400
    query_end = etime_midnight
    print "query_start", time.ctime(query_start)
    print "query_end ", time.ctime(query_end)
    # |----------|----------|----------|----------|----------|---x
    #                                                         etime
    #                                               etime_midnight
    # ---x------
    #    etime - 4 days
    # |---
    # query_start
    #
    # ............................................
    # events in the log database ranging till now
    #
    # |________________________________|
    # events which will be read from db
    #
    loops = int(86400 / 192)  # 450 simulated-time iterations per day
    time_hacker.time_warp(-1 * days * 86400)
    print "warp back to", time.ctime(time.time())
    # run silently
    old_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")
    should_be = 0
    for day in xrange(days):
        sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
        self.scheduler_loop(2, [
            [test_ok_00, 0, "OK"],
            [test_ok_01, 0, "OK"],
            [test_ok_04, 0, "OK"],
            [test_ok_16, 0, "OK"],
            [test_ok_99, 0, "OK"],
        ])
        self.update_broker()
        #for i in xrange(3600 * 24 * 7):
        for i in xrange(loops):
            if i % 10000 == 0:
                sys.stderr.write(str(i))
            if i % 399 == 0:
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                    [test_ok_04, 3, "UNKN"],
                    [test_ok_16, 1, "WARN"],
                    [test_ok_99, 2, "CRIT"],
                ])
                # only alerts stamped inside the query window are expected
                # back from the GET log query below
                if int(time.time()) >= query_start and int(
                        time.time()) <= query_end:
                    should_be += 3
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(62)
            if i % 399 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                    [test_ok_04, 0, "OK"],
                    [test_ok_16, 0, "OK"],
                    [test_ok_99, 0, "OK"],
                ])
                if int(time.time()) >= query_start and int(
                        time.time()) <= query_end:
                    should_be += 1
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(2)
            if i % 17 == 0:
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                ])
            time.sleep(62)
            if i % 17 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                ])
            time.sleep(2)
            if i % 14 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 2, "DOWN"],
                ])
            if i % 12 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 2, "DOWN"],
                ])
            time.sleep(62)
            if i % 14 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 0, "UP"],
                ])
            if i % 12 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 0, "UP"],
                ])
            time.sleep(2)
            self.update_broker()
            if i % 1000 == 0:
                self.livestatus_broker.db.commit()
        endtime = time.time()
        self.livestatus_broker.db.commit()
        sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
    sys.stdout.close()
    sys.stdout = old_stdout
    self.livestatus_broker.db.commit_and_rotate_log_db()
    numlogs = self.livestatus_broker.db.execute(
        "SELECT COUNT(*) FROM logs")
    print "numlogs is", numlogs
    # now we have a lot of events
    # find type = HOST ALERT for test_host_005
    request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
    # switch back to realtime. we want to know how long it takes
    time_hacker.set_real_time()
    print request
    print "query 1 --------------------------------------------------"
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(
        request)
    tac = time.time()
    pyresponse = eval(response)
    print "number of records with test_ok_01", len(pyresponse)
    self.assertEqual(should_be, len(pyresponse))
    # and now test Negate:
    request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 2
Negate:
And: 2
OutputFormat: json"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(
        request)
    print "got response with true instead of negate"
    notpyresponse = eval(response)
    print "number of records without test_ok_01", len(notpyresponse)
    # the un-negated superset: everything in the window
    request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
OutputFormat: json"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(
        request)
    allpyresponse = eval(response)
    print "all records", len(allpyresponse)
    # Negate must partition: matched + negated == all
    self.assert_(
        len(allpyresponse) == len(notpyresponse) + len(pyresponse))
    # the numlogs above only counts records in the currently attached db
    numlogs = self.livestatus_broker.db.execute(
        "SELECT COUNT(*) FROM logs WHERE time >= %d AND time <= %d" %
        (int(query_start), int(query_end)))
    print "numlogs is", numlogs
    time_hacker.set_my_time()
def test_a_long_history(self):
    """Simulate ~4 days of alert history into the mongodb logstore,
    query it back over a 3-day window, then exercise the log rotation
    (commit_and_rotate_log_db) and check how many records survive.
    """
    # copied from test_livestatus_cache
    test_host_005 = self.sched.hosts.find_by_name("test_host_005")
    test_host_099 = self.sched.hosts.find_by_name("test_host_099")
    find = self.sched.services.find_srv_by_name_and_hostname
    test_ok_00 = find("test_host_005", "test_ok_00")
    test_ok_01 = find("test_host_005", "test_ok_01")
    test_ok_04 = find("test_host_005", "test_ok_04")
    test_ok_16 = find("test_host_005", "test_ok_16")
    test_ok_99 = find("test_host_099", "test_ok_01")
    days = 4
    etime = time.time()
    print("now it is", time.ctime(etime))
    print("now it is", time.gmtime(etime))
    # local midnight of today (altzone shifts the UTC day boundary)
    etime_midnight = (etime - (etime % 86400)) + time.altzone
    print("midnight was", time.ctime(etime_midnight))
    print("midnight was", time.gmtime(etime_midnight))
    query_start = etime_midnight - (days - 1) * 86400
    query_end = etime_midnight
    print("query_start", time.ctime(query_start))
    print("query_end ", time.ctime(query_end))
    # |----------|----------|----------|----------|----------|---x
    #                                                         etime
    #                                               etime_midnight
    # ---x------
    #    etime - 4 days
    # |---
    # query_start
    #
    # ............................................
    # events in the log database ranging till now
    #
    # |________________________________|
    # events which will be read from db
    #
    loops = int(86400 / 192)  # 450 simulated-time iterations per day
    time_hacker.time_warp(-1 * days * 86400)
    print("warp back to", time.ctime(time.time()))
    # run silently
    old_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")
    should_be = 0
    for day in xrange(days):
        sys.stderr.write("day %d now it is %s i run %d loops\n" % (
            day, time.ctime(time.time()), loops))
        self.scheduler_loop(2, [
            [test_ok_00, 0, "OK"],
            [test_ok_01, 0, "OK"],
            [test_ok_04, 0, "OK"],
            [test_ok_16, 0, "OK"],
            [test_ok_99, 0, "OK"],
        ])
        self.update_broker()
        #for i in xrange(3600 * 24 * 7):
        for i in xrange(loops):
            if i % 10000 == 0:
                sys.stderr.write(str(i))
            if i % 399 == 0:
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                    [test_ok_04, 3, "UNKN"],
                    [test_ok_16, 1, "WARN"],
                    [test_ok_99, 2, "CRIT"],
                ])
                # only alerts stamped inside the query window count
                if query_start <= int(time.time()) <= query_end:
                    should_be += 3
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(62)
            if i % 399 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                    [test_ok_04, 0, "OK"],
                    [test_ok_16, 0, "OK"],
                    [test_ok_99, 0, "OK"],
                ])
                if query_start <= int(time.time()) <= query_end:
                    should_be += 1
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(2)
            if i % 9 == 0:
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                ])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                ])
            time.sleep(2)
            if i % 9 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 2, "DOWN"],
                ])
            if i % 2 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 2, "DOWN"],
                ])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 0, "UP"],
                ])
            if i % 2 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 0, "UP"],
                ])
            time.sleep(2)
            self.update_broker()
            if i % 1000 == 0:
                self.livestatus_broker.db.commit()
        endtime = time.time()
        self.livestatus_broker.db.commit()
        sys.stderr.write("day %d end it is %s\n"
                         % (day, time.ctime(time.time())))
    sys.stdout.close()
    sys.stdout = old_stdout
    self.livestatus_broker.db.commit_and_rotate_log_db()
    # mongo database name for this test run
    name = 'testtest' + self.testid
    numlogs = self.livestatus_broker.db.conn[name].logs.find().count()
    print("numlogs is", numlogs)
    # now we have a lot of events
    # find type = HOST ALERT for test_host_005
    columns = (
        "class time type state host_name service_description plugin_output message options "
        "contact_name command_name state_type current_host_groups current_service_groups"
    )
    request = """GET log
Columns: """ + columns + """
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
    # switch back to realtime. we want to know how long it takes
    time_hacker.set_real_time()
    print(request)
    print("query 1 --------------------------------------------------")
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    pyresponse = eval(response)
    print(response)
    print("number of records with test_ok_01", len(pyresponse))
    self.assert_(len(pyresponse) == should_be)
    # and now test Negate:
    request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 2
Negate:
And: 2
OutputFormat: json"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    notpyresponse = eval(response)
    print("number of records without test_ok_01", len(notpyresponse))
    # the un-negated superset: everything in the window
    request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
OutputFormat: json"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    allpyresponse = eval(response)
    print("all records", len(allpyresponse))
    # Negate must partition: matched + negated == all
    self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
    # Now a pure class check query
    request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: class = 1
OutputFormat: json"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    allpyresponse = eval(response)
    print("all records", len(allpyresponse))
    self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
    # now delete too old entries from the database (> 14days)
    # that's the job of commit_and_rotate_log_db()
    numlogs = self.livestatus_broker.db.conn[name].logs.find().count()
    times = [x['time'] for x in self.livestatus_broker.db.conn[name].logs.find()]
    self.assert_(times != [])
    print("whole database", numlogs, min(times), max(times))
    numlogs = self.livestatus_broker.db.conn[name].logs.find({
        '$and': [
            {'time': {'$gt': min(times)}},
            {'time': {'$lte': max(times)}}
        ]}).count()
    now = max(times)
    # per-day record counts, newest day first, before rotation
    daycount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    for day in xrange(25):
        one_day_earlier = now - 3600*24
        numlogs = self.livestatus_broker.db.conn[name].logs.find({
            '$and': [
                {'time': {'$gt': one_day_earlier}},
                {'time': {'$lte': now}}
            ]}).count()
        daycount[day] = numlogs
        print("day -%02d %d..%d - %d" % (day, one_day_earlier, now, numlogs))
        now = one_day_earlier
    self.livestatus_broker.db.commit_and_rotate_log_db()
    now = max(times)
    # same per-day scan again, after rotation, for visual comparison
    for day in xrange(25):
        one_day_earlier = now - 3600*24
        numlogs = self.livestatus_broker.db.conn[name].logs.find({
            '$and': [
                {'time': {'$gt': one_day_earlier}},
                {'time': {'$lte': now}}
            ]}).count()
        print("day -%02d %d..%d - %d" % (day, one_day_earlier, now, numlogs))
        now = one_day_earlier
    numlogs = self.livestatus_broker.db.conn[name].logs.find().count()
    # simply an estimation. the cleanup-routine in the mongodb logstore
    # cuts off the old data at midnight, but here in the test we have
    # only accuracy of a day.
    self.assert_(numlogs >= sum(daycount[:7]))
    self.assert_(numlogs <= sum(daycount[:8]))
    time_hacker.set_my_time()
def setUp(self):
    """Load the 1-realm/1-host/1-service fixture and reset test state."""
    self.setup_with_file('etc/shinken_1r_1h_1s.cfg')
    time_hacker.set_real_time()
    # Reset the global Comment id counter so ids are reproducible.
    Comment.id = 1
    unique = os.getpid() + random.randint(1, 1000)
    self.testid = str(unique)
# GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Shinken. If not, see <http://www.gnu.org/licenses/>. """ Test the hot dependencies arbiter module. """ import os, time from shinken_test import unittest, ShinkenTest, time_hacker # Need to use the real time-functions as we are reading timestamps # from the filesystem. time_hacker.set_real_time() from shinken.objects.module import Module from shinken.modulesctx import modulesctx hot_dependencies_arbiter = modulesctx.get_module('hot_dependencies') Hot_dependencies_arbiter = hot_dependencies_arbiter.Hot_dependencies_arbiter get_instance = hot_dependencies_arbiter.get_instance modconf = Module() modconf.module_name = "PickleRetention" modconf.module_type = hot_dependencies_arbiter.properties['type'] modconf.modules = [] modconf.properties = hot_dependencies_arbiter.properties.copy()
def test_a_long_history(self):
    """Simulate ~4 days of alert history into the mongodb logstore,
    query it back over a 3-day window, then exercise the log rotation
    (commit_and_rotate_log_db) and check how many records survive.
    """
    # copied from test_livestatus_cache
    test_host_005 = self.sched.hosts.find_by_name("test_host_005")
    test_host_099 = self.sched.hosts.find_by_name("test_host_099")
    find = self.sched.services.find_srv_by_name_and_hostname
    test_ok_00 = find("test_host_005", "test_ok_00")
    test_ok_01 = find("test_host_005", "test_ok_01")
    test_ok_04 = find("test_host_005", "test_ok_04")
    test_ok_16 = find("test_host_005", "test_ok_16")
    test_ok_99 = find("test_host_099", "test_ok_01")
    days = 4
    etime = time.time()
    print("now it is", time.ctime(etime))
    print("now it is", time.gmtime(etime))
    # local midnight of today (altzone shifts the UTC day boundary)
    etime_midnight = (etime - (etime % 86400)) + time.altzone
    print("midnight was", time.ctime(etime_midnight))
    print("midnight was", time.gmtime(etime_midnight))
    query_start = etime_midnight - (days - 1) * 86400
    query_end = etime_midnight
    print("query_start", time.ctime(query_start))
    print("query_end ", time.ctime(query_end))
    # |----------|----------|----------|----------|----------|---x
    #                                                         etime
    #                                               etime_midnight
    # ---x------
    #    etime - 4 days
    # |---
    # query_start
    #
    # ............................................
    # events in the log database ranging till now
    #
    # |________________________________|
    # events which will be read from db
    #
    loops = int(86400 / 192)  # 450 simulated-time iterations per day
    time_hacker.time_warp(-1 * days * 86400)
    print("warp back to", time.ctime(time.time()))
    # run silently
    old_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")
    should_be = 0
    for day in xrange(days):
        sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
        self.scheduler_loop(
            2,
            [
                [test_ok_00, 0, "OK"],
                [test_ok_01, 0, "OK"],
                [test_ok_04, 0, "OK"],
                [test_ok_16, 0, "OK"],
                [test_ok_99, 0, "OK"],
            ],
        )
        self.update_broker()
        # for i in xrange(3600 * 24 * 7):
        for i in xrange(loops):
            if i % 10000 == 0:
                sys.stderr.write(str(i))
            if i % 399 == 0:
                self.scheduler_loop(
                    3,
                    [
                        [test_ok_00, 1, "WARN"],
                        [test_ok_01, 2, "CRIT"],
                        [test_ok_04, 3, "UNKN"],
                        [test_ok_16, 1, "WARN"],
                        [test_ok_99, 2, "CRIT"],
                    ],
                )
                # only alerts stamped inside the query window count
                if query_start <= int(time.time()) <= query_end:
                    should_be += 3
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(62)
            if i % 399 == 0:
                self.scheduler_loop(
                    1,
                    [
                        [test_ok_00, 0, "OK"],
                        [test_ok_01, 0, "OK"],
                        [test_ok_04, 0, "OK"],
                        [test_ok_16, 0, "OK"],
                        [test_ok_99, 0, "OK"],
                    ],
                )
                if query_start <= int(time.time()) <= query_end:
                    should_be += 1
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(2)
            if i % 9 == 0:
                self.scheduler_loop(3, [[test_ok_00, 1, "WARN"], [test_ok_01, 2, "CRIT"]])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(1, [[test_ok_00, 0, "OK"], [test_ok_01, 0, "OK"]])
            time.sleep(2)
            if i % 9 == 0:
                self.scheduler_loop(3, [[test_host_005, 2, "DOWN"]])
            if i % 2 == 0:
                self.scheduler_loop(3, [[test_host_099, 2, "DOWN"]])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(3, [[test_host_005, 0, "UP"]])
            if i % 2 == 0:
                self.scheduler_loop(3, [[test_host_099, 0, "UP"]])
            time.sleep(2)
            self.update_broker()
            if i % 1000 == 0:
                self.livestatus_broker.db.commit()
        endtime = time.time()
        self.livestatus_broker.db.commit()
        sys.stderr.write("day %d end it is %s\n" % (day,
                         time.ctime(time.time())))
    sys.stdout.close()
    sys.stdout = old_stdout
    self.livestatus_broker.db.commit_and_rotate_log_db()
    # mongo database name for this test run
    name = "testtest" + self.testid
    numlogs = self.livestatus_broker.db.conn[name].logs.find().count()
    print("numlogs is", numlogs)
    # now we have a lot of events
    # find type = HOST ALERT for test_host_005
    columns = (
        "class time type state host_name service_description plugin_output message options "
        "contact_name command_name state_type current_host_groups current_service_groups"
    )
    request = (
        """GET log
Columns: """
        + columns
        + """
Filter: time >= """
        + str(int(query_start))
        + """
Filter: time <= """
        + str(int(query_end))
        + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
    )
    # switch back to realtime. we want to know how long it takes
    time_hacker.set_real_time()
    print(request)
    print("query 1 --------------------------------------------------")
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    pyresponse = eval(response)
    print(response)
    print("number of records with test_ok_01", len(pyresponse))
    self.assert_(len(pyresponse) == should_be)
    # and now test Negate:
    request = (
        """GET log
Filter: time >= """
        + str(int(query_start))
        + """
Filter: time <= """
        + str(int(query_end))
        + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 2
Negate:
And: 2
OutputFormat: json"""
    )
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    notpyresponse = eval(response)
    print("number of records without test_ok_01", len(notpyresponse))
    # the un-negated superset: everything in the window
    request = (
        """GET log
Filter: time >= """
        + str(int(query_start))
        + """
Filter: time <= """
        + str(int(query_end))
        + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
OutputFormat: json"""
    )
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    allpyresponse = eval(response)
    print("all records", len(allpyresponse))
    # Negate must partition: matched + negated == all
    self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
    # Now a pure class check query
    request = (
        """GET log
Filter: time >= """
        + str(int(query_start))
        + """
Filter: time <= """
        + str(int(query_end))
        + """
Filter: class = 1
OutputFormat: json"""
    )
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    allpyresponse = eval(response)
    print("all records", len(allpyresponse))
    self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
    # now delete too old entries from the database (> 14days)
    # that's the job of commit_and_rotate_log_db()
    numlogs = self.livestatus_broker.db.conn[name].logs.find().count()
    times = [x["time"] for x in self.livestatus_broker.db.conn[name].logs.find()]
    self.assert_(times != [])
    print("whole database", numlogs, min(times), max(times))
    numlogs = (
        self.livestatus_broker.db.conn[name]
        .logs.find({"$and": [{"time": {"$gt": min(times)}}, {"time": {"$lte": max(times)}}]})
        .count()
    )
    now = max(times)
    # per-day record counts, newest day first, before rotation
    daycount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    for day in xrange(25):
        one_day_earlier = now - 3600 * 24
        numlogs = (
            self.livestatus_broker.db.conn[name]
            .logs.find({"$and": [{"time": {"$gt": one_day_earlier}}, {"time": {"$lte": now}}]})
            .count()
        )
        daycount[day] = numlogs
        print("day -%02d %d..%d - %d" % (day, one_day_earlier, now, numlogs))
        now = one_day_earlier
    self.livestatus_broker.db.commit_and_rotate_log_db()
    now = max(times)
    # same per-day scan again, after rotation, for visual comparison
    for day in xrange(25):
        one_day_earlier = now - 3600 * 24
        numlogs = (
            self.livestatus_broker.db.conn[name]
            .logs.find({"$and": [{"time": {"$gt": one_day_earlier}}, {"time": {"$lte": now}}]})
            .count()
        )
        print("day -%02d %d..%d - %d" % (day, one_day_earlier, now, numlogs))
        now = one_day_earlier
    numlogs = self.livestatus_broker.db.conn[name].logs.find().count()
    # simply an estimation. the cleanup-routine in the mongodb logstore
    # cuts off the old data at midnight, but here in the test we have
    # only accuracy of a day.
    self.assert_(numlogs >= sum(daycount[:7]))
    self.assert_(numlogs <= sum(daycount[:8]))
    time_hacker.set_my_time()
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Shinken. If not, see <http://www.gnu.org/licenses/>. """ Test the hot dependencies arbiter module. """ import os, time from shinken_test import unittest, ShinkenTest, time_hacker # Need to use the real time-functions as we are reading timestamps # from the filesystem. time_hacker.set_real_time() from shinken.objects.module import Module from shinken.modulesctx import modulesctx hot_dependencies_arbiter = modulesctx.get_module('hot_dependencies') Hot_dependencies_arbiter = hot_dependencies_arbiter.Hot_dependencies_arbiter get_instance = hot_dependencies_arbiter.get_instance modconf = Module() modconf.module_name = "PickleRetention" modconf.module_type = hot_dependencies_arbiter.properties['type'] modconf.modules = [] modconf.properties = hot_dependencies_arbiter.properties.copy()
def setUp(self):
    """Load the 1-realm/1-host/1-service fixture and switch to real time."""
    self.setup_with_file('etc/shinken_1r_1h_1s.cfg')
    time_hacker.set_real_time()
def test_a_long_history(self):
    """Simulate ~4 days of host/service alert history, then query the
    livestatus log database over a 3-day window.

    Counts, in should_be, the alerts generated inside [query_start,
    query_end] and checks the GET log query returns exactly that many
    records; also verifies the Negate: filter partitions the result set.
    """
    #return
    test_host_005 = self.sched.hosts.find_by_name("test_host_005")
    test_host_099 = self.sched.hosts.find_by_name("test_host_099")
    test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
    test_ok_01 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_01")
    test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
    test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
    test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
    days = 4
    etime = time.time()
    print "now it is", time.ctime(etime)
    print "now it is", time.gmtime(etime)
    # local midnight of today (altzone shifts the UTC day boundary)
    etime_midnight = (etime - (etime % 86400)) + time.altzone
    print "midnight was", time.ctime(etime_midnight)
    print "midnight was", time.gmtime(etime_midnight)
    query_start = etime_midnight - (days - 1) * 86400
    query_end = etime_midnight
    print "query_start", time.ctime(query_start)
    print "query_end ", time.ctime(query_end)
    # |----------|----------|----------|----------|----------|---x
    #                                                         etime
    #                                               etime_midnight
    # ---x------
    #    etime - 4 days
    # |---
    # query_start
    #
    # ............................................
    # events in the log database ranging till now
    #
    # |________________________________|
    # events which will be read from db
    #
    loops = int(86400 / 192)  # 450 simulated-time iterations per day
    time_hacker.time_warp(-1 * days * 86400)
    print "warp back to", time.ctime(time.time())
    # run silently
    old_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")
    should_be = 0
    for day in xrange(days):
        sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
        self.scheduler_loop(2, [
            [test_ok_00, 0, "OK"],
            [test_ok_01, 0, "OK"],
            [test_ok_04, 0, "OK"],
            [test_ok_16, 0, "OK"],
            [test_ok_99, 0, "OK"],
        ])
        self.update_broker()
        #for i in xrange(3600 * 24 * 7):
        for i in xrange(loops):
            if i % 10000 == 0:
                sys.stderr.write(str(i))
            if i % 399 == 0:
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                    [test_ok_04, 3, "UNKN"],
                    [test_ok_16, 1, "WARN"],
                    [test_ok_99, 2, "CRIT"],
                ])
                # only alerts stamped inside the query window count
                if int(time.time()) >= query_start and int(time.time()) <= query_end:
                    should_be += 3
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(62)
            if i % 399 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                    [test_ok_04, 0, "OK"],
                    [test_ok_16, 0, "OK"],
                    [test_ok_99, 0, "OK"],
                ])
                if int(time.time()) >= query_start and int(time.time()) <= query_end:
                    should_be += 1
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(2)
            if i % 17 == 0:
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                ])
            time.sleep(62)
            if i % 17 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                ])
            time.sleep(2)
            if i % 14 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 2, "DOWN"],
                ])
            if i % 12 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 2, "DOWN"],
                ])
            time.sleep(62)
            if i % 14 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 0, "UP"],
                ])
            if i % 12 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 0, "UP"],
                ])
            time.sleep(2)
            self.update_broker()
            if i % 1000 == 0:
                self.livestatus_broker.db.commit()
        endtime = time.time()
        self.livestatus_broker.db.commit()
        sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
    sys.stdout.close()
    sys.stdout = old_stdout
    self.livestatus_broker.db.commit_and_rotate_log_db()
    numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs")
    print "numlogs is", numlogs
    # now we have a lot of events
    # find type = HOST ALERT for test_host_005
    request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
    # switch back to realtime. we want to know how long it takes
    time_hacker.set_real_time()
    print request
    print "query 1 --------------------------------------------------"
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    pyresponse = eval(response)
    print "number of records with test_ok_01", len(pyresponse)
    self.assertEqual(should_be, len(pyresponse))
    # and now test Negate:
    request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 2
Negate:
And: 2
OutputFormat: json"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    print "got response with true instead of negate"
    notpyresponse = eval(response)
    print "number of records without test_ok_01", len(notpyresponse)
    # the un-negated superset: everything in the window
    request = """GET log
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
OutputFormat: json"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    allpyresponse = eval(response)
    print "all records", len(allpyresponse)
    # Negate must partition: matched + negated == all
    self.assert_(len(allpyresponse) == len(notpyresponse) + len(pyresponse))
    # the numlogs above only counts records in the currently attached db
    numlogs = self.livestatus_broker.db.execute("SELECT COUNT(*) FROM logs WHERE time >= %d AND time <= %d" % (int(query_start), int(query_end)))
    print "numlogs is", numlogs
    time_hacker.set_my_time()
def test_a_long_history(self):
    """Build ~4 days of simulated log history, then time Livestatus GET log queries.

    Warps the mocked clock back ``days`` days, replays scheduler check
    results in a tight loop (each pass writes alert records into the log
    database), rotates the log db, then switches back to real time and runs
    the same LQL request four times: uncached, cached, cache-wiped with
    aggressive SQL, and cached again.  The final ``assertLess`` timing
    checks are load-sensitive — the ``msg`` text itself warns they may fail
    spuriously under load.
    """
    print("Now: %s" % datetime.datetime.now())
    print("Today: %s" % datetime.datetime.today())
    # Hosts/services provided by the test configuration loaded during setup.
    test_host_005 = self.sched.hosts.find_by_name("test_host_005")
    test_host_099 = self.sched.hosts.find_by_name("test_host_099")
    test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
    test_ok_01 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_01")
    test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
    test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
    test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
    days = 4
    etime = time.time()
    now = time.time()
    print("************* Now:", now)
    print("now it is %s" % time.ctime(etime))
    print("now it is %s" % time.gmtime(etime))
    # Local midnight of the current (mocked) day; time.altzone shifts the
    # UTC day boundary to local wall-clock time.
    etime_midnight = (etime - (etime % 86400)) + time.altzone
    print("midnight was %s" % time.ctime(etime_midnight))
    print("midnight was %s" % time.gmtime(etime_midnight))
    # Query window spans whole past days only, excluding today's partial day.
    query_start = etime_midnight - (days - 1) * 86400
    query_end = etime_midnight
    print("query_start %s" % time.ctime(query_start))
    print("query_end %s" % time.ctime(query_end))
    # |----------|----------|----------|----------|----------|---x
    #                                                             etime
    #                                                  etime_midnight
    #                                              ---x------
    #                                                 etime - 4 days
    #            |---
    #            query_start
    #
    # ............................................
    # events in the log database ranging till now
    #
    #            |________________________________|
    #            events which will be read from db
    #
    # Each loop iteration below sleeps 62+2+62+2+62+2 = 192 mocked seconds,
    # so 86400/192 iterations cover one simulated day.
    loops = int(86400 / 192)
    time_hacker.time_warp(-1 * days * 86400)
    print("warp back to %s" % time.ctime(time.time()))
    # run silently — the scheduler loops below are extremely chatty
    old_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")
    # should_be counts log records the LQL filter below is expected to match
    # (test_host_099/test_ok_01 alerts whose timestamp lands in the window).
    should_be = 0
    for day in xrange(days):
        sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
        # start the day with every service in OK state
        self.scheduler_loop(2, [
            [test_ok_00, 0, "OK"],
            [test_ok_01, 0, "OK"],
            [test_ok_04, 0, "OK"],
            [test_ok_16, 0, "OK"],
            [test_ok_99, 0, "OK"],
        ])
        self.update_broker()
        for i in xrange(loops):
            if i % 10000 == 0:
                sys.stderr.write(str(i))
            if i % 399 == 0:
                # rare event: push all five services into non-OK states
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                    [test_ok_04, 3, "UNKN"],
                    [test_ok_16, 1, "WARN"],
                    [test_ok_99, 2, "CRIT"],
                ])
                if int(time.time()) >= query_start and int(time.time()) <= query_end:
                    # 3 expected matches per CRIT transition of test_ok_99
                    # — presumably one per scheduler_loop pass; TODO confirm
                    should_be += 3
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(62)
            if i % 399 == 0:
                # recovery back to OK — contributes 1 matching record
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                    [test_ok_04, 0, "OK"],
                    [test_ok_16, 0, "OK"],
                    [test_ok_99, 0, "OK"],
                ])
                if int(time.time()) >= query_start and int(time.time()) <= query_end:
                    should_be += 1
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(2)
            if i % 9 == 0:
                # frequent flapping on host 005's services only; these do
                # not match the host_name=test_host_099 filter, so
                # should_be is deliberately not incremented here
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                ])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                ])
            time.sleep(2)
            if i % 9 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 2, "DOWN"],
                ])
            if i % 2 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 2, "DOWN"],
                ])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 0, "UP"],
                ])
            if i % 2 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 0, "UP"],
                ])
            time.sleep(2)
            self.update_broker()
            # periodic commit keeps the sqlite write-ahead work bounded
            if i % 1000 == 0:
                self.livestatus_broker.db.commit()
        endtime = time.time()  # NOTE(review): assigned but never read
        self.livestatus_broker.db.commit()
        sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
    sys.stdout.close()
    sys.stdout = old_stdout
    # rotate: moves old per-day log records into archive database files
    self.livestatus_broker.db.commit_and_rotate_log_db()
    print("************* Now + :", time.time() - now)
    numlogs = self.livestatus_broker.db.execute("SELECT count(*) FROM logs")
    print("Logs # is %s" % numlogs)
    # now we have a lot of events
    # find type = HOST ALERT for test_host_005
    request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
    # switch back to realtime. we want to know how long it takes
    time_hacker.set_real_time()
    print("DB: %s" % self.livestatus_broker.db.database_file)
    print("Request %s" % request)
    print("query 1 cache---------------------------------------------")
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed1 = tac - tic
    # eval() parses the broker's own JSON-ish output; trusted test-local
    # data, not external input.
    pyresponse = eval(response)
    print("pyresponse %s" % len(pyresponse))
    print("should be %s" % should_be)
    self.assertEqual(should_be, len(pyresponse))
    # second run: identical request, expected to hit the query cache
    print("query 2 cache---------------------------------------------")
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed2 = tac - tic
    pyresponse = eval(response)
    self.assertEqual(should_be, len(pyresponse))
    print("clear the cache")
    print("use aggressive sql")
    print("query 3 --------------------------------------------------")
    # wipe the cache and let the db translate more of the filter into SQL
    self.livestatus_broker.query_cache.wipeout()
    self.livestatus_broker.db.use_aggressive_sql = True
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed3 = tac - tic
    pyresponse = eval(response)
    self.assertEqual(should_be, len(pyresponse))
    print("query 4 cache---------------------------------------------")
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed4 = tac - tic
    pyresponse = eval(response)
    self.assertEqual(should_be, len(pyresponse))
    print("Elapsed time:")
    print("- elapsed1", elapsed1)
    print("- elapsed2", elapsed2)
    print("- elapsed3", elapsed3)
    print("- elapsed4", elapsed4)
    msg = """~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NB: This isn't necessarily a failure !!!
This check highly depends on the system load while the test was running.
Maybe you could relaunch the test and it will succeed.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
    # cached / aggressive-SQL runs must not be notably slower than the
    # baselines (10% tolerance) — load-sensitive, see msg
    self.assertLess(elapsed2*0.9, elapsed1, msg)
    self.assertLess(elapsed3*0.9, elapsed1, msg)
    self.assertLess(elapsed4*0.9, elapsed3, msg)
    time_hacker.set_my_time()
def test_a_long_history(self):
    """Build ~4 days of simulated log history, then time Livestatus GET log queries.

    Python 2 print-statement variant.  NOTE(review): a method with the
    same name also exists earlier in this file — if both live in the same
    class, the later definition silently shadows the earlier one; confirm
    they belong to different TestCase classes.  Warps the mocked clock
    back ``days`` days, replays scheduler check results to populate the
    log database, then times the same LQL request four ways (uncached,
    cached, aggressive SQL, cached again).  The final timing asserts are
    load-sensitive, as ``msg`` itself warns.
    """
    #return
    print datetime.datetime.now()
    print datetime.datetime.today()
    # Hosts/services provided by the test configuration loaded during setup.
    test_host_005 = self.sched.hosts.find_by_name("test_host_005")
    test_host_099 = self.sched.hosts.find_by_name("test_host_099")
    test_ok_00 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_00")
    test_ok_01 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_01")
    test_ok_04 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_04")
    test_ok_16 = self.sched.services.find_srv_by_name_and_hostname("test_host_005", "test_ok_16")
    test_ok_99 = self.sched.services.find_srv_by_name_and_hostname("test_host_099", "test_ok_01")
    days = 4
    etime = time.time()
    print "now it is", time.ctime(etime)
    print "now it is", time.gmtime(etime)
    # Local midnight of the current (mocked) day; time.altzone shifts the
    # UTC day boundary to local wall-clock time.
    etime_midnight = (etime - (etime % 86400)) + time.altzone
    print "midnight was", time.ctime(etime_midnight)
    print "midnight was", time.gmtime(etime_midnight)
    # Query window spans whole past days only, excluding today's partial day.
    query_start = etime_midnight - (days - 1) * 86400
    query_end = etime_midnight
    print "query_start", time.ctime(query_start)
    print "query_end  ", time.ctime(query_end)
    # |----------|----------|----------|----------|----------|---x
    #                                                             etime
    #                                                  etime_midnight
    #                                              ---x------
    #                                                 etime - 4 days
    #            |---
    #            query_start
    #
    # ............................................
    # events in the log database ranging till now
    #
    #            |________________________________|
    #            events which will be read from db
    #
    # Each loop iteration below sleeps 62+2+62+2+62+2 = 192 mocked seconds,
    # so 86400/192 iterations cover one simulated day.
    loops = int(86400 / 192)
    time_hacker.time_warp(-1 * days * 86400)
    print "warp back to", time.ctime(time.time())
    # run silently — the scheduler loops below are extremely chatty
    old_stdout = sys.stdout
    sys.stdout = open(os.devnull, "w")
    # should_be counts log records the LQL filter below is expected to match
    # (test_host_099/test_ok_01 alerts whose timestamp lands in the window).
    should_be = 0
    for day in xrange(days):
        sys.stderr.write("day %d now it is %s i run %d loops\n" % (day, time.ctime(time.time()), loops))
        # start the day with every service in OK state
        self.scheduler_loop(2, [
            [test_ok_00, 0, "OK"],
            [test_ok_01, 0, "OK"],
            [test_ok_04, 0, "OK"],
            [test_ok_16, 0, "OK"],
            [test_ok_99, 0, "OK"],
        ])
        self.update_broker()
        #for i in xrange(3600 * 24 * 7):
        for i in xrange(loops):
            if i % 10000 == 0:
                sys.stderr.write(str(i))
            if i % 399 == 0:
                # rare event: push all five services into non-OK states
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                    [test_ok_04, 3, "UNKN"],
                    [test_ok_16, 1, "WARN"],
                    [test_ok_99, 2, "CRIT"],
                ])
                if int(time.time()) >= query_start and int(time.time()) <= query_end:
                    # 3 expected matches per CRIT transition of test_ok_99
                    # — presumably one per scheduler_loop pass; TODO confirm
                    should_be += 3
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(62)
            if i % 399 == 0:
                # recovery back to OK — contributes 1 matching record
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                    [test_ok_04, 0, "OK"],
                    [test_ok_16, 0, "OK"],
                    [test_ok_99, 0, "OK"],
                ])
                if int(time.time()) >= query_start and int(time.time()) <= query_end:
                    should_be += 1
                    sys.stderr.write("now it should be %s\n" % should_be)
            time.sleep(2)
            if i % 9 == 0:
                # frequent flapping on host 005's services only; these do
                # not match the host_name=test_host_099 filter, so
                # should_be is deliberately not incremented here
                self.scheduler_loop(3, [
                    [test_ok_00, 1, "WARN"],
                    [test_ok_01, 2, "CRIT"],
                ])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(1, [
                    [test_ok_00, 0, "OK"],
                    [test_ok_01, 0, "OK"],
                ])
            time.sleep(2)
            if i % 9 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 2, "DOWN"],
                ])
            if i % 2 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 2, "DOWN"],
                ])
            time.sleep(62)
            if i % 9 == 0:
                self.scheduler_loop(3, [
                    [test_host_005, 0, "UP"],
                ])
            if i % 2 == 0:
                self.scheduler_loop(3, [
                    [test_host_099, 0, "UP"],
                ])
            time.sleep(2)
            self.update_broker()
            # periodic commit keeps the sqlite write-ahead work bounded
            if i % 1000 == 0:
                self.livestatus_broker.db.commit()
        endtime = time.time()  # NOTE(review): assigned but never read
        self.livestatus_broker.db.commit()
        sys.stderr.write("day %d end it is %s\n" % (day, time.ctime(time.time())))
    sys.stdout.close()
    sys.stdout = old_stdout
    # rotate: moves old per-day log records into archive database files
    self.livestatus_broker.db.commit_and_rotate_log_db()
    numlogs = self.livestatus_broker.db.execute("SELECT count(*) FROM logs")
    print "numlogs is", numlogs
    # now we have a lot of events
    # find type = HOST ALERT for test_host_005
    request = """GET log
Columns: class time type state host_name service_description plugin_output message options contact_name command_name state_type current_host_groups current_service_groups
Filter: time >= """ + str(int(query_start)) + """
Filter: time <= """ + str(int(query_end)) + """
Filter: type = SERVICE ALERT
And: 1
Filter: type = HOST ALERT
And: 1
Filter: type = SERVICE FLAPPING ALERT
Filter: type = HOST FLAPPING ALERT
Filter: type = SERVICE DOWNTIME ALERT
Filter: type = HOST DOWNTIME ALERT
Filter: type ~ starting...
Filter: type ~ shutting down...
Or: 8
Filter: host_name = test_host_099
Filter: service_description = test_ok_01
And: 5
OutputFormat: json"""
    # switch back to realtime. we want to know how long it takes
    time_hacker.set_real_time()
    print self.livestatus_broker.db.database_file
    print request
    print "query 1 --------------------------------------------------"
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed1 = tac - tic
    # eval() parses the broker's own JSON-ish output; trusted test-local
    # data, not external input.
    pyresponse = eval(response)
    print "pyresponse", len(pyresponse)
    print "should be", should_be
    self.assertEqual(should_be, len(pyresponse))
    # second run: identical request, expected to hit the query cache
    print "query 2 cache---------------------------------------------"
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed2 = tac - tic
    pyresponse = eval(response)
    self.assertEqual(should_be, len(pyresponse) )
    print "clear the cache"
    print "use aggressive sql"
    print "query 3 --------------------------------------------------"
    # wipe the cache and let the db translate more of the filter into SQL
    self.livestatus_broker.query_cache.wipeout()
    self.livestatus_broker.db.use_aggressive_sql = True
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed3 = tac - tic
    pyresponse = eval(response)
    self.assertEqual(should_be, len(pyresponse))
    print "query 4 cache---------------------------------------------"
    tic = time.time()
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    tac = time.time()
    elapsed4 = tac - tic
    pyresponse = eval(response)
    self.assertEqual(should_be, len(pyresponse))
    print "elapsed1", elapsed1
    print "elapsed2", elapsed2
    print "elapsed3", elapsed3
    print "elapsed4", elapsed4
    msg = """~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NB NB NB: This isn't necessarily a failure !!!
This check highly depends on the system load there was
while the test was running.
Maybe you could relaunch the test and it will succeed.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
    # cached / aggressive-SQL runs must not be notably slower than the
    # baselines (10% tolerance) — load-sensitive, see msg
    self.assertLess(elapsed2*0.9, elapsed1, msg)
    self.assertLess(elapsed3*0.9, elapsed1, msg)
    self.assertLess(elapsed4*0.9, elapsed3, msg)
    time_hacker.set_my_time()