def main(path):
    """Start akumulid on a fresh database, send every malformed message from
    MSG and verify each response starts with its expected error prefix."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty database
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        for ix in range(0, len(MSG)):
            msg, expected_prefix = MSG[ix]
            result = send_malicious_message(msg)
            if not result.startswith(expected_prefix):
                print("Error at {0}".format(ix))
                print("Message:\n{0}".format(MSG[ix]))
                print("Response:\n{0}".format(result))
                raise ValueError("Bad response")
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Start akumulid on a fresh database, send every malformed message from
    MSG and verify each response starts with its expected error prefix."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty database
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        for ix in range(0, len(MSG)):
            msg, expected_prefix = MSG[ix]
            result = send_malicious_message(msg)
            if not result.startswith(expected_prefix):
                print("Error at {0}".format(ix))
                print("Message:\n{0}".format(MSG[ix]))
                print("Response:\n{0}".format(result))
                raise ValueError("Bad response")
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Write into a test-sized database until the second volume overflows,
    then verify backward reads before and after a server restart."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty (test-sized) database
    akumulid.create_test_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        # fill data in
        statsurl = "http://{0}:{1}/api/stats".format(host, httpport)
        chan = TCPChan(host, tcpport)

        def get_free_space():
            # Returns (volume_0, volume_1) free space from /api/stats.
            rawstats = urllib.urlopen(statsurl).read()
            stats = json.loads(rawstats)
            volume0space = stats["volume_0"]["free_space"]
            if "volume_1" in stats:
                volume1space = stats["volume_1"]["free_space"]
            else:
                volume1space = 0
            return int(volume0space), int(volume1space)

        print("Sending messages...")
        _, prev_vol1 = get_free_space()
        batch_size = 10000
        # Keep writing until the second volume's free space shrinks (overflow).
        for ix, it in enumerate(att.infinite_msg_stream(batch_size, 'temp', tag='test')):
            chan.send(it)
            if ix % 1000 == 0:
                vol0, vol1 = get_free_space()
                print("{0} msgs written, free space in 1st volume: {1}, in 2nd volume: {2}"
                      .format(ix * batch_size, vol0, vol1))
                if vol1 < prev_vol1:
                    print("Overflow occured")
                    break
                prev_vol1 = vol1
        # Read data back in backward direction (cached values should be included)
        read_in_backward_direction(batch_size)
        # Try to reopen and check once again
        akumulid.stop()
        time.sleep(5)
        akumulid.serve()
        time.sleep(5)
        read_in_backward_direction(batch_size)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path, protocol):
    """Send a bulk message stream over TCP or UDP and verify, via /api/stats,
    that data actually reached the first volume on disk."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty database
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        # fill data in
        host = '127.0.0.1'
        udpport = 8383
        tcpport = 8282
        if protocol == 'TCP':
            chan = TCPChan(host, tcpport)
        elif protocol == 'UDP':
            chan = UDPChan(host, udpport)
        else:
            # Fixed: previously execution fell through with `chan` undefined,
            # producing a NameError below; abort explicitly instead.
            print('Unknown protocol "{0}"'.format(protocol))
            sys.exit(1)
        dt = datetime.datetime.utcnow()
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 1000000
        print("Sending {0} messages through {1}...".format(nmsgs, protocol))
        for it in att.generate_bulk_messages(dt, delta, nmsgs, ['foo', 'bar', 'buz'], tag='test'):
            chan.send(it)
        # check stats
        httpport = 8181
        statsurl = "http://{0}:{1}/api/stats".format(host, httpport)
        rawstats = urllib.urlopen(statsurl).read()
        stats = json.loads(rawstats)
        # some space should be used
        volume0space = stats["volume_0"]["free_space"]
        if "volume_1" in stats:
            volume1space = stats["volume_1"]["free_space"]
        else:
            volume1space = 0
        if volume0space == volume1space:
            print("Test #1 failed. Nothing was written to disk, /stats:")
            print(rawstats)
            sys.exit(10)
        else:
            print("Test #1 passed")
    except Exception:
        # Fixed: the bare `except:` here intercepted the SystemExit raised by
        # sys.exit(10) above and changed the exit code to 1.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Write into a test-sized database until the second volume overflows,
    then verify backward reads before and after a server restart."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty (test-sized) database
    akumulid.create_test_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        # fill data in
        statsurl = "http://{0}:{1}/api/stats".format(host, httpport)
        chan = TCPChan(host, tcpport)

        def get_free_space():
            # Returns (volume_0, volume_1) free space from /api/stats.
            rawstats = urllib.urlopen(statsurl).read()
            stats = json.loads(rawstats)
            volume0space = stats["volume_0"]["free_space"]
            if "volume_1" in stats:
                volume1space = stats["volume_1"]["free_space"]
            else:
                volume1space = 0
            return int(volume0space), int(volume1space)

        print("Sending messages...")
        _, prev_vol1 = get_free_space()
        batch_size = 10000
        # Keep writing until the second volume's free space shrinks (overflow).
        for ix, it in enumerate(att.infinite_msg_stream(batch_size, 'temp', tag='test')):
            chan.send(it)
            if ix % 1000 == 0:
                vol0, vol1 = get_free_space()
                print("{0} msgs written, free space in 1st volume: {1}, in 2nd volume: {2}"
                      .format(ix * batch_size, vol0, vol1))
                if vol1 < prev_vol1:
                    print("Overflow occured")
                    break
                prev_vol1 = vol1
        # Read data back in backward direction (cached values should be included)
        read_in_backward_direction(batch_size)
        # Try to reopen and check once again
        akumulid.stop()
        time.sleep(5)
        akumulid.serve()
        time.sleep(5)
        read_in_backward_direction(batch_size)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path, protocol):
    """Send a bulk message stream over TCP or UDP and verify, via /api/stats,
    that data actually reached the first volume on disk."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty database
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        # fill data in
        host = '127.0.0.1'
        udpport = 8383
        tcpport = 8282
        if protocol == 'TCP':
            chan = TCPChan(host, tcpport)
        elif protocol == 'UDP':
            chan = UDPChan(host, udpport)
        else:
            # Fixed: previously execution fell through with `chan` undefined,
            # producing a NameError below; abort explicitly instead.
            print('Unknown protocol "{0}"'.format(protocol))
            sys.exit(1)
        dt = datetime.datetime.utcnow()
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 1000000
        print("Sending {0} messages through {1}...".format(nmsgs, protocol))
        for it in att.generate_bulk_messages(dt, delta, nmsgs, ['foo', 'bar', 'buz'], tag='test'):
            chan.send(it)
        # check stats
        httpport = 8181
        statsurl = "http://{0}:{1}/api/stats".format(host, httpport)
        rawstats = urllib.urlopen(statsurl).read()
        stats = json.loads(rawstats)
        # some space should be used
        volume0space = stats["volume_0"]["free_space"]
        if "volume_1" in stats:
            volume1space = stats["volume_1"]["free_space"]
        else:
            volume1space = 0
        if volume0space == volume1space:
            print("Test #1 failed. Nothing was written to disk, /stats:")
            print(rawstats)
            sys.exit(10)
        else:
            print("Test #1 passed")
    except Exception:
        # Fixed: the bare `except:` here intercepted the SystemExit raised by
        # sys.exit(10) above and changed the exit code to 1.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path): akumulid = att.create_akumulid(path) # Reset database akumulid.delete_database() akumulid.create_database() # start ./akumulid server print("Starting server...") akumulid.serve() time.sleep(5) try: chan = att.TCPChan(HOST, TCPPORT) # fill data in dt = datetime.datetime.utcnow().replace(hour=0, minute=0, second=10, microsecond=0) delta = datetime.timedelta(seconds=1) nmsgs = 1000 snames = [ '!foo A=1 B=1', '!foo A=1 B=2', '!foo A=2 B=1', '!foo A=2 B=2', ] print("Sending {0} messages through TCP...".format(nmsgs * len(snames))) cnt = 0 timestamp = dt for it in range(0, nmsgs): for sname in snames: timestr = timestamp.strftime('+%Y%m%dT%H%M%S.%f') event = "{0} event {1} for {2} generated".format( cnt, sname, timestr) msg = "+{0}\r\n+{1}\r\n+{2}\r\n".format( sname, timestr, event[:it + 1]) allevents.append((timestamp, event[:it + 1])) chan.send(msg) cnt += 1 timestamp = timestamp + delta time.sleep(5) # wait untill all messagess will be processed test_select_events_forward(dt, delta, nmsgs * len(snames)) test_select_events_backward(dt, delta, nmsgs * len(snames)) finally: print("Stopping server...") akumulid.stop() time.sleep(5) on_exit()
def main(path):
    """Feed deliberately invalid RESP samples to the server and check that a
    parser error is reported (case 1) or silently pending (case 2), and that
    no data becomes queryable."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # Case 1
        invalid_sample = "+cpuload host=machine1\\r\\n:1418224205000000000\\r\\r+25.0\\r\\n\n"  # reported in issue #173
        chan.send(invalid_sample)
        query = {"select": "cpuload", "range": {"from": 1418224205000000000, "to": 1418224505000000000}}
        queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
        response = urlopen(queryurl, json.dumps(query))
        # response should be empty
        for line in response:
            print("Unexpected response: {0}".format(line))
            raise ValueError("Unexpected response")
        err = chan.recv()
        print(err)
        if not err.startswith("-PARSER"):
            raise ValueError("Error message expected")
        chan.close()
        # Case 2: an incomplete sample (no trailing terminator) — no parser
        # error is expected because the write never finished.
        chan = att.TCPChan(HOST, TCPPORT)
        invalid_sample = "+cpuload host=machine2\r\n:1418224205000000000\r\n+25.0"
        chan.send(invalid_sample)
        time.sleep(1)
        response = urlopen(queryurl, json.dumps(query))
        # response should be empty
        for line in response:
            print("Unexpected response: {0}".format(line))
            raise ValueError("Unexpected response")
        # No error message expected because the write is incomplete
        chan.close()
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Write into a test-sized database until a volume is recycled (total
    free space goes up), then verify a backward read still succeeds."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty (test-sized) database
    akumulid.create_test_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        # fill data in
        statsurl = "http://{0}:{1}/api/stats".format(host, httpport)
        chan = TCPChan(host, tcpport)

        def get_free_space():
            # Total free space across all four volumes from /api/stats.
            rawstats = urllib.urlopen(statsurl).read()
            stats = json.loads(rawstats)
            volspace = 0
            volspace += int(stats["volume_0"]["free_space"])
            volspace += int(stats["volume_1"]["free_space"])
            volspace += int(stats["volume_2"]["free_space"])
            volspace += int(stats["volume_3"]["free_space"])
            return volspace

        print("Sending messages...")
        prevspace = get_free_space()
        batch_size = 1000
        for ix, it in enumerate(att.infinite_msg_stream(batch_size, 'temp', tag='test')):
            chan.send(it)
            if ix % 1000 == 0:
                volspace = get_free_space()
                print("{0} msgs written, free space in the database: {1}".format(
                    ix * batch_size, volspace))
                if prevspace < volspace:
                    # free space increased because volume was recycled
                    print("Volume recycle occured")
                    break
                prevspace = volspace
        # Read data back in backward direction (cached values should be included)
        read_in_backward_direction(batch_size)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Feed deliberately invalid RESP samples to the server and check that a
    parser error is reported (case 1) or silently pending (case 2), and that
    no data becomes queryable."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # Case 1
        invalid_sample = "+cpuload host=machine1\\r\\n:1418224205000000000\\r\\r+25.0\\r\\n\n"  # reported in issue #173
        chan.send(invalid_sample)
        query = {"select": "cpuload", "range": {"from": 1418224205000000000, "to": 1418224505000000000}}
        queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
        response = urlopen(queryurl, json.dumps(query))
        # response should be empty
        for line in response:
            print("Unexpected response: {0}".format(line))
            raise ValueError("Unexpected response")
        err = chan.recv()
        print(err)
        if not err.startswith("-PARSER"):
            raise ValueError("Error message expected")
        chan.close()
        # Case 2: an incomplete sample (no trailing terminator) — no parser
        # error is expected because the write never finished.
        chan = att.TCPChan(HOST, TCPPORT)
        invalid_sample = "+cpuload host=machine2\r\n:1418224205000000000\r\n+25.0"
        chan.send(invalid_sample)
        time.sleep(1)
        response = urlopen(queryurl, json.dumps(query))
        # response should be empty
        for line in response:
            print("Unexpected response: {0}".format(line))
            raise ValueError("Unexpected response")
        # No error message expected because the write is incomplete
        chan.close()
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Write two parallel metrics and verify join queries in both scan
    directions, plain and grouped by series."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        dt = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 100000
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C', 'D'],
        }
        for it in att.generate_messages(dt, delta, nmsgs, 'col1', **tags):
            chan.send(it)
        for it in att.generate_messages(dt, delta, nmsgs, 'col2', **tags):
            chan.send(it)
        time.sleep(5)  # wait until all messages have been processed
        columns = ['col1', 'col2']
        test_join_query_forward(columns, dt, delta, nmsgs)
        test_join_query_backward(columns, dt, delta, nmsgs)
        test_join_query_forward_by_series(columns, dt, delta, nmsgs)
        test_join_query_backward_by_series(columns, dt, delta, nmsgs)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Write two parallel metrics and verify join queries in both scan
    directions, plain and grouped by series."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        dt = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 100000
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C', 'D'],
        }
        for it in att.generate_messages(dt, delta, nmsgs, 'col1', **tags):
            chan.send(it)
        for it in att.generate_messages(dt, delta, nmsgs, 'col2', **tags):
            chan.send(it)
        time.sleep(5)  # wait until all messages have been processed
        columns = ['col1', 'col2']
        test_join_query_forward(columns, dt, delta, nmsgs)
        test_join_query_backward(columns, dt, delta, nmsgs)
        test_join_query_forward_by_series(columns, dt, delta, nmsgs)
        test_join_query_backward_by_series(columns, dt, delta, nmsgs)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Exercise group-aggregate queries over one day of second-resolution data
    in both scan directions and at three downsampling steps."""
    akumulid = att.create_akumulid(path)
    # Begin from a clean database.
    akumulid.delete_database()
    akumulid.create_database()
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # One sample per second for a full day, starting just after midnight.
        dt = datetime.datetime.utcnow().replace(hour=0, minute=0, second=10, microsecond=0)
        delta = datetime.timedelta(seconds=1)
        nmsgs = 3600 * 24
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C'],
            "tag3": ['D', 'E', 'F', 'G', 'H'],
        }
        for msg in att.generate_messages(dt, delta, nmsgs, 'test', **tags):
            chan.send(msg)
        time.sleep(5)  # give the server time to ingest everything
        # Normal operation: every step size, forward then backward.
        for step in ('1m', '10m', '1h'):
            test_group_aggregate_all_forward(dt, delta, nmsgs, step)
        for step in ('1m', '10m', '1h'):
            test_group_aggregate_all_backward(dt, delta, nmsgs, step)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
        on_exit()
def main(path):
    """Populate every metric in METRICS, then run the suggest/search API
    test cases against the resulting series index."""
    akumulid = att.create_akumulid(path)
    # Begin from a clean database.
    akumulid.delete_database()
    akumulid.create_database()
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # Write N_MSG points for each metric under test.
        dt = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = N_MSG
        for metric in METRICS:
            print("Sending {0} messages through TCP...".format(nmsgs))
            for msg in att.generate_messages(dt, delta, nmsgs, metric, **SERIES_TAGS):
                chan.send(msg)
        time.sleep(5)  # give the server time to ingest everything
        # Run every suggest/search check in order.
        checks = (
            test_suggest_metric,
            test_suggest_metric_prefix,
            test_suggest_tag,
            test_suggest_tag_prefix,
            test_suggest_value,
            test_suggest_value_prefix,
            test_search_all_names,
            test_search_names_with_metric,
            test_search_names_with_tag,
        )
        for check in checks:
            check()
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
        on_exit()
def main(path):
    """Populate every metric in METRICS, then run the suggest/search API
    test cases against the resulting series index."""
    akumulid = att.create_akumulid(path)
    # Begin from a clean database.
    akumulid.delete_database()
    akumulid.create_database()
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # Write N_MSG points for each metric under test.
        dt = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = N_MSG
        for metric in METRICS:
            print("Sending {0} messages through TCP...".format(nmsgs))
            for msg in att.generate_messages(dt, delta, nmsgs, metric, **SERIES_TAGS):
                chan.send(msg)
        time.sleep(5)  # give the server time to ingest everything
        # Run every suggest/search check in order.
        checks = (
            test_suggest_metric,
            test_suggest_metric_prefix,
            test_suggest_tag,
            test_suggest_tag_prefix,
            test_suggest_value,
            test_suggest_value_prefix,
            test_search_all_names,
            test_search_names_with_metric,
            test_search_names_with_tag,
        )
        for check in checks:
            check()
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
        on_exit()
def main(path):
    """Write a backdated batch, restart the server, verify the data survived
    the reopen, then write and read back a second batch."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    nmsgs = 100000
    # Backdate the series so every timestamp lies in the past.
    dt = datetime.datetime.utcnow() - (datetime.timedelta(milliseconds=1) * nmsgs)
    delta = datetime.timedelta(milliseconds=1)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C'],
            "tag3": ['D', 'E', 'F', 'G', 'H'],
        }
        for it in att.generate_messages(dt, delta, nmsgs, 'test', **tags):
            chan.send(it)
        time.sleep(5)  # wait until all messages have been processed
        print("Trying to close channel")
        chan.close()
        test_read_all(dt, delta, nmsgs)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
        print("Server stopped")
    # Reopen the database: the first batch must still be readable.
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    print("Server started")
    try:
        test_read_all(dt, delta, nmsgs)
        # Try to write new data
        dt = datetime.datetime.utcnow()
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        print("Sending {0} messages through TCP second time...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C'],
            "tag3": ['D', 'E', 'F', 'G', 'H'],
        }
        for it in att.generate_messages(dt, delta, nmsgs, 'test', **tags):
            chan.send(it)
        time.sleep(5)
        test_read_all(dt, delta, nmsgs)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Eviction test: write one series, overflow the test database so that
    series is evicted, verify it reads back empty, then append to it again."""
    akumulid = att.create_akumulid(path)
    # delete database
    akumulid.delete_database()
    # create empty (test-sized) database
    akumulid.create_test_database()
    # start ./akumulid server
    print("Starting server...")
    dt = datetime.datetime.utcnow()
    delta = datetime.timedelta(milliseconds=1)
    try:
        akumulid.serve()
        time.sleep(5)
        chan = TCPChan(host, tcpport)
        # This data will be evicted from the database
        for it in att.generate_messages(dt, delta, 2000, 'evicted', tag=["foo", "bar"]):
            chan.send(it)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
    try:
        akumulid.serve()
        time.sleep(5)
        # fill data in
        statsurl = "http://{0}:{1}/api/stats".format(host, httpport)
        chan = TCPChan(host, tcpport)

        def get_free_space():
            # Total free space across all four volumes from /api/stats.
            rawstats = urllib.urlopen(statsurl).read()
            stats = json.loads(rawstats)
            volspace = 0
            volspace += int(stats["volume_0"]["free_space"])
            volspace += int(stats["volume_1"]["free_space"])
            volspace += int(stats["volume_2"]["free_space"])
            volspace += int(stats["volume_3"]["free_space"])
            return volspace

        print("Sending messages...")
        prevspace = get_free_space()
        batch_size = 1000
        for ix, it in enumerate(att.infinite_msg_stream(batch_size, 'temp', tag='test')):
            chan.send(it)
            if ix % 1000 == 0:
                volspace = get_free_space()
                print("{0} msgs written, free space in the database: {1}".format(
                    ix * batch_size, volspace))
                if prevspace < volspace:
                    # free space increased because volume was recycled
                    print("Volume recycle occured")
                    break
                prevspace = volspace
        # Read data back in backward direction (cached values should be included)
        read_in_backward_direction(batch_size)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
    try:
        akumulid.serve()
        time.sleep(5)
        # After recycling, the evicted series must yield an empty response.
        require_empty_response('evicted')
        chan = TCPChan(host, tcpport)
        # Send next 2000 messages that belong to the evicted series
        for it in att.generate_messages(dt + datetime.timedelta(milliseconds=2000),
                                        delta, 2000, 'evicted', tag=["foo", "bar"]):
            chan.send(it)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Crash-recovery test: create NSERIES series, kill the server process,
    wait for recovery, then verify the series metadata survived."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)

    def get_tags():
        # One distinct tag2 value per series.
        for ix in xrange(0, NSERIES):
            yield {"tag1": "A", "tag2": str(ix)}

    tags = list(get_tags())
    dt = datetime.datetime.utcnow() - (datetime.timedelta(milliseconds=1) * 10)
    delta = datetime.timedelta(milliseconds=1)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        print("Sending {0} messages through TCP...".format(10 * NSERIES))
        # Send 10 messages for each series in the set
        for ix, it in enumerate(att.generate_messages5(dt, delta, 10, 'test', tags)):
            chan.send(it)
            if ix % 100000 == 0:
                print("{0} series created".format(ix))
        chan.close()
        time.sleep(15)
        # kill process
        akumulid.terminate()
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        akumulid.terminate()
        sys.exit(1)
    finally:
        print("Server terminated")
    print("Starting recovery...")
    akumulid.serve()
    while True:
        try:
            # Wait until the server responds to the stats query,
            # which means that the recovery is completed.
            statsurl = "http://{0}:{1}/api/stats".format(HOST, HTTPPORT)
            _ = urllib.urlopen(statsurl).read()
        except Exception:
            # Server not reachable yet — retry. (Was a bare `except:`.)
            time.sleep(1)
            continue
        break
    print("Recovery completed")
    try:
        test_metadata("test", tags)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Write two metrics whose values come from a fixed list, then verify
    filter, join-with-filter and group-aggregate-with-filter queries."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        dt = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 100000
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C', 'D'],
        }
        # Values drawn from [-100, 100) so the filter ranges below select a
        # predictable subset (presumably cycled by generate_messages4 — confirm).
        values = [float(x) for x in range(-100, 100)]
        for it in att.generate_messages4(dt, delta, nmsgs, 'col1', values, **tags):
            chan.send(it)
        for it in att.generate_messages4(dt, delta, nmsgs, 'col2', values, **tags):
            chan.send(it)
        time.sleep(5)  # wait until all messages have been processed
        test_filter_query_forward('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_backward('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_forward_by_time('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_backward_by_time('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_empty('col1', dt, delta, nmsgs)
        test_join_query_forward(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_join_query_backward(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_join_query_forward_by_time(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_join_query_backward_by_time(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 20000, "all")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 10000, "all")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 5000, "all")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 20000, "any")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 10000, "any")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 5000, "any")
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Crash-recovery test: create NSERIES series, kill the server process,
    wait for recovery, then verify the series metadata survived."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)

    def get_tags():
        # One distinct tag2 value per series.
        for ix in xrange(0, NSERIES):
            yield {"tag1": "A", "tag2": str(ix)}

    tags = list(get_tags())
    dt = datetime.datetime.utcnow() - (datetime.timedelta(milliseconds=1) * 10)
    delta = datetime.timedelta(milliseconds=1)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        print("Sending {0} messages through TCP...".format(10 * NSERIES))
        # Send 10 messages for each series in the set
        for ix, it in enumerate(att.generate_messages5(dt, delta, 10, 'test', tags)):
            chan.send(it)
            if ix % 100000 == 0:
                print("{0} series created".format(ix))
        chan.close()
        time.sleep(15)
        # kill process
        akumulid.terminate()
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        akumulid.terminate()
        sys.exit(1)
    finally:
        print("Server terminated")
    print("Starting recovery...")
    akumulid.serve()
    while True:
        try:
            # Wait until the server responds to the stats query,
            # which means that the recovery is completed.
            statsurl = "http://{0}:{1}/api/stats".format(HOST, HTTPPORT)
            _ = urllib.urlopen(statsurl).read()
        except Exception:
            # Server not reachable yet — retry. (Was a bare `except:`.)
            time.sleep(1)
            continue
        break
    print("Recovery completed")
    try:
        test_metadata("test", tags)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    """Write two metrics whose values come from a fixed list, then verify
    filter, join-with-filter and group-aggregate-with-filter queries."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        dt = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 100000
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C', 'D'],
        }
        # Values drawn from [-100, 100) so the filter ranges below select a
        # predictable subset (presumably cycled by generate_messages4 — confirm).
        values = [float(x) for x in range(-100, 100)]
        for it in att.generate_messages4(dt, delta, nmsgs, 'col1', values, **tags):
            chan.send(it)
        for it in att.generate_messages4(dt, delta, nmsgs, 'col2', values, **tags):
            chan.send(it)
        time.sleep(5)  # wait until all messages have been processed
        test_filter_query_forward('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_backward('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_forward_by_time('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_backward_by_time('col1', dt, delta, [-20, 20], nmsgs)
        test_filter_query_empty('col1', dt, delta, nmsgs)
        test_join_query_forward(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_join_query_backward(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_join_query_forward_by_time(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_join_query_backward_by_time(['col1', 'col2'], [[-20, 20], [40, 60]], dt, delta, nmsgs)
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 20000, "all")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 10000, "all")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 5000, "all")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 20000, "any")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 10000, "any")
        test_group_aggregate_all_forward('col1', dt, delta, nmsgs, 5000, "any")
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
def main(path):
    # Integration driver: populates one metric with 100k points, then runs the
    # full battery of query tests (reads, metadata, aggregates,
    # group-aggregates) followed by the error-handling cases.
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        dt = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 100000
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C'],
            "tag3": ['D', 'E', 'F', 'G', 'H'],
        }
        for it in att.generate_messages(dt, delta, nmsgs, 'test', **tags):
            chan.send(it)
        time.sleep(5)  # wait until all messages have been processed
        # Test normal operation
        test_read_all_in_backward_direction(dt, delta, nmsgs)
        test_group_by_tag_in_backward_direction(dt, delta, nmsgs)
        test_where_clause_in_backward_direction(dt, delta, nmsgs)
        test_where_clause_with_groupby_in_backward_direction(dt, delta, nmsgs)
        test_metadata_query(tags)
        test_read_in_forward_direction(dt, delta, nmsgs)
        # NOTE(review): test_late_write presumably writes additional data, so
        # the aggregate tests below may observe the post-late-write state —
        # confirm before reordering anything here.
        test_late_write(dt, delta, nmsgs, chan)
        test_aggregate_all(dt, delta, nmsgs)
        test_aggregate_last(dt, delta, nmsgs)
        test_aggregate_last_timestamp(dt, delta, nmsgs)
        test_aggregate_all_group_by(dt, delta, nmsgs)
        test_aggregate_where(dt, delta, nmsgs)
        # Group-aggregate at several step sizes, both scan directions.
        test_group_aggregate_all_forward (dt, delta, nmsgs, 10)
        test_group_aggregate_all_forward (dt, delta, nmsgs, 100)
        test_group_aggregate_all_forward (dt, delta, nmsgs, 1000)
        test_group_aggregate_all_forward (dt, delta, nmsgs, 100000)
        test_group_aggregate_all_backward(dt, delta, nmsgs, 10)
        test_group_aggregate_all_backward(dt, delta, nmsgs, 100)
        test_group_aggregate_all_backward(dt, delta, nmsgs, 1000)
        test_group_aggregate_all_backward(dt, delta, nmsgs, 100000)
        # Test error handling
        select_from_nonexistent_metric(dt, delta, nmsgs)
        aggregate_nonexistent_metric(dt, delta, nmsgs)
        group_aggregate_nonexistent_metric(dt, delta, nmsgs)
        join_nonexistent_metrics(dt, delta, nmsgs)
        select_nonexistent_time_range(dt, delta, nmsgs)
        aggregate_nonexistent_time_range(dt, delta, nmsgs)
        group_aggregate_nonexistent_time_range(dt, delta, nmsgs)
        join_nonexistent_time_range(dt, delta, nmsgs)
        check_bad_query_handling()
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
        on_exit()
def main(path):
    """Write a backdated batch, restart the server, verify the data survived
    the reopen, then write and read back a second batch."""
    akumulid = att.create_akumulid(path)
    # Reset database
    akumulid.delete_database()
    akumulid.create_database()
    # start ./akumulid server
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    nmsgs = 100000
    # Backdate the series so every timestamp lies in the past.
    dt = datetime.datetime.utcnow() - (datetime.timedelta(milliseconds=1) * nmsgs)
    delta = datetime.timedelta(milliseconds=1)
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        print("Sending {0} messages through TCP...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C'],
            "tag3": ['D', 'E', 'F', 'G', 'H'],
        }
        for it in att.generate_messages(dt, delta, nmsgs, 'test', **tags):
            chan.send(it)
        time.sleep(5)  # wait until all messages have been processed
        print("Trying to close channel")
        chan.close()
        test_read_all(dt, delta, nmsgs)
    except Exception:
        # Fixed: was a bare `except:`.
        traceback.print_exc()
        sys.exit(1)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)
        print("Server stopped")
    # Reopen the database: the first batch must still be readable.
    print("Starting server...")
    akumulid.serve()
    time.sleep(5)
    print("Server started")
    try:
        test_read_all(dt, delta, nmsgs)
        # Try to write new data
        dt = datetime.datetime.utcnow()
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        print("Sending {0} messages through TCP second time...".format(nmsgs))
        tags = {
            "tag1": ['A'],
            "tag2": ['B', 'C'],
            "tag3": ['D', 'E', 'F', 'G', 'H'],
        }
        for it in att.generate_messages(dt, delta, nmsgs, 'test', **tags):
            chan.send(it)
        time.sleep(5)
        test_read_all(dt, delta, nmsgs)
    finally:
        print("Stopping server...")
        akumulid.stop()
        time.sleep(5)