def check_start_no_conf():
    """Check that the filter refuses to start when it was never configured.

    Returns True when no PID file exists after the start attempt (the process
    did not come up), False otherwise.
    """
    filter = Filter(filter_name="logs")
    # Deliberately no filter.configure() call before starting.
    filter.valgrind_start()
    sleep(2)
    # Success condition: the PID file is absent, i.e. the process is not running.
    if not access(filter.pid, F_OK):
        return True
    # Fixed: message previously said "check_start_wrong_conf: Process running
    # with wrong configuration", which names a different test and condition.
    logging.error("check_start_no_conf: Process running with no configuration")
    return False
def check_socket_monitor_connection():
    """Connect to the filter's monitoring UNIX socket and check it reports RUNNING.

    Returns True on success, False on a wrong response or connection error.
    """
    filter = Filter(filter_name="logs")
    filter.configure(FLOGS_CONFIG)
    filter.valgrind_start()
    try:
        # The 'with' statement closes the socket on exit; the original explicit
        # s.close() inside the block was redundant. The dead 'pid = -1' local
        # was also removed.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(filter.monitor)
            data = s.recv(4096).decode()
        if RESP_MON_STATUS_RUNNING not in data:
            logging.error(
                "check_socket_monitor_connection: Wrong response; got {}".
                format(data))
            return False
    except Exception as e:
        logging.error(
            "check_socket_monitor_connection: Error connecting to socket: {}".
            format(e))
        return False
    filter.stop()
    return True
def simple_master_server():
    """Check that a single trigger ends up once in the master's Redis list.

    Returns True when exactly one entry is found, False otherwise.
    """
    master = RedisServer(unix_socket=REDIS_SOCKET_PATH)
    filter = Filter(filter_name='test')
    filter.configure(FTEST_CONF_TEMPLATE)
    filter.valgrind_start()
    try:
        filter.send_single(REDIS_LIST_TRIGGER)
    except Exception as e:
        logging.error(
            "simple_master_server: Could not connect to test filter: {}".
            format(e))
        return False
    sleep(1)
    with master.connect() as redis_connection:
        num_list_entries = redis_connection.llen(REDIS_LIST_NAME)
        if num_list_entries != 1:
            # Fixed: .format() was previously applied only to the second half
            # of a '+'-concatenated string, so the list-name placeholder was
            # never filled and the count argument was silently dropped.
            logging.error(
                "simple_master_server: wrong number of entries in the redis list {}: "
                "expected 1 but got {}".format(REDIS_LIST_NAME, num_list_entries))
            return False
    return True
def check_pid_file():
    """Check the PID file lifecycle: readable while running, removed on stop.

    Returns True when the PID file holds a live PID while the filter runs and
    is deleted after filter.stop(), False otherwise.
    """
    filter = Filter(filter_name="logs")
    filter.configure(FLOGS_CONFIG)
    filter.valgrind_start()
    # Removed dead 'pid = -1' initializer: pid is always assigned before use,
    # and every failure path returns early.
    try:
        with open(filter.pid) as f:
            pid = int(f.readline())
    except Exception as e:
        logging.error("check_pid: Unable to read pid file: {}".format(e))
        return False
    try:
        # Signal 0 probes process existence without delivering a signal.
        kill(pid, 0)
    except OSError as e:
        logging.error("check_pid: Process {} not running: {}".format(pid, e))
        return False
    filter.stop()
    if access(filter.pid, F_OK):
        logging.error("check_pid: PID file not deleted")
        return False
    return True
def master_replica_discovery_rate_limiting():
    """Verify reconnection attempts to a lost Redis master are rate limited.

    Starts a master/replica pair, stops the master, then hammers the filter
    from 5 threads for ~9 seconds and checks the replica received at most 10
    new connection attempts.
    """
    master = RedisServer(address="127.0.0.1", port=1234)
    replica = RedisServer(unix_socket=REDIS_SOCKET_PATH, master=master)
    filter = Filter(filter_name='test', nb_threads=5)
    filter.configure(FTEST_CONF_TEMPLATE)
    filter.start()
    try:
        # success
        filter.send_single(REDIS_LIST_TRIGGER)
    except Exception as e:
        # Fixed: previous message passed an undefined 'function_name' to
        # .format(), raising a NameError inside the exception handler.
        logging.error(
            "master_replica_discovery_rate_limiting: Could not connect to test filter: {}"
            .format(e))
        return False

    # master shuts down
    master.stop()

    # brutal sender loop run by each worker thread
    def thread_brute_time(filter, time_end):
        end = time() + time_end
        while time() < end:
            # accelerate throughput by avoiding constant calls to time()
            for i in range(0, 9):
                try:
                    filter.send_single(REDIS_LIST_TRIGGER)
                # Fixed: bare 'except:' also swallowed SystemExit and
                # KeyboardInterrupt.
                except Exception:
                    return False
        return True

    thread_list = []
    for num in range(0, 5):
        thread_list.append(
            threading.Thread(target=thread_brute_time, args=(filter, 9)))

    # ought to crash if command fails
    initial_connections_num = replica.connect().info(
    )['total_connections_received']
    for thread in thread_list:
        thread.start()
    for thread in thread_list:
        thread.join()

    # new generated connections, minus the one generated by the call to get it
    new_connections = replica.connect().info(
    )['total_connections_received'] - initial_connections_num - 1
    if new_connections > 10:
        # Fixed: the message previously had no '{}' placeholder, so the actual
        # connection count was silently dropped from the log.
        logging.error(
            "master_replica_discovery_rate_limiting: Wrong number of new connection attempts, "
            "was supposed to have 10 new at most, but got {}".format(new_connections))
        return False
    return True
def master_replica_transfer(function_name, healthcheck):
    """Check the filter follows a Redis master/replica role swap.

    function_name -- label used to tag log messages
    healthcheck   -- when True, wait long enough for the filter's periodic
                     healthcheck to rediscover the new master
    Returns True when the trigger succeeds against the new master and the list
    holds exactly two entries, False otherwise.
    """
    master = RedisServer(address="127.0.0.1", port=1234)
    replica = RedisServer(address="127.0.0.1", port=1235,
                          unix_socket=REDIS_SOCKET_PATH, master=master)
    filter = Filter(filter_name='test')
    filter.configure(FTEST_CONF_TEMPLATE)
    filter.valgrind_start()
    try:
        # success
        filter.send_single(REDIS_LIST_TRIGGER)
    except Exception as e:
        logging.error("{}: Could not connect to test filter: {}".format(
            function_name, e))
        return False

    # master becomes replica
    with master.connect() as master_connection:
        master_connection.slaveof(replica.address, replica.port)
    # replica becomes master (slaveof() with no args promotes to master)
    with replica.connect() as replica_connection:
        replica_connection.slaveof()
    sleep(1)
    if healthcheck:
        sleep(8)

    try:
        # success
        return_code = filter.send_single(REDIS_LIST_TRIGGER)
    except Exception as e:
        logging.error("{}: Could not connect to test filter: {}".format(
            function_name, e))
        return False

    if return_code != 0:
        # Fixed: .format() was previously applied only to the second half of a
        # '+'-concatenated string, leaving the function-name placeholder unfilled.
        logging.error(
            "{}: Filter didn't return correct code, "
            "waited for 0 but got {}".format(function_name, return_code))
        return False

    with replica.connect() as new_master_connection:
        num_entries = new_master_connection.llen(REDIS_LIST_NAME)
        if num_entries != 2:
            # Fixed: same concatenated-literal .format() bug as above.
            logging.error(
                "{}: Wrong number of entries in {}, "
                "expected 2 but got {}".format(function_name, REDIS_LIST_NAME,
                                              num_entries))
            return False
    return True
def check_start_invalid_cache_num():
    """Ensure the filter refuses to start when cache_size is not numeric."""
    bad_filter = Filter(filter_name="logs", cache_size="General")
    bad_filter.configure(FLOGS_CONFIG)
    bad_filter.valgrind_start()
    sleep(0.5)
    # Success means the process never came up.
    if not bad_filter.check_start():
        return True
    logging.error(
        "check_start_invalid_cache_num: Process started when cache size was invalid"
    )
    bad_filter.stop()
    return False
def check_start_invalid_threshold_num():
    """Ensure the filter refuses to start when the threshold is not numeric."""
    bad_filter = Filter(filter_name="logs", thresold="Kenobi")
    bad_filter.configure(FLOGS_CONFIG)
    bad_filter.valgrind_start()
    sleep(0.5)
    # Success means the process never came up.
    if not bad_filter.check_start():
        return True
    logging.error(
        "check_start_invalid_threshold_num: Process started when threshold was invalid"
    )
    bad_filter.stop()
    return False
def check_start_invalid_thread_num():
    """Ensure the filter refuses to start when nb_threads is not numeric."""
    bad_filter = Filter(filter_name="logs", nb_threads="HelloThere")
    bad_filter.configure(FLOGS_CONFIG)
    bad_filter.valgrind_start()
    sleep(0.5)
    # Success means the process never came up.
    if not bad_filter.check_start():
        return True
    logging.error(
        "check_start_invalid_thread_num: Process started when thread number was invalid"
    )
    bad_filter.stop()
    return False
def check_start_outbound_thread_num():
    """Ensure the filter refuses to start when nb_threads is out of bounds."""
    bad_filter = Filter(filter_name="logs",
                        nb_threads="314159265358979323846264338327")
    bad_filter.configure(FLOGS_CONFIG)
    bad_filter.valgrind_start()
    sleep(0.5)
    # Success means the process never came up.
    if not bad_filter.check_start():
        return True
    logging.error(
        "check_start_outbound_thread_num: Process started when thread number was out of bounds"
    )
    bad_filter.stop()
    return False
def check_start_outbound_cache_num():
    """Ensure the filter refuses to start when cache_size is out of bounds."""
    bad_filter = Filter(filter_name="logs",
                        cache_size="950288419716939937510582097494")
    bad_filter.configure(FLOGS_CONFIG)
    bad_filter.valgrind_start()
    sleep(0.5)
    # Success means the process never came up.
    if not bad_filter.check_start():
        return True
    logging.error(
        "check_start_outbound_cache_num: Process started when cache size was out of bounds"
    )
    bad_filter.stop()
    return False
def check_start_outbound_threshold_num():
    """Ensure the filter refuses to start when the threshold is out of bounds."""
    bad_filter = Filter(filter_name="logs",
                        thresold="459230781640628620899862803482")
    bad_filter.configure(FLOGS_CONFIG)
    bad_filter.valgrind_start()
    sleep(0.5)
    # Success means the process never came up.
    if not bad_filter.check_start():
        return True
    logging.error(
        "check_start_outbound_threshold_num: Process started when threshold was out of bounds"
    )
    bad_filter.stop()
    return False
def check_socket_connection():
    """Check that the filter's main UNIX socket accepts a Darwin API call.

    Returns True on success, False on any connection/call error.
    """
    filter = Filter(filter_name="logs")
    filter.configure(FLOGS_CONFIG)
    filter.valgrind_start()
    # Removed dead 'pid = -1' local (never used).
    try:
        api = DarwinApi(socket_path=filter.socket, socket_type="unix")
        api.call("test\n", filter_code="logs", response_type="back")
        api.close()
    except Exception as e:
        # Fixed: the message previously named "check_socket_connection_back",
        # which is not this function.
        logging.error("check_socket_connection: Error connecting to socket: {}".format(e))
        return False
    filter.stop()
    return True
def check_socket_create_delete():
    """Check the socket file exists while running and is removed after stop.

    Returns True when both conditions hold, False otherwise.
    """
    filter = Filter(filter_name="logs")
    filter.configure(FLOGS_CONFIG)
    filter.valgrind_start()
    # Removed dead 'pid = -1' local (never used).
    if not access(filter.socket, F_OK):
        # Fixed typo in the log message: "accesible" -> "accessible".
        logging.error("check_socket_create_delete: Socket file not accessible")
        return False
    filter.stop()
    if access(filter.socket, F_OK):
        logging.error("check_socket_create_delete: Socket file not deleted")
        return False
    return True
def check_start_stop():
    """Start the filter, verify its process is alive, then stop it cleanly."""
    filter = Filter(filter_name="logs")
    filter.configure(FLOGS_CONFIG)
    filter.valgrind_start()
    pid = filter.process.pid
    try:
        # Signal 0 probes process existence without delivering a signal.
        kill(pid, 0)
    except OSError as e:
        logging.error("check_start_stop: Process {} not running: {}".format(pid, e))
        return False
    # stop() must report success for the test to pass.
    return filter.stop() is True
def master_replica():
    """Check pub/sub delivery through the master while a replica is attached.

    Returns True when the published trigger is received verbatim on the
    subscribed channel, False otherwise.
    """
    master = RedisServer(address="127.0.0.1", port=1234)
    replica = RedisServer(address="127.0.0.1", port=1235,
                          unix_socket=REDIS_SOCKET_PATH, master=master)
    filter = Filter(filter_name='test')
    filter.configure(FTEST_CONF_TEMPLATE)
    filter.valgrind_start()
    master.channel_subscribe(REDIS_CHANNEL_NAME)
    try:
        filter.send_single(REDIS_CHANNEL_TRIGGER)
    except Exception as e:
        logging.error(
            "master_replica: Could not connect to test filter: {}".format(e))
        return False
    sleep(1)
    message = master.channel_get_message()
    # Fixed: 'message is ""' compared object identity with a literal (works
    # only by CPython interning accident); equality is the intended check.
    if message == '':
        # Fixed: .format() was previously applied only to the second half of a
        # '+'-concatenated string, so the channel name never appeared.
        logging.error(
            "master_replica: expected to get a message in channel {} "
            "but got nothing".format(REDIS_CHANNEL_NAME))
        return False
    if message != REDIS_CHANNEL_TRIGGER:
        # Fixed: same concatenated-literal .format() bug as above.
        logging.error(
            "master_replica: expected to get a message in channel {} saying '{}' "
            "but got '{}' instead".format(REDIS_CHANNEL_NAME,
                                          REDIS_CHANNEL_TRIGGER, message))
        return False
    return True
def multi_thread_master():
    """Check that a 5-thread filter keeps exactly 5 Redis connections open.

    Five threads each send 500 triggers, then the master's active connection
    count must equal the thread count.
    """
    master = RedisServer(unix_socket=REDIS_SOCKET_PATH)
    filter = Filter(filter_name='test', nb_threads=5)
    filter.configure(FTEST_CONF_TEMPLATE)
    filter.valgrind_start()
    thread_list = []

    # Worker: send count_log triggers, abort on the first failure.
    def thread_brute(filter, count_log):
        for count in range(0, count_log):
            try:
                filter.send_single(REDIS_LIST_TRIGGER)
            # Fixed: bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt.
            except Exception:
                return False
        return True

    for num in range(0, 5):
        thread_list.append(
            threading.Thread(target=thread_brute, args=(filter, 500)))
    for thread in thread_list:
        thread.start()
    for thread in thread_list:
        thread.join()
    sleep(1)
    number = master.get_number_of_connections()
    # 5 threads -> expect exactly 5 connections
    if number != 5:
        logging.error(
            "multi_thread_master: wrong number of active connections: expected 5 but got "
            + str(number))
        return False
    return True
from tools.oss2 import OSS2
from kafka_adapter import kafka_adapter

# Log the bot in (cached session, QR code to console when configured);
# exit the process when the bot is logged out.
bot = Bot(cache_path=True, console_qr=cfg.getint('CONSOLE_QR'), logout_callback=sys.exit)
map = bot.enable_puid()
itchat = bot.core
bot.groups(update=False, contact_only=False)
# groups = bot.groups()
# for group_tmp in groups:
#     group_tmp.update_group(True)
oss2 = OSS2(cfg['ACCESS_KEY_ID'], cfg['ACCESS_KEY_SECRET'])
filter = Filter()


@bot.register(except_self=False)
def save_messages(msg):
    # The following two lines are needed to fetch city, signature and province
    # msg.chat.update_group(True)
    send_time = msg.create_time  # time the message was sent
    send_timestamp = int(send_time.timestamp() * 1000)
    print(msg)
    # Skip messages with no type or no sending member attached.
    if msg is None or msg.type is None or msg.member is None:
        return
    member = msg.member
    print(msg)
def thread_working_test():
    """Check the buffer filter's periodic thread flushes data downstream.

    Feeds test data to a buffer filter wired to an anomaly filter, waits past
    the flush interval, and verifies the internal Redis set was emptied.
    Returns True on success, False otherwise.
    """
    ret = True
    config_buffer = '{{' \
                    '"redis_socket_path": "{redis_socket}",' \
                    '"input_format": [' \
                    '{{"name": "net_src_ip", "type": "string"}},' \
                    '{{"name": "net_dst_ip", "type": "string"}},' \
                    '{{"name": "net_dst_port", "type": "string"}},' \
                    '{{"name": "ip_proto", "type": "string"}}' \
                    '],' \
                    '"outputs": [' \
                    '{{' \
                    '"filter_type": "fanomaly",' \
                    '"filter_socket_path": "/tmp/anomaly.sock",' \
                    '"interval": 10,' \
                    '"required_log_lines": 5,' \
                    '"redis_lists": [{{' \
                    '"source": "",' \
                    '"name": "darwin_buffer_anomaly"' \
                    '}}]' \
                    '}}' \
                    ']' \
                    '}}'.format(redis_socket=REDIS_SOCKET)
    config_test = '{{' \
                  '"redis_socket_path": "{redis_socket}",' \
                  '"alert_redis_list_name": "{redis_alert}",' \
                  '"log_file_path": "/var/log/darwin/alerts.log",' \
                  '"alert_redis_channel_name": "darwin.alerts"' \
                  '}}'.format(redis_socket=REDIS_SOCKET, redis_alert=REDIS_ALERT_LIST)

    # CONFIG
    buffer_filter = Buffer()
    buffer_filter.configure(config_buffer)
    test_filter = Filter(filter_name="anomaly",
                         socket_path="/tmp/anomaly.sock")
    test_filter.configure(config_test)

    # START FILTER
    if not buffer_filter.valgrind_start():
        return False
    if not test_filter.start():
        print("Anomaly did not start")
        return False

    # SEND TEST
    darwin_api = DarwinApi(
        socket_path=buffer_filter.socket,
        socket_type="unix",
    )
    data = buffer_filter.get_test_data()
    darwin_api.bulk_call(
        data,
        response_type="back",
    )

    # We wait for the thread to activate
    sleep(15)

    redis_data = buffer_filter.get_internal_redis_set_data(
        "darwin_buffer_anomaly")
    if redis_data != set():
        logging.error(
            "thread_working_test : Expected no data in Redis but got {}".
            format(redis_data))
        ret = False

    # CLEAN
    darwin_api.close()
    # Don't fold the stop calls into one boolean expression: that would
    # overwrite a False 'ret' set above.
    if not buffer_filter.valgrind_stop():
        ret = False
    # Fixed: the anomaly filter was previously never stopped, leaking the
    # process (the sibling fanomaly test stops and cleans it).
    test_filter.clean_files()
    if not test_filter.stop():
        ret = False
    return ret
def fanomaly_connector_and_send_test():
    """End-to-end: buffer filter forwards data to the anomaly filter, which
    must push exactly one expected alert into the Redis alert list.

    Returns True on success, False otherwise.
    """
    test_name = "fanomaly_connector_and_send_test"
    ret = True
    config_test = '{{' \
                  '"redis_socket_path": "{redis_socket}",' \
                  '"alert_redis_list_name": "{redis_alert}",' \
                  '"log_file_path": "/var/log/darwin/alerts.log",' \
                  '"alert_redis_channel_name": "darwin.alerts"' \
                  '}}'.format(redis_socket=REDIS_SOCKET, redis_alert=REDIS_ALERT_LIST)
    config_buffer = '{{' \
                    '"redis_socket_path": "{redis_socket}",' \
                    '"input_format": [' \
                    '{{"name": "net_src_ip", "type": "string"}},' \
                    '{{"name": "net_dst_ip", "type": "string"}},' \
                    '{{"name": "net_dst_port", "type": "string"}},' \
                    '{{"name": "ip_proto", "type": "string"}}' \
                    '],' \
                    '"outputs": [' \
                    '{{' \
                    '"filter_type": "fanomaly",' \
                    '"filter_socket_path": "/tmp/anomaly.sock",' \
                    '"interval": 10,' \
                    '"required_log_lines": 5,' \
                    '"redis_lists": [{{' \
                    '"source": "",' \
                    '"name": "darwin_buffer_test"' \
                    '}}]' \
                    '}}' \
                    ']' \
                    '}}'.format(redis_socket=REDIS_SOCKET)

    # CONFIG
    buffer_filter = Buffer()
    buffer_filter.configure(config_buffer)
    test_filter = Filter(filter_name="anomaly",
                         socket_path="/tmp/anomaly.sock")
    test_filter.configure(config_test)

    # START FILTER
    if not buffer_filter.valgrind_start():
        print("Buffer did not start")
        return False
    if not test_filter.start():
        print("Anomaly did not start")
        return False

    # SEND TEST
    data = buffer_filter.get_test_data()
    darwin_api = DarwinApi(socket_path=buffer_filter.socket,
                           socket_type="unix")
    darwin_api.bulk_call(
        data,
        response_type="back",
    )
    # Wait past the buffer's 10-second flush interval.
    sleep(15)

    # GET REDIS DATA AND COMPARE
    redis_data = buffer_filter.get_internal_redis_list_data(REDIS_ALERT_LIST)
    expected_data = '"details": {"ip": "192.168.110.2","udp_nb_host": 1.000000,"udp_nb_port": 252.000000,"tcp_nb_host": 0.000000,"tcp_nb_port": 0.000000,"icmp_nb_host": 0.000000,"distance": 246.193959}'
    if len(redis_data) != 1:
        logging.error("{}: Expecting a single element list.".format(test_name))
        ret = False
    # Fixed: guard against an empty list before indexing; previously an empty
    # result raised IndexError and masked the real failure.
    if redis_data:
        redis_data = [a.decode() for a in redis_data]
        if expected_data not in redis_data[0]:
            logging.error("{}: Expected this data : {} but got {} in redis".format(
                test_name, expected_data, redis_data))
            ret = False

    # CLEAN
    darwin_api.close()
    test_filter.clean_files()
    # Don't fold the stop calls into one boolean expression: that would
    # overwrite a False 'ret' set above.
    if not buffer_filter.valgrind_stop():
        ret = False
    if not test_filter.stop():
        ret = False
    return ret
def master_replica_master_temp_fail():
    """Check the filter survives a temporary master Redis outage and reconnects.

    Returns True when the filter keeps running through the outage and exactly
    one entry lands in the list after the master comes back, False otherwise.
    """
    master = RedisServer(address="127.0.0.1", port=1234)
    replica = RedisServer(address="127.0.0.1",
                          unix_socket=REDIS_SOCKET_PATH, master=master)
    filter = Filter(filter_name='test')
    filter.configure(FTEST_CONF_TEMPLATE)
    filter.valgrind_start()
    try:
        # success
        filter.send_single(REDIS_LIST_TRIGGER)
    except Exception as e:
        logging.error(
            "master_replica_master_temp_fail: Could not connect to test filter: {}"
            .format(e))
        return False

    master.stop()
    sleep(1)
    try:
        # failure
        filter.send_single(REDIS_LIST_TRIGGER)
    except Exception as e:
        logging.error(
            "master_replica_master_temp_fail: Could not connect to test filter: {}"
            .format(e))
        return False
    sleep(1)
    if not filter.check_run():
        logging.error(
            "master_replica_master_temp_fail: filter seems to have crashed when master Redis got offline"
        )
        return False

    master.start()
    # rate limiting will prevent to reconnect immediately after failure, so there should have a wait
    # (rate limiting is not tested here)
    sleep(8)
    try:
        # success
        filter.send_single(REDIS_LIST_TRIGGER)
    except Exception as e:
        logging.error(
            "master_replica_master_temp_fail: Could not connect to test filter: {}"
            .format(e))
        return False

    with master.connect() as master_connection:
        num_list_entries = master_connection.llen(REDIS_LIST_NAME)
        if num_list_entries != 1:
            # Fixed: .format() was previously applied only to the second half
            # of a '+'-concatenated string, leaving the list-name placeholder
            # unfilled.
            logging.error(
                "master_replica_master_temp_fail: wrong number of entries in the redis list {}: "
                "expected 1 but got {}".format(REDIS_LIST_NAME, num_list_entries))
            return False
    return True