def test_simple_config_in_period(tmp_file, config, another_config):
    tmp_file.write(config)

    def async_update():
        time.sleep(2)
        tmp_file.write(another_config)

    run_in_thread(async_update)

    loader = FileLoader("loader", str(tmp_file))
    parser = PropertiesParser("parser")
    c = Config(loader, parser)
    c.load_config(sync=True)

    # first time
    assert c.get_value("a") == "1"
    assert c.get_value("b") == "test"
    assert c.get_int("a") == 1
    assert c.get_string("a") == "1"
    assert c.get_boolean("c")

    time.sleep(5)

    # second time
    assert c.get_value("a") == "1"
    assert c.get_value("b") == "test"
    assert c.get_int("a") == 1
    assert c.get_string("a") == "1"
    assert c.get_boolean("c")
def load_config(self, sync=False):
    """
    Load the config. Invoked the first time the config is used, or after it has been invalidated.

    :param sync: whether this is a synchronous invocation
    """
    if self.has_loaded:
        return

    if self.is_loading:
        # someone else is already loading
        if not sync:
            return
        else:
            attempts = 0
            while self.is_loading and attempts < 3:
                attempts += 1
                time.sleep(0.5)
            if attempts >= 3:
                raise RuntimeError("failed to load config")
            # should be loaded successfully by the other caller
            return

    self.is_loading = True
    if sync:
        self._load_config()
    else:
        run_in_thread(self._load_config)
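All of these snippets rely on a run_in_thread helper whose implementation is not shown here. A minimal sketch, assuming it simply wraps threading.Thread as a daemon thread and returns the handle (so callers can keep a reference to the running task), might look like this:

import threading

def run_in_thread(func, *args, **kwargs):
    """Minimal sketch (assumption): run func(*args, **kwargs) in a daemon
    thread and return the Thread handle so the caller can inspect it later."""
    thread = threading.Thread(target=func, args=args, kwargs=kwargs)
    thread.daemon = True  # do not block interpreter shutdown
    thread.start()
    return thread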
def test_callback_config_item_in_period(tmp_file, config, another_config):
    tmp_file.write(config)

    def async_update():
        time.sleep(2)
        tmp_file.write(another_config)

    loader = FileLoader("loader", str(tmp_file))
    parser = PropertiesParser("parser")
    tmp_file.write(config)
    c = Config(loader, parser)
    pc = PeriodicalConfig(c, 3)
    item = pc.item("a", caching=1, cb_load=lambda x: int(x) + 1)
    pc.load_config(sync=True)

    run_in_thread(async_update)

    # first time
    assert pc.get_value("a") == "1"
    assert item.get() == 2

    time.sleep(5)

    # second time
    pc.get_value("a")  # trigger update
    time.sleep(1)
    assert pc.get_value("a") == "2"
    assert item.get() == 3
def load_config(self, sync=False):
    def _load():
        for config in self.configs:
            config.load_config(sync=True)

    if sync:
        _load()
    else:
        run_in_thread(_load)
def __init__(self, dc, exchange, exchange_type, **kwargs):
    PikaQueue.__init__(self, dc, exchange, exchange_type, None, None, **kwargs)
    self.max_queue_size = kwargs.get("max_queue_size")
    self.lazy_limit = kwargs.get("lazy_limit")
    if self.queue and self.lazy_limit and self.max_queue_size:
        self.qsize_diff_limit = int(self.max_queue_size * 0.1)
    else:
        self.qsize_diff_limit = 0
    self.qsize_diff = 0

    self.local2server = Queue.Queue()  # added by wxt 2015-12-16 to decouple local and server queues
    run_in_thread(self.put_to_server)  # added by wxt 2015-12-16
def start(self):
    if self.running:
        return

    self.logger.info("start tshark driver on interface %s for ports %s, with bpf filter %s",
                     self.interface, self.ports, self.bpf_filter)
    self.running = True
    self.data_mr = MetricsRecorder("sniffer.driver.data")
    self.error_mr = MetricsRecorder("sniffer.driver.error")

    port_filter = " or ".join(["tcp port {}".format(port) for port in self.ports])
    if self.bpf_filter:
        port_filter = "({}) and ({})".format(port_filter, self.bpf_filter)

    tshark_home = get_tshark_home()
    if not tshark_home:
        raise RuntimeError("tshark not found")
    self.logger.info("found tshark at %s", tshark_home)

    command = (is_linux() and "sudo " or "") + """%(tshark_home)s/tshark -o ssl.desegment_ssl_application_data:TRUE -o ssl.desegment_ssl_records:TRUE -o ssl.keys_list:"0.0.0.0","443","http","/home/threathunter/private.key" -f "%(port_filter)s" -i %(interface)s -Y "http.request or http.response" -T fields -Eseparator=/t -e http -e http.request -e ip.src -e tcp.srcport -e ip.dst -e tcp.dstport -e http.request.method -e http.host -e http.request.uri -e http.request.full_uri -e http.user_agent -e http.content_length -e http.content_type -e http.response.code -e http.response.phrase -e http.content_encoding -e http.cookie -e http.set_cookie -e http.referer -e data.data -e text """ % ({"tshark_home": tshark_home, "port_filter": port_filter, "interface": self.interface})

    environments = dict()
    environments["PCAP_PF_RING_CLUSTER_ID"] = "14"
    environments["PCAP_PF_RING_APPNAME"] = "tshark-" + self.interface
    environments["PCAP_PF_RING_USE_CLUSTER_PER_FLOW_4_TUPLE"] = "1"
    environments["LD_LIBRARY_PATH"] = "/usr/local/lib64"

    self.logger.info("start tshark command: %s", command)
    self.sub_task = pexpect.spawn(command, env=environments, timeout=3600)

    import atexit
    atexit.register(self.stop)

    # establish client
    gevent.sleep(2)
    self.client_task = run_in_thread(self.process_input)
    return
def get_current_generators():
    """
    Get all of the current log parser generators.

    :return:
    """
    now = millis_now()

    # initialize on first use
    if last_update_ts == 0:
        load_parsers()

    if now - last_update_ts >= 30000:
        run_in_thread(load_parsers)  # pass the function itself; calling it here would block

    return current_generators
def get_event_schema(event_name):
    """
    Get the schema of a given event, returned as a dict of {field name: field attributes}.

    :param event_name:
    :return:
    """
    now = millis_now()

    # initialize on first use
    if last_update_ts == 0:
        load_event_schemas()

    if now - last_update_ts >= 30000:
        run_in_thread(load_event_schemas)

    return event_schema_dictionary.get(event_name)
def start(self, func=None, sync=False):
    if not self.func:
        self.func = func
    self.running = True
    self._receiver.start_consuming()
    if sync:
        self.accept()
    else:
        self.accept_task = run_in_thread(self.accept)
def test_config_item_helper_methods(tmp_file, config, another_config):
    tmp_file.write(config)

    def async_update():
        time.sleep(2)
        tmp_file.write(another_config)

    loader = FileLoader("loader", str(tmp_file))
    parser = PropertiesParser("parser")
    tmp_file.write(config)
    c = Config(loader, parser)
    pc = PeriodicalConfig(c, 3)
    int_item = pc.int_item("int_key", caching=1)
    str_item = pc.str_item("str_key", caching=1)
    list_item = pc.list_item("list_key", caching=1)
    boolean_true_item = pc.boolean_item("boolean_true_key")
    boolean_false_item = pc.boolean_item("boolean_false_key")
    pc.load_config(sync=True)

    # first batch of asserts
    assert int_item.get() == 1
    assert str_item.get() == "aa"
    assert list(list_item.get()) == list(["1", "2", "3"])
    assert boolean_true_item.get()
    assert not boolean_false_item.get()

    run_in_thread(async_update)
    time.sleep(5)

    pc.get_value("a")  # trigger update
    time.sleep(1)
    assert int_item.get() == 10
    assert str_item.get() == "test"
    assert list(list_item.get()) == list(["a", "b", "c"])
    assert boolean_true_item.get()
    assert not boolean_false_item.get()
def start_consuming(self):
    """This method sets up the consumer by first calling
    add_on_cancel_callback so that the object is notified if RabbitMQ
    cancels the consumer. It then issues the Basic.Consume RPC command
    which returns the consumer tag that is used to uniquely identify the
    consumer with RabbitMQ. We keep the value to use it when we want to
    cancel consuming. The on_message method is passed in as the callback
    pika will invoke when a message is fully received.
    """
    # self.channel.add_on_cancel_callback(self.on_consumer_cancelled)
    self.running = True
    self.consumer_tag = self.channel.basic_consume(self.on_message, queue=self.queue)
    self.consume_task = run_in_thread(self.backend_consuming)
def start():
    from complexconfig.configcontainer import configcontainer
    sniffer_config = configcontainer.get_config("sniffer")

    running_tasks = []
    running_drivers = []

    processes_type = sniffer_config.get_string("sniffer.processes.type")
    sources = sniffer_config.get_list('sniffer.sources')
    logger.info('sources: {}'.format(sources))

    for source in sources:
        source_config = sniffer_config.get_value("sniffer." + source)
        instances = source_config.get('instances', 1)
        parser_name = source_config['parser']['name']
        parser_module = source_config['parser']['module']
        interface = source_config["interface"]
        p = get_parser(parser_name, parser_module)

        for idx in range(1, instances + 1):
            driver = get_driver(source_config, interface, p, idx)
            if processes_type == "process":
                # get the driver and start a subprocess to process its data
                task = run_in_subprocess(run_task, interface, idx, p, driver, True)
            else:
                task = run_in_thread(run_task, interface, idx, p, driver, False)
            running_tasks.append(task)
            running_drivers.append(driver)
            logger.warn("Finished starting source {} driver {} index {} on interface {}".format(source, driver, idx, interface))

    def terminate():
        logger.warn("finish produce")
        Produce.stop()

        logger.warn("finish %d drivers", len(running_drivers))
        for d in running_drivers:
            try:
                d.stop()
            except:
                pass

        logger.warn("finish %d tasks", len(running_tasks))
        for t in running_tasks:
            if processes_type == "process":
                try:
                    t.terminate()
                except:
                    pass
            else:
                # daemon threads exit with the main process
                pass

    atexit.register(terminate)

    from threathunter_common.util import millis_now
    start_time = millis_now()
    while True:
        try:
            gevent.sleep(5)

            is_all_alive = True
            for t in running_tasks:
                if processes_type == "process":
                    if not t.is_alive():
                        is_all_alive = False
                        break
                else:
                    if not t.isAlive():
                        is_all_alive = False
                        break

            ttl = sniffer_config.get_int("sniffer.ttl", 5) * 1000
            if (millis_now() - start_time) > ttl:
                logger.warn("ttl has expired")
                break

            if not is_all_alive:
                logger.warn("some tasks have exited, exiting")
                break
        except Exception as err:
            logger.error("met error {}, exiting sniffer and waiting for reboot".format(err))
            break

    logger.warn("exiting sniffer")
    terminate()
    print "terminating"
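The process-based branch above depends on a run_in_subprocess helper that is not shown in these snippets. A minimal sketch, assuming it mirrors run_in_thread but returns a multiprocessing.Process handle (so the terminate() and is_alive() calls above work), could be:

import multiprocessing

def run_in_subprocess(func, *args, **kwargs):
    """Minimal sketch (assumption): run func(*args, **kwargs) in a child
    process and return the Process handle so it can be terminated later."""
    process = multiprocessing.Process(target=func, args=args, kwargs=kwargs)
    process.daemon = True  # assumption: children should not outlive the sniffer
    process.start()
    return process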
def start(self):
    self.consumer = KafkaConsumer(self.topics, **self.config)
    self.bg_task = run_in_thread(self.bg_processing)
def start_consuming(self):
    self.running = True
    latch = CountDownLatch()
    self.consume_task = run_in_thread(self.consume_task, latch)
    latch.wait()
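The CountDownLatch used here is not defined in these snippets. A minimal single-count sketch built on threading.Condition, under the assumption that the consumer task calls count_down() once it is ready and wait() blocks until then, could look like:

import threading

class CountDownLatch(object):
    """Minimal sketch (assumption): wait() blocks until count_down() has
    been called the requested number of times."""

    def __init__(self, count=1):
        self.count = count
        self.condition = threading.Condition()

    def count_down(self):
        with self.condition:
            self.count -= 1
            if self.count <= 0:
                self.condition.notify_all()

    def wait(self):
        with self.condition:
            while self.count > 0:
                self.condition.wait()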
def start_consuming(self):
    self.running = True
    self.consume_task = run_in_thread(self.consume_task)
def start(self):
    if self._receiver:
        self._receiver.start_consuming()
    self.response_task = run_in_thread(self.process_mails)