def init_autoparser():
    """Initialize the auto-parser subsystem from web-hosted configuration.

    Reads the event-schema URL, parser URL and auth token from the "sniffer"
    config, builds the web loader callbacks, and hands them to init_parser.
    """
    from nebula_parser.parser_initializer import init_parser, build_fn_load_event_schemas_on_web, \
        build_fn_load_parsers_on_web
    from complexconfig.configcontainer import configcontainer

    sniffer_config = configcontainer.get_config("sniffer")
    event_url = sniffer_config.get_string("sniffer.web_config.event_url")
    parser_url = sniffer_config.get_string("sniffer.web_config.parser_url")
    auth = sniffer_config.get_string("sniffer.web_config.auth")

    fn_load_event_schema = build_fn_load_event_schemas_on_web(event_url, auth)
    fn_load_parsers = build_fn_load_parsers_on_web(parser_url, auth)
    init_parser(fn_load_event_schema, fn_load_parsers)
    # BUG FIX: the original message had only one "{}" placeholder for two
    # format arguments, so parser_url was silently dropped from the log line.
    print_with_time("successfully init auto parsers, event from: {}, parser from: {}".format(
        event_url, parser_url))
def init_redis():
    """Populate the RedisCtx singleton (host/port/password) from the sniffer config."""
    from complexconfig.configcontainer import configcontainer
    from threathunter_common.redis.redisctx import RedisCtx

    cfg = configcontainer.get_config("sniffer")
    host = cfg.get_string("sniffer.redis.host")
    port = cfg.get_int("sniffer.redis.port")
    password = cfg.get_string("sniffer.redis.password", "")

    ctx = RedisCtx.get_instance()
    ctx.host = host
    ctx.port = port
    ctx.password = password
    # mask the password in the log output
    print_with_time("successfully init redis[host={},port={},password={}]".format(
        host, port, "*" * len(password)))
def __init__(self, interface="lo0", ports=(80, 8080, 8443),
             key_place="/Users/lw/sslprivatekey/server.key.unencrypted",
             bpf_filter=None):
    """Tshark-based HTTPS capture driver.

    Server ports come from the "filter.traffic.server_ports" config entry
    when it is non-empty, otherwise from the `ports` argument; either way
    the value is normalized through expand_ports().
    """
    Driver.__init__(self)

    configured_ports = configcontainer.get_config("sniffer").get_string(
        "filter.traffic.server_ports", "")
    self.ports = expand_ports(configured_ports or ports)
    self.key_place = key_place
    self.interface = interface
    self.bpf_filter = bpf_filter

    self.sub_task = None
    self.client_task = None
    self.running = False

    # cache used for building the http message
    self.cache = Cache(50000, ttl=30)
    self.TIMEOUT = 30  # 30s timeout
    self.last_check = millis_now()

    self.logger = logging.getLogger("sniffer.tshark.{}".format(interface))
    self.data_mr = None
    self.error_mr = None
    self.fixed_tags = {
        "ports": str(self.ports),
        "interface": self.interface
    }
def init_sentry():
    """Optionally attach a Sentry logging handler.

    No-op unless sniffer.sentry.enable is true and both a DSN and a
    server name are configured; handler level defaults to ERROR.
    """
    from raven import Client
    from raven.handlers.logging import SentryHandler
    from raven.conf import setup_logging
    from complexconfig.configcontainer import configcontainer

    config = configcontainer.get_config("sniffer")
    if not config.get_boolean("sniffer.sentry.enable", False):
        return

    sentry_level = config.get_string("sniffer.sentry.min_level", "error")
    sentry_server_name = config.get_string("sniffer.sentry.server_name", "")
    sentry_dsn = config.get_string("sniffer.sentry.dsn", "")
    if not sentry_dsn or not sentry_server_name:
        return

    print_with_time("sentry is enabled with dsn: {}, server_name: {}, level: {}".format(
        sentry_dsn, sentry_server_name, sentry_level))
    handler = SentryHandler(Client(sentry_dsn, name=sentry_server_name))
    # map config level name -> logging level; anything unrecognized means ERROR
    level_by_name = {'debug': logging.DEBUG, 'info': logging.INFO}
    handler.level = level_by_name.get(sentry_level.lower(), logging.ERROR)
    setup_logging(handler)
def __init__(self, interface, ports=None, embedded_bro=True, bro_home=None,
             idx=1, start_port=None, bpf_filter=""):
    """Bro-based HTTP capture driver.

    NOTE(review): start_port defaults to None but is used unconditionally in
    `start_port + idx` below; callers apparently always supply it — confirm.
    """
    Driver.__init__(self)

    if ports is None:
        ports = [80, 81, 1080, 3128, 8000, 8080, 8888, 9001]

    self.embedded_bro = embedded_bro
    self.bro_home = get_bro_home(bro_home)
    self.interface = interface
    self.bpf_filter = bpf_filter
    self.logger = settings.init_logging('bro.{}'.format(idx))

    # config value wins over the ports argument when it is non-empty
    configured_ports = configcontainer.get_config("sniffer").get_string(
        "filter.traffic.server_ports", "")
    self.ports = expand_ports(configured_ports or ports)

    self.idx = idx
    self.bro_port = start_port + idx
    self.last_netstat_ts = millis_now()

    self.sub_task = None
    self.client_task = None
    self.last_update = 0
    self.filtered_clients = []
    self.encrypt_keys = []
    self.encrypt_salt = ""

    self.ep = None
    self.sub = None
    self.ss = None
    self.data_mr = None
    self.error_mr = None
    self.running = False
def __run():
    """Invoke produce() every Produce.sec seconds while Produce.running is set."""
    from complexconfig.configcontainer import configcontainer
    sniffer_config = configcontainer.get_config("sniffer")

    elapsed = 0
    while Produce.running:
        if elapsed < Produce.sec:
            # sleep in 1s slices so stopping is responsive
            gevent.sleep(1)
            elapsed += 1
        else:
            url = sniffer_config.get_string(
                'sniffer.web_config.produce_url',
                'http://127.0.0.1:9001/nebula/NebulaStrategy')
            produce(url)
            elapsed = 0
def __init__(self, depth, value, parent, is_leaf, disable_fold):
    """Create a url-tree node.

    :param depth: node depth; the root has an empty value at depth 0, the
                  host sits at depth 1, and depth grows downwards
    :param value: this node's value — a url segment, or the fold marker
                  (FOLD_FLAG)
    :param parent: parent node
    :param is_leaf: whether this node can terminate a url. Purely logical:
                    urls may share prefixes, so a logical leaf can still have
                    children belonging to other urls
    :param disable_fold: folding is forbidden below this node
    """
    self.depth = depth
    self.value = value
    self.parent = parent
    self.is_leaf = is_leaf
    self.disable_fold = disable_fold

    # max_children: fold once a node accumulates more children than this
    from complexconfig.configcontainer import configcontainer
    sniffer_config = configcontainer.get_config("sniffer")
    if self.depth == 0:
        # effectively unlimited — room for as many hosts as appear
        self.max_children = 1000000
    elif self.depth <= 2:
        self.max_children = sniffer_config.get_int(
            'sniffer.urltree.rootnodes.width', 300)
    else:
        self.max_children = sniffer_config.get_int(
            'sniffer.urltree.leafnodes.width', 30)

    # whether the ordinary children under this node are folded
    self.is_fold = False
    # folding was forced by configuration
    self.fold_by_config = False
    # folding was triggered by having too many children
    self.fold_by_too_many_children = False

    # two kinds of children: ordinary_children may be folded away when too
    # numerous; nominated_children come from configuration and never fold
    self.ordinary_children = dict()
    self.nominated_children = dict()

    # visit counter
    self.visit_count = 0
def all_py(url):
    """Fetch the content of every strategy script listed by the events API.

    :param url: base url forwarded to nebula_strategy_get for each script name
    :return: list of parsed JSON payloads, one per successfully fetched script
    """
    from complexconfig.configcontainer import configcontainer
    sniffer_config = configcontainer.get_config("sniffer")
    events_url = sniffer_config.get_string(
        'sniffer.web_config.host', 'http://127.0.0.1:9001/nebula/events')
    events_url = events_url + '/nebula/events'

    r = requests.get(events_url)
    py_name_all = [e.get('name') for e in r.json().get('values', [])]

    py_content_all = []
    for p in py_name_all:
        request = nebula_strategy_get(url, p)
        # skip names whose fetch failed (None) or returned a non-200 status;
        # the original nested if/else-pass chains reduced to this one guard
        if request is not None and request.status_code == 200:
            py_content_all.append(request.json())
    return py_content_all
def init_metrics():
    """Initialize the redis-backed MetricsAgent; exit the process on bad config."""
    from threathunter_common.metrics.metricsagent import MetricsAgent
    from complexconfig.configcontainer import configcontainer

    cfg = configcontainer.get_config("sniffer")
    redis_host = cfg.get_string('sniffer.redis_host')
    redis_port = cfg.get_int('sniffer.redis_port')
    if not redis_host or not redis_port:
        # metrics are mandatory — abort the process rather than limp along
        print_with_time("invalid redis configuration")
        import sys
        sys.exit(-1)

    metrics_config = {
        'server': 'redis',
        'redis': {'type': 'redis', 'host': redis_host, 'port': redis_port},
    }
    MetricsAgent.get_instance().initialize_by_dict(metrics_config)
    print_with_time("successfully initializing metrics with config {}".format(str(metrics_config)))
def get_driver(config, interface, parser, idx):
    """Build a sniffer driver instance from one source-config dict.

    :param config: per-source configuration; config['driver'] selects the type
    :param interface: network interface name (used by the bro/tshark drivers)
    :param parser: parser object — unused here, kept for caller compatibility
    :param idx: 1-based instance index (used by the bro driver)
    :return: a driver instance, or None when the driver name is unknown
    """
    from complexconfig.configcontainer import configcontainer

    name = config['driver']

    if name == "bro":
        from nebula_sniffer.drivers.brohttpdriver import BroHttpDriver
        from nebula_sniffer.utils import expand_ports
        embedded = config.get("embedded", True)
        ports = expand_ports(config['ports'])  # extend it
        start_port = int(config['start_port'])
        bpf_filter = config.get("bpf_filter", "")
        home = configcontainer.get_config("sniffer").get_string("sniffer.bro.home")
        # Only pass ports/bro_home when actually configured so the driver's
        # own defaults apply otherwise; this replaces the original four
        # near-duplicate constructor calls.
        kwargs = dict(interface=interface, embedded_bro=embedded, idx=idx,
                      start_port=start_port, bpf_filter=bpf_filter)
        if ports:
            kwargs['ports'] = ports
        if home:
            kwargs['bro_home'] = home
        return BroHttpDriver(**kwargs)

    if name == "tshark":
        from nebula_sniffer.drivers.tsharkhttpsdriver import TsharkHttpsDriver
        ports = config["ports"]
        bpf_filter = config.get("bpf_filter", "")
        if ports:
            return TsharkHttpsDriver(interface=interface, ports=ports,
                                     bpf_filter=bpf_filter)
        return TsharkHttpsDriver(interface=interface, bpf_filter=bpf_filter)

    if name == "syslog":
        from nebula_sniffer.drivers.syslogdriver import SyslogDriver
        return SyslogDriver(int(config["port"]))

    if name == "packetbeat":
        from nebula_sniffer.drivers.pktdriver import PacketbeatDriver
        return PacketbeatDriver(int(config["port"]))

    if name == "redislist":
        from nebula_sniffer.drivers.redislistdriver import RedisListDriver
        return RedisListDriver(config["host"], int(config['port']),
                               config.get('password', ''))

    if name == "logstash":
        from nebula_sniffer.drivers.logstashdriver import LogstashDriver
        return LogstashDriver(int(config['port']))

    if name == "rabbitmq":
        from nebula_sniffer.drivers.rabbitmqdriver import RabbitmqDriver
        return RabbitmqDriver(config['amqp_url'], config['queue_name'],
                              config['exchange_name'], config['exchange_type'],
                              bool(config['durable']), config['routing_key'])

    if name == "kafka":
        from nebula_sniffer.drivers.kafkadriver import KafkaDriver
        return KafkaDriver(config['topics'],
                           bootstrap_servers=config['bootstrap_servers'],
                           group_id=config['group_id'])

    # unknown driver name
    return None
def start(): from complexconfig.configcontainer import configcontainer sniffer_config = configcontainer.get_config("sniffer") running_tasks = [] running_drivers = [] processes_type = sniffer_config.get_string("sniffer.processes.type") sources = sniffer_config.get_list('sniffer.sources') logger.info('sources: {}'.format(sources)) for source in sources: source_config = sniffer_config.get_value("sniffer." + source) instances = source_config.get('instances', 1) parser_name = source_config['parser']['name'] parser_module = source_config['parser']['module'] interface = source_config["interface"] p = get_parser(parser_name, parser_module) for idx in range(1, instances+1): driver = get_driver(source_config, interface, p, idx) if processes_type == "process": # 获取到驱动并开启子进程进行数据处理 task = run_in_subprocess(run_task, interface, idx, p, driver, True) else: task = run_in_thread(run_task, interface, idx, p, driver, False) running_tasks.append(task) running_drivers.append(driver) logger.warn("Finished starting source {} driver {} index {} on interface {}".format(source, driver, idx, interface)) def terminate(): logger.warn("finish produce") Produce.stop() logger.warn("finish %d drivers", len(running_drivers)) for d in running_drivers: try: d.stop() except: pass logger.warn("finish %d tasks", len(running_tasks)) for t in running_tasks: if processes_type == "process": try: t.terminate() except: pass else: # daemon threads pass atexit.register(terminate) from threathunter_common.util import millis_now start_time = millis_now() while True: try: gevent.sleep(5) is_all_alive = True for t in running_tasks: if processes_type == "process": if not t.is_alive(): is_all_alive = False break else: if not t.isAlive(): is_all_alive = False break ttl = sniffer_config.get_int("sniffer.ttl", 5) * 1000 if (millis_now() - start_time) > ttl: logger.warn("ttl has expire") break if not is_all_alive: logger.warn("some tasks has exited, exiting") break except Exception as err: logger.error("meet error {}, 
exit sniffer and wait for rebooting".format(err)) break logger.warn("exiting sniffer") terminate() print "terminating"
# Build the file-backed global config and the web-backed config, then layer
# them into one cascading config registered under the "nebula" key.
global_config_parser = PropertiesParser("global_config_parser")
global_config = Config(global_config_loader, global_config_parser)
global_config.load_config(sync=True)

# init the web config
web_config_loader = FileLoader("web_config_loader", Conf_Web_Path)
web_config_parser = PropertiesParser("web_config_parser")
web_config = Config(web_config_loader, web_config_parser)
web_config.load_config(sync=True)

# build the cascading config
# file config will be updated every half an hour, while the web config
# will be updated every 5 minute
cascading_config = CascadingConfig(global_config, web_config)
configcontainer.set_config("nebula", cascading_config)
_config = configcontainer.get_config("nebula")

Nebula_Path = 'nebula'
GRAFANA_PATH = 'grafana_app'
SITE_TITLE = 'Nebula'

# frontend version
Nebula_Web_Version = '3.0.1'
# API version
API_VERSION = '1.4.0'
Constructing = u'前方施工中,请自觉绕行...'

# swagger template location
Swagger_Assets_Path = opath.join(Base_Path, Nebula_Path, "middleware/swagger")

# web listen settings
WebUI_Address = '0.0.0.0'

# logging configuration
# NOTE(review): statement truncated in this view — continues past the chunk
Logging_File = opath.join(
import hashlib
import urlparse
import logging
import Cookie
from collections import Mapping
from IPy import IP
from complexconfig.configcontainer import configcontainer
from .bson.objectid import ObjectId
from .befilteredexception import BeFilteredException
from .path_normalizer import normalize_path

logger = logging.getLogger("sniffer.httpmsg")

sniffer_config = configcontainer.get_config("sniffer")

# Static-resource suffixes to filter out; cached for 60s and refreshed from
# the comma-separated "filter.static.suffixes" config value (lower-cased).
suffix_config = sniffer_config.item(
    key="filter.static.suffixes",
    caching=60,
    default={
        "gif", "png", "ico", "css", "js", "csv", "txt", "jpeg", "jpg",
        "woff", "ttf"
    },
    cb_load=lambda raw: set(raw.lower().split(",")) if raw else set())

# Hosts whose traffic should be filtered; same comma-separated/lower-cased
# loading convention, empty set by default.
filtered_hosts_config = sniffer_config.item(
    key="filter.traffic.domains",
    caching=60,
    default=set(),
    cb_load=lambda raw: set(raw.lower().split(",")) if raw else set())
def synchronize(self, force=False):
    """
    Synchronize the url tree with the server.

    :param force: whether to force the sync. By default (False) it runs at
                  most once per interval; force exists for testing convenience.
    :return: success state (True on success, False on failure, None when the
             rate limit skipped the sync)
    """
    now = millis_now()
    # rate limit: skip unless forced or 5 minutes have elapsed
    if not force and (now - self.synchronize_ts) < 300000:  # 5 min for sync
        return
    self.synchronize_ts = now
    try:
        print 'url tree sync, the fold urls are: ', self.get_all_generated_rule()
        from complexconfig.configcontainer import configcontainer
        sniffer_config = configcontainer.get_config("sniffer")
        report_url = sniffer_config.get_string(
            'sniffer.web_config.bones.report_url',
            'http://127.0.0.1:8080/asset-manager/trunk/report')
        report_leaves_url = sniffer_config.get_string(
            'sniffer.web_config.bones.page_count_report_url',
            'http://127.0.0.1:8080/page_analysis/report_url')
        fetch_url = sniffer_config.get_string(
            'sniffer.web_config.bones.fetch_url',
            'http://127.0.0.1:8080/asset-manager/trunk/list')
        if not report_url or not fetch_url or not report_leaves_url:
            logger.error("bones url is not configured")
            return False

        # report first. Continue with failure.
        data = {
            'urls': self.get_all_generated_rule(),
            'version': self.version
        }
        print "data", data
        print "report_url....", report_url
        print "report_leaves_url...", report_leaves_url
        response = requests.post(report_url, json=data)
        print response.text
        if response.status_code != 200:
            logger.error('fail to send report with status: %s',
                         response.status_code)
        else:
            # HTTP 200 — still check the application-level status field
            try:
                response_data = response.json()
                if response_data.get('status') != 200:
                    logger.error(
                        'report server return failure, response is %s',
                        response.text)
            except Exception as ex:
                logger.error(
                    'report server return failure, exception is %s', ex)

        # second report: per-leaf visit counts
        data = {
            'visit_times': self.get_all_leaves_dict(),
        }
        print "data2", data, type(data)
        response = requests.post(report_leaves_url, json=data)
        if response.status_code != 200:
            logger.error('fail to send leaves report with status: %s',
                         response.status_code)
        else:
            try:
                response_data = response.json()
                if response_data.get('status') != 200:
                    logger.error(
                        'leaves report server return failure, response is %s',
                        response.text)
            except Exception as ex:
                logger.error(
                    'report server return failure, exception is %s', ex)
        # NOTE(review): placement of this reset relative to the else-branch is
        # ambiguous in the collapsed source — assumed unconditional; confirm.
        self.clean_visit_count()

        # start to fetch, return with failure
        response = requests.get(fetch_url)
        if response.status_code != 200:
            logger.error('fail to fetch url from server with status: %s',
                         response.status_code)
            return False
        else:
            try:
                response_data = response.json()
                if response_data.get('status') != 200:
                    logger.error('fetch url fail, the response is %s',
                                 response.text)
                    return False
            except Exception as ex:
                logger.error('fetch url fail, the exception is %s', ex)
                return False

        # data is fetched successfully
        result = response_data['result']
        version = response_data['version']
        trunks = result['trunks']
        blackTrunks = result['blackTrunks']
        whiteTrunks = result['whiteTrunks']
        # only rebuild when the server-side version has moved on
        if self.version != version:
            self.reset(version)
            self.sync_with_config(version, trunks, whiteTrunks, blackTrunks)
    except Exception as ex:
        logger.error('meet error during synchronization, error:%s', ex)
        import traceback
        traceback.print_exc()
        return False
    return True
# Load the global config from file when it exists; otherwise leave it None.
global_config = None
if _opath.exists(_global_config_fn):
    _loader = _FileLoader("loader", _global_config_fn)
    _parser = _PropertiesParser("parser")
    global_config = _Config(_loader, _parser)
    global_config.load_config(sync=True)

# config_scope is hard-coded to "global" here, so the "local" branch below is
# effectively dead unless this constant is edited.
config_scope = "global"
if config_scope == "local":
    global_config = _EmptyConfig()
    _web_config_fn = _opath.join(_basedir, "localconfig")

_cc.set_config("nebula", global_config)
_config = _cc.get_config("nebula")

# =================== Loadding Settings ===================
DEBUG = False
CACHE_TYPE = 'simple'
Auth_Code = "196ca0c6b74ad61597e3357261e80caf"
WebUI_Address = _config.get_string("webui_address", '0.0.0.0')
WebUI_Port = _config.get_int("webui_port", 9001)
Notice_RPC_Template_Path = _config.get_string(
    "notice_rpc_template_path", "conf")
Redis_Host = _config.get_string('redis_host', "127.0.0.1")
Redis_Port = _config.get_int('redis_port', 6379)