Example #1
def env_prepare():
    # metrics initialization config
    metrics_dict = {
        "app": "nebula_web",
        "redis": {
            "type": "redis",
            "host": settings.Redis_Host,
            "port": settings.Redis_Port
        },
        "influxdb": {
            "type": "influxdb",
            "url": settings.Influxdb_Url,
            "username": "******",
            "password": "******"
        },
        "server": settings.Metrics_Server
    }
    from threathunter_common.redis.redisctx import RedisCtx
    RedisCtx.get_instance().host = settings.Redis_Host
    RedisCtx.get_instance().port = settings.Redis_Port
    
    from threathunter_common.metrics.metricsagent import MetricsAgent
    MetricsAgent.get_instance().initialize_by_dict(metrics_dict)
    
    # load the caches
    from nebula.dao.cache import Cache_Init_Functions, init_caches

    # cache of strategy weights
    from nebula.dao.strategy_dao import init_strategy_weigh
    Cache_Init_Functions.append(init_strategy_weigh)
    init_caches()
Example #2
def init_env(logger_name, only_babel=False):
    logger = logging.getLogger(logger_name)
    logger.debug("=================== Enter Debug Level.=====================")
    # configure redis
    RedisCtx.get_instance().host = settings.Redis_Host
    RedisCtx.get_instance().port = settings.Redis_Port

    # initialize the metrics service
    try:
        from threathunter_common.metrics.metricsagent import MetricsAgent
    except ImportError:
        logger.error(
            "failed to import MetricsAgent from threathunter_common.metrics.metricsagent"
        )
        sys.exit(-1)

    MetricsAgent.get_instance().initialize_by_dict(settings.metrics_dict)
    logger.info("metrics service initialized successfully: {}.".format(MetricsAgent.get_instance().m))

    if not only_babel:
        # initialize the MySQL connection
        uri = "mysql+pymysql://%s:%s@%s:%s/%s?charset=utf8" % (
            settings.MySQL_User, settings.MySQL_Passwd, settings.MySQL_Host,
            settings.MySQL_Port, settings.Nebula_DB)
        logger.debug("Connect Url: %s", uri)
        settings.db = create_engine(uri,
                                    pool_size=settings.Concurrency,
                                    max_overflow=10,
                                    pool_recycle=14400)  #,echo="debug")
    return logger
Example #3
def init_server_runtime():
    RedisCtx.get_instance().host = settings.Redis_Host
    RedisCtx.get_instance().port = settings.Redis_Port
    logger.debug("redis initialized successfully: {}".format(RedisCtx.get_instance()))

    MetricsAgent.get_instance().initialize(settings.Metrics_Conf_FN)
    logger.debug("metrics service initialized successfully: {}.".format(MetricsAgent.get_instance().m))
Example #4
    def _run(self):
        while True:
            try:
                diskinfo = psutil.disk_usage("/")
                cpuinfo = psutil.cpu_percent(interval=None)
                meminfo = psutil.virtual_memory()

                metrics = {
                    "totalmem": meminfo.total,
                    "freemem": meminfo.available,
                    "memratio": meminfo.percent,
                    "totalspace": diskinfo.total,
                    "freespace": diskinfo.free,
                    "spaceratio": diskinfo.percent,
                    "cpuload": cpuinfo
                }

                current = millis_now()
                # round current up to the next 5-second boundary
                current = (current + 4999) // 5000 * 5000

                for name, data in metrics.iteritems():
                    db = "default"
                    metrics_name = "system.{}".format(name)
                    MetricsAgent.get_instance().add_metrics(
                        db, metrics_name, {}, data, 3600 * 24 * 7)
                gevent.sleep(5)  # @todo
            except Exception as error:
                logger.error(error)
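
The alignment line above rounds a millisecond timestamp up to the next 5-second boundary with integer arithmetic (under Python 2, / on ints is already integer division). A minimal standalone sketch of the same idea, written with // so it is also correct under Python 3:

def align_up(ts_millis, step_millis=5000):
    # adding (step - 1) makes integer division round up to the next multiple
    return (ts_millis + step_millis - 1) // step_millis * step_millis

assert align_up(12001) == 15000  # partway into a slot -> next boundary
assert align_up(15000) == 15000  # already aligned -> unchanged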
Example #5
    def setup_class(self):
        MetricsAgent.get_instance().initialize_by_dict({
            "server": "redis",
            "redis": {
                "type": "redis",
                "host": "127.0.0.1",
                "port": 6379
            }
        })
Example #6
    def setup_method(self, method):
        MetricsAgent.get_instance().initialize_by_dict(
            {"redis": {
                "type": "redis",
                "host": "localhost",
                "port": "6379"
            }},
            db="test",
            server_name="redis")
        MetricsAgent.get_instance().clear("test", "test")
Example #7
    def test_simple(self):
        MetricsAgent.get_instance().add_metrics("test", "test",
                                                {"tag1": "tag1"}, 1.0, 60)
        MetricsAgent.get_instance().add_metrics("test", "test",
                                                {"tag1": "tag2"}, 3.0, 60)
        import gevent
        gevent.sleep(1)
        now = millis_now()
        assert MetricsAgent.get_instance().query(
            "test", "test", "sum", now - 60000, now, 60, {},
            {}).values()[0].values()[0] == 4.0
Example #8
    def setup_class(self):
        print "start to init service"
        MetricsAgent.get_instance().initialize_by_dict({
            "server": "redis",
            "redis": {
                "type": "redis",
                "host": "127.0.0.1",
                "port": 6379
            }
        })
        TestService.init_services()
        print "successfully started the services"
Example #9
def _get_metrics_data(name, accumulative, fromtime, endtime, window):
    db = "default"
    if name.startswith("system"):
        # hack for old grafana
        accumulative = False
    if accumulative:
        aggregation_type = "sum"
    else:
        aggregation_type = "max"

    data = MetricsAgent.get_instance().query(db, name, aggregation_type,
                                             fromtime, endtime, window // 1000,
                                             {}, [])
    start = fromtime
    if data:
        minimum = min(data.keys())
        start = start + (minimum - start) % window
    values = []
    ts = start
    while ts <= endtime:
        entry = data.get(ts)
        if not entry:
            values.append(None)
        else:
            values.append(entry.get(tuple()))
        ts += window

    return {"start": start, "interval": window, "values": values}
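
For reference, a hypothetical call to the helper above and the shape it returns (the metric name and timestamps are illustrative, not from the source):

# window is in milliseconds; the query itself is issued in seconds
series = _get_metrics_data("system.cpuload", accumulative=False,
                           fromtime=1500000000000, endtime=1500000300000,
                           window=60000)
# => {"start": 1500000000000, "interval": 60000,
#     "values": [12.5, None, 30.0, ...]}  # one slot per window, None when no data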
Example #10
def test_influxdb():
    MetricsAgent.get_instance().initialize_by_dict(
        {"influxdb": {
            "type": "influxdb",
            "url": "http://127.0.0.1:8086/",
            "username": "******",
            "password": "******"
        }}, "influxdb")
    MetricsAgent.get_instance().clear("test", "test")
    MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag1"}, 1.0, 60)
    MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag2"}, 3.0, 60)
    time.sleep(1)
    result = get_metrics("/db/test/series?p=test&q=select sum(value) from \"test\" where time > now()-1h and (\"tag1\" = 'tag1' or \"tag1\" = 'tag2') group by time(60s)")
    print result
    assert result[0]["points"][0][1] == 4.0
Example #11
def test_redis():
    MetricsAgent.get_instance().initialize_by_dict(
        {"redis": {
            "type": "redis",
            "host": "localhost",
            "port": "6379"
        }}, "redis")
    MetricsAgent.get_instance().clear("test", "test")
    MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag1"}, 1.0, 60)
    MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag2"}, 3.0, 60)
    time.sleep(1)
    result = get_metrics("/db/test/series?p=test&q=select sum(value) from \"test\" where time > now()-1h and (\"tag1\" = 'tag1' or \"tag1\" = 'tag2') group by time(60s)")
    print result
    assert result[0]["points"][0][1] == 4.0
Example #12
def init_env(logger_name):
    logger = logging.getLogger(logger_name)
    logger.debug("=================== Enter Debug Level.=====================")
    # configure redis
    RedisCtx.get_instance().host = settings.Redis_Host
    RedisCtx.get_instance().port = settings.Redis_Port
    # initialize the metrics service
    try:
        from threathunter_common.metrics.metricsagent import MetricsAgent
    except ImportError:
        logger.error(
            "failed to import MetricsAgent from threathunter_common.metrics.metricsagent"
        )
        sys.exit(-1)

    MetricsAgent.get_instance().initialize_by_dict(settings.metrics_dict)
    logger.info("metrics service initialized successfully: {}.".format(MetricsAgent.get_instance().m))

    return logger
Example #13
def test_proxy():
    MetricsAgent.get_instance().initialize_by_dict(
        {"influxdb": {
            "type": "influxdb",
            "url": "http://127.0.0.1:8086/",
            "username": "******",
            "password": "******"
        }}, "influxdb")
    MetricsAgent.get_instance().clear("test", "test")
    MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag1"}, 1.0, 60)
    MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag2"}, 3.0, 60)
    time.sleep(1)
    url = "http://127.0.0.1:8086/db/test/series?p=test&q=select%20sum(value)%20from%20test%20where%20time%20%3E%20now()-1h%20and%20(tag1%20=%20%27tag1%27%20or%20tag1%20=%20%27tag2%27)%20group%20by%20time(60s)&u=root"
    original_result = json.loads(urllib2.urlopen(url).read())
    proxy_result = get_metrics(url)
    print original_result
    print proxy_result
    assert original_result == proxy_result
Example #14
def init_metrics():
    from threathunter_common.metrics.metricsagent import MetricsAgent
    from complexconfig.configcontainer import configcontainer
    sniffer_config = configcontainer.get_config("sniffer")

    redis_host = sniffer_config.get_string('sniffer.redis_host')
    redis_port = sniffer_config.get_int('sniffer.redis_port')
    if not redis_host or not redis_port:
        print_with_time("invalid redis configuration")
        import sys
        sys.exit(-1)

    metrics_config = {
        'server': 'redis',
        'redis': {
            'type': 'redis',
            'host': redis_host,
            'port': redis_port
        }
    }
    MetricsAgent.get_instance().initialize_by_dict(metrics_config)
    print_with_time("successfully initializing metrics with config {}".format(str(metrics_config)))
Example #15
def common_test():
    # MetricsAgent.get_instance().clear("MetricsTest", "test_metrics")
    MetricsAgent.get_instance().add_metrics("redq", "test_metrics",
                                            {"tag1": "tag1"}, 1.0, 60)
    MetricsAgent.get_instance().add_metrics("redq", "test_metrics",
                                            {"tag1": "tag1"}, 1.0, 60)
    MetricsAgent.get_instance().add_metrics("redq", "test_metrics",
                                            {"tag1": "tag1"}, 1, 60)
Example #16
    def get(self):
        """
        Get the all the http count
        @API
        summary: all the http count
        notes: Get all the http count
        tags:
          - platform
        produces:
          - application/json
        """

        interval = 5 * minute
        now = millis_now()
        endtime = now - (now % interval)
        fromtime = endtime - hour
        try:
            network_statistics = MetricsAgent.get_instance().query(
                'nebula.online', 'events.income.count', 'sum', fromtime,
                endtime, interval)
        except Exception as e:
            logger.error(e)
            self.process_error(-1, 'fail to get metrics')
            return  # network_statistics is not set if the query failed

        # parse the network statistics result in timestamp order
        statistics_timeframe = network_statistics.keys()
        network_list = list()
        try:
            for time_frame in range(fromtime, endtime, interval):
                network = dict(time_frame=time_frame, count=0)

                if time_frame in statistics_timeframe:
                    ts_data = network_statistics[time_frame]
                    for legend, value in ts_data.iteritems():
                        network['count'] = int(value)

                network_list.append(network)

            self.finish(json_dumps(network_list))
        except Exception as e:
            logger.error(e)
            self.process_error(-1, 'fail to get network statistics')
Example #17
    def post(self):
        """
        add metrics via influxdb proxy
        """
        path = self.request.path.split('/')
        metrics = json.loads(self.request.body)
        metric_agent = MetricsAgent.get_instance()
        expire_seconds = 86400 * 7
        if 'db' in path:
            db = path[3]
            for metric in metrics:
                metric_name = metric['name']
                columns = metric['columns']
                value_index = columns.index('value')
                points = metric['points']
                for point in points:
                    value = point[value_index]
                    tags = {columns[i]: point[i] for i in range(len(point)) if i != value_index}
                    metric_agent.add_metrics(
                        db, metric_name, tags, value, expire_seconds)
            self.finish(json_dumps({'status': 0, 'msg': 'ok'}))
        else:
            self.finish(json_dumps({'status': 0, 'msg': 'db not exist'}))
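
For context, this handler consumes an InfluxDB 0.8-style series payload. A hedged sketch of a request body it would accept (the metric name, tags and values are illustrative, and the /metricsproxy/db/default/series path is an assumption based on the routing shown in Example #19):

payload = [{
    "name": "web.notices",
    "columns": ["value", "strategy"],  # exactly one column must be named "value"
    "points": [
        [1.0, "strategy_a"],
        [3.0, "strategy_b"],
    ],
}]
# POSTed to e.g. /metricsproxy/db/default/series, path[3] == "default", so each
# point becomes add_metrics("default", "web.notices", {"strategy": ...}, value, 86400 * 7)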
Example #18
def MetricsDataHandler():
    """
    Fetch statistics from the metrics monitoring system.

    values: { timepoint1: {tag1:count, tag2:count}, timepoint2: {tag1:count, tag2:count} }
    """
    req = request.args
    from_time = int(req.get('from_time', 0))
    end_time = int(req.get('end_time', 0))
    group_tags = req.getlist('group_tag')
    filter_tags = req.getlist('filter_tag')
    db = req.get('db', 'default')
    metrics_name = req.get('metrics_name', None)
    interval = int(req.get('interval', 0))
    aggregation = req.get('aggregation', 'sum')

    if not metrics_name:
        return jsonify(status=-1, msg="invalid parameters: metrics_name is empty")

    logger.debug(DEBUG_PREFIX + "query time range: %s ~ %s",
                 datetime.fromtimestamp(from_time / 1000.0),
                 datetime.fromtimestamp(end_time / 1000.0))
    logger.debug(
        DEBUG_PREFIX +
        "query db: %s, metrics_name: %s, aggregation: %s, from_time: %s, end_time: %s, group_tags: %s, filter_tags: %s, interval: %s",
        db, metrics_name, aggregation, from_time, end_time, group_tags,
        filter_tags, interval)
    try:
        ret_stats = MetricsAgent.get_instance().query(db, metrics_name,
                                                      aggregation, from_time,
                                                      end_time, interval,
                                                      filter_tags, group_tags)
        return jsonify(status=0, values=ret_stats)
    except Exception as e:
        logger.error(e)
        return jsonify(status=-1, error=e.message)
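
A hedged sketch of how a client might call this handler, assuming it is mounted at /platform/stats/metrics as in the commented-out route table of Example #19 (the host, port and all parameter values are illustrative):

import requests

resp = requests.get("http://127.0.0.1:9001/platform/stats/metrics", params={
    "db": "default", "metrics_name": "web.notices", "aggregation": "sum",
    "from_time": 1500000000000, "end_time": 1500003600000,
    "interval": 300000, "group_tag": "strategy",
})
# expected response shape, per the docstring above:
# {"status": 0, "values": {"1500000000000": {"strategy_a": 2, "strategy_b": 1}, ...}}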
Example #19
def webui(ctx, port, debug):
    if debug:
        logger.setLevel(logging.DEBUG)
    from nebula.views import (
        auth, general, influxdbproxy, config, metrics, notice, system_perf,
        system_log, nebula_config, strategy, checkrisk, strategy_default,
        config_default, read_batch, alarm, network, user, group, risk_incident,
        logparser, permission, notice_export, notice_report, strategy_export,
        upgrade, logquery, follow, event_model, event_model_default,
        variable_model, variable_model_default)

    # register nebula_backend type2class
    from nebula_meta.model import variable_meta
    print variable_meta.VariableMeta.TYPE2Class
    # initialize the redis service
    try:
        from threathunter_common.redis.redisctx import RedisCtx
    except ImportError:
        logger.error(
            "failed to import RedisCtx from threathunter_common.redis.redisctx.")
        sys.exit(-1)
    RedisCtx.get_instance().host = settings.Redis_Host
    RedisCtx.get_instance().port = settings.Redis_Port
    logger.debug("redis initialized successfully: {}".format(RedisCtx.get_instance()))

    # initialize the metrics service
    try:
        from threathunter_common.metrics.metricsagent import MetricsAgent
    except ImportError:
        logger.error(
            "failed to import MetricsAgent from threathunter_common.metrics.metricsagent"
        )
        sys.exit(-1)

    MetricsAgent.get_instance().initialize_by_dict(metrics_dict)
    logger.debug("metrics service initialized successfully: {}.".format(MetricsAgent.get_instance().m))

    # initialize the babel service
    #    from nebula.services import babel
    #    babel.set_mode(settings.Babel_Mode)
    #    logger.debug(u"babel service initialized in {} mode.".format(babel.mode))

    # start the performance metrics logger
    from nebula.services.metricspoller import HistoryMetricsPoller
    his_metrics = HistoryMetricsPoller()
    his_metrics.start()
    logger.debug("metrics performance logging service started.")

    #    # start the periodic notice cleanup task
    #    from nebula.services.notice_cleaner import NoticeCleaner
    #    cleaner = NoticeCleaner()
    #    cleaner.start()
    #    logger.debug(u"periodic notice cleanup service started.")

    # initialize dbcontext
    from nebula.dao.DBDataCache import dbcontext
    dbcontext.init()

    #    # start the NoticeRPCServer
    #    template_path = settings.Notice_RPC_Template_Path#os.path.join(os.path.dirname(__file__), "templates")
    #    from nebula.services.notice_server import NoticeRPCServer
    #    notice_server = NoticeRPCServer(template_path)
    #    notice_server.start()

    # load the caches
    from nebula.dao.cache import Cache_Init_Functions, init_caches

    # cache of strategy weights
    from nebula.dao.strategy_dao import init_strategy_weigh
    Cache_Init_Functions.append(init_strategy_weigh)
    init_caches()

    # load session auth code
    init_session_jar()

    # start the ESLogSenderServer
    #    from nebula.services.eslog_sender import ESLogSenderServer
    #    eslogserver = ESLogSenderServer()
    #    eslogserver.start()

    urls = [
        # general module
        (r"/", general.IndexHandler),
        (r"/project", general.ProjectHandler),
        (r"/user", general.LoginHandler),
        (r"/nebula/web/config", general.WebConfigHandler),
        (r"/auth/register", auth.RegisterHandler),

        # auth and permission module
        (r"/auth/login", auth.LoginHandler),
        (r"/auth/logout", auth.LogoutHandler),
        (r"/auth/changepwd", auth.ChangePasswordHandler),
        (r"/auth/users", user.UserListHandler),
        (r"/auth/users/(.*)", user.UserQueryHandler),
        (r"/auth/groups", group.GroupListHandler),
        (r"/auth/groups/(.*)", group.GroupQueryHandler),
        (r"/auth/permissions", permission.PermissionListHandler),
        (r"/auth/permissions/(.*)/(.*)", permission.PermissionQueryHandler),
        (r"/auth/strategy_access", group.StrategyAccessHandler),
        (r"/auth/privileges", group.PrivilegesHandler),

        # system module
        (r"/system/performance/digest", system_perf.SystemPerformanceHandler),
        (r'/system/log', system_log.LogInfoHandler),
        (r'/geo/all_city', general.AllCityHandler),
        (r'/geo/all_province', general.AllProvinceHandler),
        (r'/platform/geoinfo', general.GeoStatsHandler),

        # default strategies, configs and variables
        (r"/default/variable_models/variable/(.*)/(.*)",
         variable_model_default.VariableModelQueryHandler),
        (r"/default/variable_models",
         variable_model_default.VariableModelListHandler),
        (r"/default/strategies", strategy_default.StrategyListHandler),
        (r"/default/strategies/strategy/(.*)/(.*)",
         strategy_default.StrategyQueryHandler),
        (r"/default/strategies/changestatus/",
         strategy_default.StrategyStatusHandler),
        (r"/default/config", config_default.ConfigListHandler),
        (r"/default/configproperties", config_default.ConfigPropertiesHandler),
        (r"/default/config/(.*)", config_default.ConfigHandler),
        (r"/default/event_models/event/(.*)/(.*)",
         event_model_default.EventQueryHandler),
        (r"/default/event_models", event_model_default.EventModelListHandler),

        # custom config endpoints
        (r"/platform/config", config.ConfigListHandler),
        (r"/platform/configproperties", config.ConfigPropertiesHandler),
        (r"/platform/config/(.*)", config.ConfigHandler),

        # custom event endpoints
        (r"/platform/event_models/event/(.*)/(.*)",
         event_model.EventQueryHandler),
        (r"/platform/event_models", event_model.EventListHandler),
        (r"/platform/event_models_beautify",
         event_model.EventModelBeautifyHandler),

        # custom variable endpoints
        (r"/platform/variable_models/variable/(.*)/(.*)",
         variable_model.VariableModelQueryHandler),
        (r"/platform/variable_models",
         variable_model.VariableModelListHandler),
        (r"/platform/variable_models_beautify",
         variable_model.VariableModelBeautifyHandler),

        # custom strategy endpoints
        (r"/nebula/strategies", strategy.StrategyListHandler),
        (r"/nebula/strategies/strategy/(.*)/(.*)",
         strategy.StrategyQueryHandler),
        (r"/nebula/strategies/changestatus/", strategy.StrategyStatusHandler),
        (r"/nebula/strategies/delete", strategy.StrategyBatchDelHandler),
        (r"/nebula/strategy/import", strategy_export.StrategyImportHandler),
        (r"/nebula/strategy/export", strategy_export.StrategyExportHandler),
        (r"/nebula/strategy/export/(.*)",
         strategy_export.StrategyDownloadHandler),
        (r"/nebula/strategyweigh", strategy.StrategyWeighHandler),
        (r"/nebula/tags", strategy.TagsHandler),
        (r"/nebula/tag/(.*)", strategy.TagQueryHandler),
        (r"/nebula/glossary", nebula_config.VariableGlossaryHandler),
        (r"/nebula/events", nebula_config.NebulaUIEventsHandler),
        (r"/nebula/variables", nebula_config.NebulaUIVariablesHandler),
        (r"/nebula/online/events", nebula_config.NebulaOnlineEventsHandler),
        (r"/nebula/online/variables",
         nebula_config.NebulaOnlineVariablesHandler),

        # strategy customization script endpoints
        (r"/nebula/NebulaStrategy", nebula_strategy.NebulaStrategy),
        (r"/nebula/supervisor", Supervisor),
        #        (r"/platform/variabledata/latest/(.*)", variable_value.VariableValueQueryHandler),
        #        (r"/platform/variabledata/top/(.*)", variable_value.VariableValueTopHandler),
        #        (r"/platform/variabledata/list/(.*)", variable_value.VariableValueListHandler),
        #        (r"/platform/variabledata/keytop/(.*)", variable_value.VariableValueKeyTopHandler),

        # risk incident endpoints
        (r"/platform/risk_incidents", risk_incident.IncidentListHandler),
        #        (r"/platform/risks/statistics", risk_incident.RisksStatisticsHandler),
        #        (r"/platform/risks/realtime", risk_incident.RisksRealtimeHandler),
        (r"/platform/risks/history", risk_incident.RisksHistoryHandler),
        (r'/platform/risks/black_check', risk_incident.BlackHandler),
        (r"/platform/risks/(.*)", risk_incident.IncidentQueryHandler),
        (r"/platform/notices/export", notice_export.NoticeExportHandler),
        (r"/platform/notices/export/(.*)", tornado.web.StaticFileHandler, {
            "path": settings.NoticeExport_Path
        }),
        (r'/platform/stats/notice_report', notice_report.NoticeReportHandler),

        # general query endpoints for statistics data sources
        #        (r'/platform/stats/online', data_bus.OnlineDataHandler),
        #        (r'/platform/stats/slot', data_bus.SlotDataHandler),
        #        (r'/platform/stats/slot_baseline', data_bus.SlotBaseLineDataHandler),
        #        (r'/platform/stats/offline', data_bus.OfflineDataHandler),
        #        (r'/platform/stats/offline_baseline', data_bus.OfflineBaseLineDataHandler),
        #        (r'/platform/stats/profile', data_bus.ProfileDataHandler),
        #        (r'/platform/stats/offline_serial', data_bus.OfflineSerialDataHandler),
        #        (r'/platform/stats/metrics', data_bus.MetricsDataHandler),
        #        (r'/platform/stats/notice', data_bus.NoticeDataHandler),
        #        (r'/platform/stats/risk_incident', data_bus.RiskIncidentDataHandler),
        #        (r'/platform/stats/geo', data_bus.GEODataHandler),
        #        (r'/platform/stats/threat_map', data_bus.ThreatMapDataHandler),
        #        (r'/platform/stats/clean_cache', data_bus.CleanCacheHandler),

        # persisted event query data
        (r'/platform/alarm', alarm.AlarmListHandler),
        (r'/platform/alarm/valid_count', alarm.ValidCountHandler),
        (r'/platform/alarm/statistics', alarm.StatisticsHandler),
        (r'/platform/network/statistics', network.NetworkStatisticsHandler),
        #        (r'/platform/alarm/statistics_detail', alarm.StatisticsDetailHandler),
        #        (r'/platform/behavior/strategy_statistic', incident_stat.StrategyStatHandler),
        (r'/platform/behavior/tag_statistics', alarm.TagStatHandler),

        #        (r'/platform/behavior/start_time',incident_stat.PersistBeginTimeHandler),
        #        (r'/platform/behavior/statistics', incident_stat.IncidentStatsHandler),
        #        (r'/platform/behavior/clicks_detail', incident_stat.ClickDetailHandler),
        #        (r'/platform/behavior/related_statistics', incident_stat.RelatedStatisticHandler),
        #        (r'/platform/behavior/continuous_related_statistic', incident_stat.ContinuousRelatedStatHandler),
        #        (r'/platform/behavior/continuous_top_related_statistic', incident_stat.ContinuousTopRelatedStatHandler),
        #        (r'/platform/behavior/scene_statistic', incident_stat.SceneStatHandler),

        #        (r"/platform/behavior/user_statistics", incident_stat.UserStatHandler),
        #        (r'/platform/behavior/page_statistics', incident_stat.RelatedPageStatisticHandler),
        #        (r'/platform/behavior/top/clicks_location', incident_stat.ClickLocation),

        #        (r'/platform/online/visit_stream', incident_stat.OnlineVisitStreamHandler),
        #        (r'/platform/behavior/visit_stream', incident_stat.OnlineVisitStreamHandler),
        #        (r'/platform/online/clicks_period', incident_stat.OnlineClicksPeriodHandler),
        #        (r'/platform/behavior/clicks_period', incident_stat.OnlineClicksPeriodHandler),
        #        (r'/platform/online/clicks', incident_stat.OnlineClickListHandler),
        #        (r'/platform/behavior/clicks', incident_stat.OnlineClickListHandler),

        # log parsing
        (r"/platform/logparser", logparser.LogParserListHandler),
        # log query
        #        (r'/platform/logquery', incident_stat.LogQuery),
        #        (r'/platform/logquery/(.*)', tornado.web.StaticFileHandler, {'path':settings.LogQuery_Path}),
        #        (r"/platform/logquery_config", logquery.LogQueryConfigHandler),

        # metrics
        (r"/platform/metrics/(.*)", metrics.MetricsHandler),
        (r"/platform/batchmetrics/", metrics.BatchMetricsHandler),
        (r"/metricsproxy/.*", influxdbproxy.InfluxdbProxyHandler),

        # blacklist endpoints
        (r"/platform/notices", notice.NoticeListHandler),
        (r"/platform/notices/trigger_event", notice.TriggerEventHandler),
        (r"/platform/noticestats", notice.NoticeStatsHandler),
        (r"/platform/bwlist", notice.NoticeBWListHandler),
        (r"/platform/noticetimestamps", notice.NoticeTimestampListHandler),
        (r"/checkRisk", checkrisk.CheckRiskHandler),
        (r"/checkRiskTest", checkrisk.CheckRiskHandler),
        (r"/checkRiskTest/GiveMeAccept", checkrisk.GiveMeAcceptHandler),
        (r"/checkRiskTest/GiveMeReview", checkrisk.GiveMeReviewHandler),
        (r"/checkRiskTest/GiveMeReject", checkrisk.GiveMeRejectHandler),
        (r"/checkRiskTest/GiveMeNothing", checkrisk.GiveMeNothingHandler),
        (r"/checkRiskTest/GiveMeError", checkrisk.GiveMeErrorHandler),

        #        (r"/platform/monitor/riskyitems", monitor.RiskyItemsHandler),
        #        (r"/platform/monitor/toptargets", monitor.TopTargetsHandler),
        #        (r"/platform/monitor/topcities", monitor.TopCitiesHandler),
        (r"/platform/data/export/notice", notice.NoticeExportHandler),
        (r"/platform/batchCheckRisk/bwlist", read_batch.BatchBWListHandler),

        # profile queries
        #        (r"/platform/stats/profile", profile.ProfileHandler),
        #        (r"/platform/stats/page_risk", profile.ProfilePageRiskHandler),
        #        (r"/platform/stats/account_risk", profile.ProfileAccountRiskHandler),

        # crawler statistics
        (r"/platform/follow_keyword", follow.FollowKeywordHandler),
        (r"/platform/follow_keyword/(.*)", follow.FollowKeywordAnotherHandler),

        # upgrade nebula components
        (r"/platform/api/upgrade", upgrade.UpgradeHandler),

        # restful endpoints
        (r"/restful/", general.SwaggerUIHandler, {
            'assets_path': settings.Swagger_Assets_Path
        }),
        (r"/restful/(.*)", tornado.web.StaticFileHandler, {
            'path': settings.Swagger_Assets_Path
        }),
    ]

    # register the grafana urls
    metrics = [
        (r'/metrics/(.*)', general.CustomizedFileHandler, {
            'path': settings.GRAFANA_PATH
        }),
    ]
    urls.extend(metrics)

    settings.Tornado_Setting["compress_response"] = True
    settings.Tornado_Setting["job_worker"] = 10

    utils.executor = ThreadPoolExecutor(
        max_workers=settings.Tornado_Setting.get('job_worker', 10))
    application = tornado.web.Application(urls, **settings.Tornado_Setting)

    # register the restful api urls
    application_api_wrapper(application, authenticated,
                            need_auth=False)  # the restful api does not need auth

    # register nebula_strategy
    from nebula.views.nebula_config import context as nebula_context

    def load_event_schema():
        events = [_.get_dict() for _ in nebula_context.nebula_events]
        event_schema = dict()
        for e in events:
            properties = {p["name"]: p["type"] for p in e["properties"]}
            event_schema[e["name"]] = properties
        return event_schema

    def load_variable_schema():
        variables = [_.get_dict() for _ in nebula_context.nebula_variables]
        variable_schema = dict()
        for v in variables:
            variable_schema[v["name"]] = {}
        return variable_schema

    # """定时任务处理,此后需要定时任务都可往里面加,无需加机器上的crontab"""
    # scheduler = BackgroundScheduler()
    # # 添加调度任务
    # # 触发器选择 interval(间隔性),间隔时长为 60 秒
    # scheduler.add_job(scheduler_tasks[0], 'interval', seconds=60)
    # # 启动调度任务
    # scheduler.start()

    # 定时60秒启动一次
    tornado.ioloop.PeriodicCallback(scheduler_tasks[0],
                                    callback_time=1000 * 60).start()

    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(application, xheaders=True)
    http_server.listen(str(port), settings.WebUI_Address)
    click.echo("Nebula Web Start, Port:%s" % port)
    tornado.ioloop.IOLoop.instance().start()
Example #20
metrics_dict = {
    "app": "nebula_web",
    "redis": {
        "type": "redis",
        "host": settings.Redis_Host,
        "port": settings.Redis_Port
    },
    "influxdb": {
        "type": "influxdb",
        "url": settings.Influxdb_Url,
        "username": "******",
        "password": "******"
    },
    "server": settings.Metrics_Server
}
MetricsAgent.get_instance().initialize_by_dict(metrics_dict)

wsgi_safe_tests = []


def wsgi_safe(cls):
    wsgi_safe_tests.append(cls)
    return cls


class WebTestCase(AsyncHTTPTestCase):
    """Base class for web tests that also supports WSGI mode.

    Override get_handlers and get_app_kwargs instead of get_app.
    Append to wsgi_safe to have it run in wsgi_test as well.
    """
Example #21
class TestService:
    EVENT = Event("testapp", "testname", "testkey", 0, {})

    MetricsAgent.get_instance().initialize_by_dict({
        "server": "redis",
        "redis": {
            "type": "redis",
            "host": "127.0.0.1",
            "port": 6379
        }
    })

    def setup_method(self, method):
        RedisCtx.get_instance().host = "127.0.0.1"
        RedisCtx.get_instance().port = 6379

    def test_notify_queue_service(self):
        service_data = [{
            "callmode": "notify",
            "delivermode": "queue",
        }, {
            "callmode": "notify",
            "delivermode": "queue",
        }]
        client_result, server_result = self._common_test_process(service_data)
        assert client_result[0] == (True, None)
        sleep(1)
        assert server_result[0] == [TestService.EVENT]

    def test_notify_topic_service(self):
        service_data = [{
            "callmode": "notify",
            "delivermode": "topic",
            "serversubname": "consumer1"
        }, {
            "callmode": "notify",
            "delivermode": "topic",
            "serversubname": "consumer2"
        }]
        client_result, server_result = self._common_test_process(service_data)
        assert client_result[0] == (True, None)
        assert server_result[0] == [TestService.EVENT]
        assert server_result[1] == [TestService.EVENT]

    def test_notify_sharding_service(self):
        service_data = [{
            "callmode": "notify",
            "delivermode": "sharding",
            "serverseq": 1
        }, {
            "callmode": "notify",
            "delivermode": "sharding",
            "serverseq": 2
        }]

        events = list()
        for i in range(10):
            e = TestService.EVENT.copy()
            e.key = str(i)
            events.append(e)

        client_result, server_result = self._common_test_process(
            service_data, events)
        print events
        print server_result
        assert client_result == [(True, None)] * 10
        assert len(server_result[0]) + len(server_result[1]) == 10
        assert set(events) == set(server_result[0] + server_result[1])

    def test_batch_notify_service(self):
        events = list()
        for i in range(10):
            e = TestService.EVENT.copy()
            e.key = str(i)
            events.append(e)

        s = ServiceMeta.from_dict({
            "name": "test",
            "callmode": "notify",
            "delivermode": "queue",
            "serverimpl": "redis",
            "coder": "mail",
            "options": {
                "cdc": "sh",
                "sdc": "sh"
            }
        })

        client = ServiceClient(s)
        client.start()

        echo_service, results = get_echo_service()
        server = ServiceServer(s, echo_service)
        server.start()

        if events is None:
            events = [TestService.EVENT]

        client_response_list = list()
        for event in events:
            response = client.batch_notify(event, event.key, limit=5)
            client_response_list.append(response)

            sleep(0.1)
            print len(results)

        sleep(1)
        client.close()
        server.close()
        print client_response_list
        print results

    def test_rpc_queue_service(self):
        service_data = [{
            "callmode": "rpc",
            "delivermode": "queue",
        }]
        client_result, server_result = self._common_test_process(service_data)
        print client_result, server_result
        assert client_result[0] == (True, TestService.EVENT)
        assert server_result[0] == [TestService.EVENT]

    def test_mrpc_topic_service(self):
        service_data = [{
            "callmode": "polling",
            "delivermode": "topic",
            "servercardinality": 2,
            "serversubname": "consumer1",
            "serverseq": 1
        }, {
            "callmode": "polling",
            "delivermode": "topic",
            "servercardinality": 2,
            "serversubname": "consumer2",
            "serverseq": 2
        }]
        client_result, server_result = self._common_test_process(service_data)
        print client_result, server_result
        assert client_result[0] == (True,
                                    [TestService.EVENT, TestService.EVENT])
        assert server_result[0] == [TestService.EVENT]
        assert server_result[1] == [TestService.EVENT]

    def _common_test_process(self, service_data, events=None):
        if not isinstance(service_data, list):
            service_data = [service_data]
        services = list()
        for d in service_data:
            s = ServiceMeta.from_dict({
                "name": "test",
                "callmode": d.get("callmode", "notify"),
                "delivermode": d.get("delivermode", "queue"),
                "serverimpl": "redis",
                "coder": "mail",
                "options": {
                    "cdc": "sh",
                    "sdc": "sh",
                    "serversubname": d.get("serversubname", ""),
                    "serverseq": d.get("serverseq", ""),
                    "servercardinality": d.get("servercardinality", 1)
                }
            })
            services.append(s)

        client = ServiceClient(services[0])
        client.start()

        servers = list()
        server_results_list = list()
        for s in services:
            echo_service, results = get_echo_service()
            server = ServiceServer(s, echo_service)
            servers.append(server)
            server.start()
            server_results_list.append(results)

        if events is None:
            events = [TestService.EVENT]

        client_response_list = list()
        for event in events:
            response = client.send(event, event.key, timeout=5)
            client_response_list.append(response)

        sleep(1)
        client.close()
        map(lambda s: s.close(), servers)
        return client_response_list, server_results_list
Example #22
def compute_statistic(specify_db_path=None):
    """
    Run the hourly offline statistics pass over the log directory.
    """
    # initialize the redis instance
    RedisCtx.get_instance().host = settings.Redis_Host
    RedisCtx.get_instance().port = settings.Redis_Port
    MetricsAgent.get_instance().initialize_by_dict(metrics_dict)

    # get the paths where the log files are located
    db_path, logs_path = Index.get_log_paths(specify_db_path)
    work_hour = db_path.rsplit('/', 1)[-1]
    t = datetime.strptime(work_hour, settings.LogPath_Format)
    settings.Working_TS = time.mktime(
        (t.year, t.month, t.day, t.hour, t.minute, t.second, 0, 0, 0))
    settings.Working_DAY = int(
        time.mktime((t.year, t.month, t.day, 0, 0, 0, 0, 0, 0)))
    logger.debug(DEBUG_PREFIX + 'working_hour:%s working_ts:%s, len:%s',
                 work_hour, settings.Working_TS, len(str(settings.Working_TS)))

    # load record schema && record header from each hourly log folder
    utils.load_record_settings(db_path)

    # regenerate the python_index index directory
    Index.regen_index(db_path)

    # fetch all strategy weights
    utils.get_strategies_weigh()

    # fetch the compute variables
    compute_variables = utils.fetch_compute_variables()
    logger.debug(DEBUG_PREFIX + 'compute variables obtained: %s',
                 [_['name'] for _ in compute_variables if _['name']])

    # for debugging newly added variables with a local variable file; the file
    # can contain just the single variable tree to avoid a long wait
    # import json
    # with open('/home/threathunter/nebula/nebula_web/venv/lib/python2.7/site-packages/nebula_utils/unittests/offline.json', 'r') as f:
    # compute_variables = json.load(f)
    # cvs = [ ComputeVariableHandler.get_compute_variable(**_) for _ in compute_variables]
    # dag.add_nodes(cvs)

    # walk the logs for offline statistics
    compute_dag(compute_variables, logs_path)

    # register the callback that generates risk incidents
    Hook_Functions.append(gen_risk_incidents)

    # counter for click-count statistics
    Hook_Functions.append(gen_click_counter)
    #    Hook_Functions.append(gen_related_counter)

    # register the callback that builds the user visit profile
    Hook_Functions.append(gen_visit_profile)

    # compute notice statistics for the past hour
    ioloop.IOLoop.current().run_sync(gen_notice_statistics)

    # merge the notice metrics of the past hour
    last = millis_now()
    logger.info("starting to merge history metrics")
    merge_history_metrics('default',
                          'web.notices',
                          'sum',
                          group_tags=['test', 'strategy', 'location', 'url'])
    now = millis_now()
    logger.info("time spent: {}ms".format(now - last))
    last = now

    # clean up the statistics data directory
    logger.info("starting to clean the statistics data directory")
    stat_db_tmp_path, stat_db_path = get_stat_db_path(db_path)
    now = millis_now()
    logger.info("time spent: {}ms".format(now - last))
    last = now

    # persist the statistics data
    logger.info("starting to persist the statistics data")
    write_statistic_data(stat_db_tmp_path)
    now = millis_now()
    logger.info("time spent: {}ms".format(now - last))
    last = now

    # statistics done: rename the temporary directory to the official one to serve queries
    logger.info("starting to move the directory")
    shutil.move(stat_db_tmp_path, stat_db_path)
    now = millis_now()
    logger.info("time spent: {}ms".format(now - last))
    last = now

    # once the scheduled run finishes, call the web API to clear caches and refresh data
    utils.clean_cache()
    now = millis_now()
    logger.info("time spent: {}ms".format(now - last))
Example #23
    def teardown_method(self, method):
        MetricsAgent.get_instance().clear("test", "test")
Example #24
metrics_dict = {
    "redis": {
        "type": "redis",
        "host": "127.0.0.1",
        "port": 7000,
        "password": "******",
        "nodes": nodes
    },
    "influxdb": {
        "type": "influxdb",
        "url": "127.0.0.1",
        "username": "******",
        "password": "******"
    },
    "server": "redis"
}

MetricsAgent.get_instance().initialize_by_dict(metrics_dict)

test_db = "nebula.offline"
# the proxy version allows passing in a ts...
test_ts = time.time() * 1000
test_type = "sum"
metrics_name = "cronjob.notice_stat"
test_interval = 300

MetricsAgent.get_instance().add_metrics(test_db, metrics_name, {
    "status": "run",
    "ts": test_ts
}, 1.0, 600)
MetricsAgent.get_instance().add_metrics(test_db, metrics_name, {
    "status": "failed",
    "ts": test_ts
}, 1.0, 600)
Example #25
def test_influxdb():
    c_file = os.path.join(os.path.dirname(__file__), "metrics.conf")
    MetricsAgent.get_instance().initialize(c_file, "influxdb")
    common_test()
Example #26
def test_agent():
    c_file = os.path.join(os.path.dirname(__file__), "metrics.conf")
    MetricsAgent.get_instance().initialize(c_file)