Example #1
def list_trouble_stat(ns):
    conf = open_logdag_config(ns)
    amulog_conf = config.open_config(conf["database_amulog"]["source_conf"])
    from . import trouble
    tm = trouble.init_trouble_manager(conf)
    from amulog import log_db
    ld = log_db.LogData(amulog_conf)
    gid_name = conf.get("database_amulog", "event_gid")

    from scipy.stats import entropy

    table = [[
        "trouble_id", "group", "messages", "gids", "hosts", "events", "groups",
        "entropy_events", "entropy_groups"
    ]]
    for tr in tm:
        line = []
        d_ev, d_gid, d_host = trouble.event_stat(tr, ld, gid_name)
        d_group = trouble.event_label(d_gid, ld, gid_name)
        ent_ev = entropy(list(d_ev.values()), base=2)
        ent_group = entropy(
            [sum([d_gid[gid] for gid in l_gid]) for l_gid in d_group.values()],
            base=2)
        line.append(tr.tid)
        line.append(tr.data["group"])
        line.append(sum(d_gid.values()))  # messages
        line.append(len(d_gid.keys()))  # gids
        line.append(len(d_host.keys()))  # hosts
        line.append(len(d_ev.keys()))  # events
        line.append(len(d_group.keys()))  # groups
        line.append(ent_ev)  # entropy of events
        line.append(ent_group)  # entropy of groups
        table.append(line)

    print(common.cli_table(table))
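
A note on the entropy columns: scipy.stats.entropy normalizes its input to a probability distribution, so the raw message counts can be passed directly, and base=2 gives Shannon entropy in bits. A minimal self-contained sketch (the counts below are made up):

from scipy.stats import entropy

# Hypothetical per-event message counts for one trouble ticket.
d_ev = {("gid1", "host1"): 50, ("gid2", "host1"): 30, ("gid2", "host2"): 20}

# entropy() normalizes counts internally, so this is equivalent to
# passing the probabilities [0.5, 0.3, 0.2].
ent_ev = entropy(list(d_ev.values()), base=2)
print(ent_ev)  # ~1.49 bits; 0.0 would mean one event dominates completely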
Example #2
def show_trouble_info(ns):
    conf = open_logdag_config(ns)
    tid = ns.tid

    from . import trouble
    dirname = conf.get("eval", "path")
    tm = trouble.TroubleManager(dirname)
    from amulog import log_db
    ld = log_db.LogData(conf)
    from amulog import lt_label
    ll = lt_label.init_ltlabel(conf)
    gid_name = conf.get("dag", "event_gid")

    tr = tm[tid]
    d_ev, d_gid, d_host = trouble.event_stat(tr, ld, gid_name)
    d_group = trouble.event_label(d_gid, ld, ll)

    print(tr)
    print("{0} related events".format(len(d_ev)))
    print("{0} related hosts: {1}".format(len(d_host), sorted(d_host.keys())))
    print("{0} related templates: {1}".format(len(d_gid),
                                              sorted(d_gid.keys())))
    for group, l_gid in d_group.items():
        num = sum([d_gid[gid] for gid in l_gid])
        print("  group {0}: {1} messages, {2} templates {3}".format(
            group, num, len(l_gid), l_gid))
Example #3
def search_trouble(ns):
    conf = open_logdag_config(ns)
    amulog_conf = config.open_config(conf["database_amulog"]["source_conf"])
    d = parse_condition(ns.conditions)
    from . import trouble
    tm = trouble.init_trouble_manager(conf)
    from amulog import log_db
    ld = log_db.LogData(amulog_conf)
    gid_name = conf.get("database_amulog", "event_gid")

    # match group
    if "group" in d:
        l_tr = [tr for tr in tm if tr.data["group"] == d["group"]]
    else:
        l_tr = [tr for tr in tm]

    # match event
    if "gid" in d or "host" in d:
        search_gid = d.get("gid", None)
        search_host = d.get("host", None)
        ret = []
        for tr in l_tr:
            for lid in tr.data["message"]:
                lm = ld.get_line(lid)
                gid = lm.lt.get(gid_name)
                host = lm.host
                if (search_gid is None or search_gid == gid) and \
                        (search_host is None or search_host == host):
                    ret.append(tr)
                    break
        l_tr = ret

    for tr in l_tr:
        print(tr)
Example #4
    def test_db_sqlite3(self):
        conf = config.open_config()
        path_testlog = conf['general']['src_path']
        path_db = conf['database']['sqlite3_filename']

        tlg = testlog.TestLogGenerator(testlog.DEFAULT_CONFIG, seed=3)
        tlg.dump_log(path_testlog)

        l_path = config.getlist(conf, "general", "src_path")
        if conf.getboolean("general", "src_recur"):
            targets = common.recur_dir(l_path)
        else:
            targets = common.rep_dir(l_path)
        log_db.process_files(conf, targets, True)

        ld = log_db.LogData(conf)
        num = ld.count_lines()
        self.assertEqual(num, 6539, "not all logs added to database")
        ltg_num = len([gid for gid in ld.iter_ltgid()])
        self.assertTrue(3 < ltg_num < 10,
                        ("log template generation fails? "
                         "(groups: {0})".format(ltg_num)))

        del ld
        common.rm(path_testlog)
        common.rm(path_db)
Example #5
def log2ts_pal(conf, dt_range, pal=1):
    from amulog import common
    timer = common.Timer(
        "make-tsdb subtask ({0[0]} - {0[1]})".format(dt_range), output=_logger)
    timer.start()

    gid_name = conf.get("dag", "event_gid")
    usefilter = conf.getboolean("database_ts", "usefilter")

    from amulog import log_db
    ld = log_db.LogData(conf)
    if gid_name == "ltid":
        iterobj = ld.whole_host_lt(dt_range[0], dt_range[1], "all")
    elif gid_name == "ltgid":
        iterobj = ld.whole_host_ltg(dt_range[0], dt_range[1], "all")
    else:
        raise NotImplementedError

    import multiprocessing
    td = TimeSeriesDB(conf, edit=True)
    l_args = [(conf, dt_range, gid, host) for host, gid in iterobj]
    with multiprocessing.Pool(processes=pal) as pool:
        for ret in pool.imap_unordered(log2ts_elem, l_args):
            gid, host, stat, new_l_dt, val = ret
            if new_l_dt is not None and len(new_l_dt) > 0:
                for dt in new_l_dt:
                    td.add_line(dt, gid, host)
            td.add_filterlog(dt_range, gid, host, stat, val)
        pool.close()
        pool.join()
    td.commit()
    timer.stop()
    return
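
Note the design choice here: each worker job (log2ts_elem, Example #6 below) opens its own log_db.LogData, because database handles generally cannot be shared across process boundaries; only picklable tuples travel through the pool, and all writes go through the single TimeSeriesDB in the parent. A minimal invocation sketch, with a hypothetical config path and date window:

import datetime

conf = config.open_config("logdag.conf")  # hypothetical path
dt_range = (datetime.datetime(2024, 1, 1), datetime.datetime(2024, 1, 2))
log2ts_pal(conf, dt_range, pal=4)  # fan per-(gid, host) jobs out to 4 workers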
Example #6
def log2ts_elem(args):
    conf, dt_range, gid, host = args
    name = "{0}_{1}_{2}".format(dtutil.shortstr(dt_range[0]), gid, host)
    _logger.info("make-tsdb job start ({0})".format(name))
    from amulog import log_db
    ld = log_db.LogData(conf)
    gid_name = conf.get("dag", "event_gid")
    d = {
        gid_name: gid,
        "host": host,
        "top_dt": dt_range[0],
        "end_dt": dt_range[1]
    }
    iterobj = ld.iter_lines(**d)
    l_dt = sorted([line.dt for line in iterobj])
    del iterobj
    _logger.debug("gid {0}, host {1}: {2} counts".format(gid, host, len(l_dt)))
    assert len(l_dt) > 0

    evdef = (gid, host)
    stat, new_l_dt, val = apply_filter(conf, ld, l_dt, dt_range, evdef)

    fl = FilterLog(dt_range, gid, host, stat, val)
    _logger.debug(str(fl))
    _logger.info("make-tsdb job done ({0})".format(name))

    return (gid, host, stat, new_l_dt, val)
Example #7
def process_files_offline(conf, targets, reset_db, parallel=False):
    """Add log messages to DB from files. This function do NOT process
    messages incrementally. Use this to avoid bad-start problem of
    log template generation with clustering or training methods.

    Note:
        This function needs large memory space.

    Args:
        conf (config.ExtendedConfigParser): A common configuration object.
        targets (List[str]): A sequence of filepaths to process.
        reset_db (bool): True if DB needs to reset before adding.
        parallel (bool, optional): Use multiprocessing.

    Raises:
        IOError: If a file in targets not found.
    """
    msg = "amulog offline processing"
    if parallel:
        msg += " in parallel"
    _logger.info(msg)

    ld = log_db.LogData(conf, edit=True, reset_db=reset_db)
    ltm = LTManager(conf, ld.db, ld.lttable, reset_db=reset_db,
                    parallel=parallel)

    l_line = [line for line in iter_lines(targets)]
    ltm.process_offline(l_line)
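
Example #11 later in this listing shows the intended call pattern. A condensed sketch, assuming process_files_offline is exposed via amulog.manager (as that test suggests) and using a hypothetical config path:

from amulog import config, manager
from amulog import __main__ as amulog_main

conf = config.open_config("amulog.conf")  # hypothetical path
targets = amulog_main.get_targets_conf(conf)  # resolves src_path / src_recur
manager.process_files_offline(conf, targets, reset_db=True, parallel=False)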
Example #8
def process_files_online(conf, targets, reset_db):
    """Add log messages to DB from files.

    Args:
        conf (config.ExtendedConfigParser): A common configuration object.
        targets (List[str]): A sequence of filepaths to process.
        reset_db (bool): True if the DB needs to be reset before adding.

    Raises:
        IOError: If a file in targets is not found.
    """
    def _sigterm_handler(signum, frame):
        # Python signal handlers receive (signum, frame)
        raise KeyboardInterrupt

    import signal
    signal.signal(signal.SIGTERM, _sigterm_handler)

    msg = "amulog online processing"
    _logger.info(msg)

    ld = log_db.LogData(conf, edit=True, reset_db=reset_db)
    ltm = LTManager(conf, ld.db, ld.lttable, reset_db=reset_db)

    try:
        for line in iter_lines(targets):
            ltm.process_line(line)
    except KeyboardInterrupt:
        pass
    finally:
        ltm.commit_db()
        ltm.dump()
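
The handler converts SIGTERM into KeyboardInterrupt so that the finally block still commits the DB and dumps the template manager state when the process is killed externally. A self-contained sketch of the mechanism on POSIX (note that Python signal handlers must accept (signum, frame)):

import os
import signal

def _sigterm_handler(signum, frame):
    raise KeyboardInterrupt

signal.signal(signal.SIGTERM, _sigterm_handler)
try:
    os.kill(os.getpid(), signal.SIGTERM)  # simulate an external `kill`
except KeyboardInterrupt:
    print("clean shutdown: commit_db() and dump() would run here")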
Example #9
def search_trouble(ns):
    conf = open_logdag_config(ns)
    d = parse_condition(ns.conditions)
    from . import trouble
    dirname = conf.get("eval", "path")
    tm = trouble.TroubleManager(dirname)
    from amulog import log_db
    ld = log_db.LogData(conf)
    gid_name = conf.get("dag", "event_gid")
    from logdag import dtutil

    # match group
    if "group" in d:
        l_tr = [tr for tr in tm if tr.data["group"] == d["group"]]
    else:
        l_tr = [tr for tr in tm]

    # match event
    if "gid" in d or "host" in d:
        search_gid = d.get("gid", None)
        search_host = d.get("host", None)
        ret = []
        for tr in l_tr:
            for lid in tr.data["message"]:
                lm = ld.get_line(lid)
                gid = lm.lt.get(gid_name)
                host = lm.host
                if (search_gid is None or search_gid == gid) and \
                        (search_host is None or search_host == host):
                    ret.append(tr)
                    break
        l_tr = ret

    for tr in l_tr:
        print(tr)
Example #10
def make_crf_model(ns):
    conf_load = config.open_config(ns.config_load)
    conf_dump = config.open_config(ns.config_dump)
    lv = logging.DEBUG if ns.debug else logging.INFO
    config.set_common_logging(conf_dump, logger=_logger, lv=lv)

    from . import train
    output = ns.output
    if ns.train_file is None:
        from amulog import log_db
        d = parse_condition(ns.conditions)
        output_sampled = ns.output_sampled
        get_output_sampled = output_sampled is not None

        ld = log_db.LogData(conf_load)
        iterobj = [lm for lm in ld.iter_lines(**d)]
        if get_output_sampled:
            fn, l_train = train.make_crf_model(conf_dump, iterobj, output,
                                               return_sampled_messages=True)
            import pickle
            with open(output_sampled, 'wb') as f:
                pickle.dump(l_train, f)
            print("> {0}".format(output_sampled))
        else:
            fn = train.make_crf_model(conf_dump, iterobj, output)
    else:
        fn = train.make_crf_model_from_trainfile(conf_dump, ns.train_file, output)
    print("> {0}".format(fn))
Example #11
    def test_makedb_offline(self):
        from amulog import __main__ as amulog_main
        targets = amulog_main.get_targets_conf(self._conf)
        manager.process_files_offline(self._conf, targets, reset_db=True)

        ld = log_db.LogData(self._conf)
        num = ld.count_lines()
        self.assertEqual(num, 6539, "not all logs added to database")
        ltg_num = len([gid for gid in ld.iter_ltgid()])
        self.assertTrue(3 < ltg_num < 20, ("log template generation fails? "
                                           "(groups: {0})".format(ltg_num)))
Example #12
def make_crf_train(ns):
    conf_load = config.open_config(ns.config_load)
    conf_dump = config.open_config(ns.config_dump)
    lv = logging.DEBUG if ns.debug else logging.INFO
    config.set_common_logging(conf_dump, logger=_logger, lv=lv)

    from . import train
    from amulog import log_db
    d = parse_condition(ns.conditions)
    ld = log_db.LogData(conf_load)
    iterobj = ld.iter_lines(**d)
    print(train.crf_trainfile(conf_dump, iterobj))
Example #13
    def __init__(self, dt_range, conf_path, gid_name, use_mapping):
        self.conf = config.open_config(conf_path)
        self._ld = log_db.LogData(self.conf)
        self._gid_name = gid_name
        self.dt_range = dt_range

        self._mapper = None
        if use_mapping:
            # use if tsdb is anonymized but amulog db is original
            from amulog import anonymize
            self._mapper = anonymize.AnonymizeMapper(self.conf)
            self._mapper.load()
Example #14
def show_trouble(ns):
    conf = open_logdag_config(ns)
    tid = ns.tid

    from . import trouble
    dirname = conf.get("eval", "path")
    tm = trouble.TroubleManager(dirname)
    from amulog import log_db
    ld = log_db.LogData(conf)

    tr = tm[tid]
    print(tr)
    print("\n".join(tr.get_message(ld, show_lid=ns.lid_header)))
Example #15
def show_trouble(ns):
    conf = open_logdag_config(ns)
    amulog_conf = config.open_config(conf["database_amulog"]["source_conf"])
    tid = ns.tid

    from . import trouble
    tm = trouble.init_trouble_manager(conf)
    from amulog import log_db
    ld = log_db.LogData(amulog_conf)

    tr = tm[tid]
    print(tr)
    print("\n".join(tr.get_message(ld, show_lid=ns.lid_header)))
Example #16
def count_node_label(conf):
    from amulog import log_db
    ld = log_db.LogData(conf)
    from amulog import lt_label
    ll = lt_label.init_ltlabel(conf)

    d_group = defaultdict(int)
    from logdag import showdag
    for r in showdag.iter_results(conf):
        r.load_ltlabel(conf, ld=ld, ll=ll)
        for node in r.graph.nodes():
            evdef = r.node_evdef(node)
            node_group = r._label_group_ltg(evdef.gid)
            d_group[node_group] += 1
    return d_group
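
A usage sketch (config path hypothetical): the returned defaultdict maps each node label group to how many DAG nodes across all results carry it.

conf = config.open_config("logdag.conf")  # hypothetical path
for group, cnt in sorted(count_node_label(conf).items(),
                         key=lambda x: x[1], reverse=True):
    print("{0}: {1} nodes".format(group, cnt))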
Example #17
def separate_args(conf, tr):
    """Some troubles can appear among multiple days.
    This function separates DAG arguments and corresponding logs.
    """
    from logdag import arguments
    am = arguments.ArgumentManager(conf)
    am.load()
    from amulog import log_db
    ld = log_db.LogData(arguments.open_amulog_config(conf))

    d_args = defaultdict(list)
    for lid in tr.data["message"]:
        lm = ld.get_line(lid)
        for args in am.args_from_time(lm.dt):
            name = arguments.args2name(args)
            d_args[name].append(lm)
    return [(arguments.name2args(name, conf), l_lm)
            for name, l_lm in d_args.items()]
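
A sketch of consuming the separated per-window results; args2name is the same helper the function itself uses, and conf / tr are assumed to already be in scope:

from logdag import arguments

for args, l_lm in separate_args(conf, tr):
    name = arguments.args2name(args)
    print("{0}: {1} messages".format(name, len(l_lm)))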
Example #18
def count_edge_label(conf):
    from amulog import log_db
    ld = log_db.LogData(conf)
    from amulog import lt_label
    ll = lt_label.init_ltlabel(conf)

    d_group = defaultdict(int)
    from logdag import showdag
    for r in showdag.iter_results(conf):
        r.load_ltlabel(conf, ld=ld, ll=ll)
        g = r.graph.to_undirected()
        for edge in g.edges():
            src_evdef, dst_evdef = r.edge_evdef(edge)
            src_group = r._label_group_ltg(src_evdef.gid)
            d_group[src_group] += 1
            dst_group = r._label_group_ltg(dst_evdef.gid)
            d_group[dst_group] += 1
    return d_group
Example #19
def log2ts(conf, dt_range):
    _logger.info("make-tsdb job start ({0[0]} - {0[1]})".format(dt_range))
    
    gid_name = conf.get("dag", "event_gid")
    usefilter = conf.getboolean("database_ts", "usefilter")
    top_dt, end_dt = dt_range
    
    from amulog import log_db
    ld = log_db.LogData(conf)
    if gid_name == "ltid":
        iterobj = ld.whole_host_lt(top_dt, end_dt, "all")
    elif gid_name == "ltgid":
        iterobj = ld.whole_host_ltg(top_dt, end_dt, "all")
    else:
        raise NotImplementedError

    for host, gid in iterobj:
        # load time-series from db
        d = {gid_name: gid,
             "host": host,
             "top_dt": top_dt,
             "end_dt": end_dt}
        lines = ld.iter_lines(**d)
        l_dt = sorted([line.dt for line in lines])
        _logger.debug("gid {0}, host {1}: {2} counts".format(gid, host,
                                                             len(l_dt)))
        assert len(l_dt) > 0

        # apply preprocessing(filter)
        evdef = (gid, host)
        stat, new_l_dt, val = apply_filter(conf, ld, l_dt, dt_range, evdef)

        # update database
        td = TimeSeriesDB(conf, edit=True)
        if new_l_dt is not None and len(new_l_dt) > 0:
            for dt in new_l_dt:
                td.add_line(dt, gid, host)
        td.add_filterlog(dt_range, gid, host, stat, val)
        td.commit()

        fl = FilterLog(dt_range, gid, host, stat, val)
        _logger.debug(str(fl))
    
    _logger.info("make-tsdb job done".format(dt_range))
Example #20
def show_graph_diff_lts(ns):
    conf_fn1, conf_fn2 = ns.confs
    conf1 = arguments.open_logdag_config(conf_fn1)
    conf2 = arguments.open_logdag_config(conf_fn2)
    lv = logging.DEBUG if ns.debug else logging.INFO
    am_logger = logging.getLogger("amulog")
    config.set_common_logging(conf1, logger=[_logger, am_logger], lv=lv)

    from amulog import log_db
    ld = log_db.LogData(conf1)

    from . import comparison
    d_ltid = comparison.edge_diff_gid(conf1, conf2)
    for ltid, l_name in sorted(d_ltid.items(),
                               key=lambda x: len(x[1]),
                               reverse=True):
        print("{0}: {1} ({2})".format(len(l_name), ltid, ld.lt(ltid)))
        if len(l_name) < 100:
            print(l_name)
Example #21
def ts_filtered(conf, **kwargs):
    assert "dts" in kwargs
    assert "dte" in kwargs
    assert "gid" in kwargs
    assert "host" in kwargs
    
    from amulog import log_db
    ld = log_db.LogData(conf)
    gid_name = conf.get("dag", "event_gid")
    d = {"top_dt": kwargs["dts"],
         "end_dt": kwargs["dte"],
         gid_name: kwargs["gid"],
         "host": kwargs["host"]}
    l_dt = [line.dt for line in ld.iter_lines(**d)]

    td = TimeSeriesDB(conf)
    l_ts = [dt for dt in td.iter_ts(**kwargs)]
    l_filtered = [dt for dt in l_dt if dt not in l_ts]

    return l_filtered, l_ts
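
A usage sketch; the four keyword arguments are the ones the asserts above require (dts/dte as datetimes, plus gid and host). The values here are hypothetical:

import datetime

l_filtered, l_ts = ts_filtered(
    conf,
    dts=datetime.datetime(2024, 1, 1),
    dte=datetime.datetime(2024, 1, 2),
    gid=12,
    host="sv01")
print("{0} messages filtered out, {1} kept".format(len(l_filtered), len(l_ts)))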
Example #22
def list_trouble_label(ns):
    conf = open_logdag_config(ns)
    amulog_conf = config.open_config(conf["database_amulog"]["source_conf"])
    from . import trouble
    tm = trouble.init_trouble_manager(conf)
    from amulog import log_db
    ld = log_db.LogData(amulog_conf)
    gid_name = conf.get("database_amulog", "event_gid")

    for tr in tm:
        d_ev, d_gid, d_host = trouble.event_stat(tr, ld, gid_name)
        d_group = trouble.event_label(d_gid, ld, gid_name)

        buf = "{0} ({1}): ".format(tr.tid, tr.data["group"])
        for group, l_gid in sorted(d_group.items(),
                                   key=lambda x: len(x[1]),
                                   reverse=True):
            num = sum([d_gid[gid] for gid in l_gid])
            buf += "{0}({1},{2}) ".format(group, len(l_gid), num)
        print(buf)
Example #23
def show_graph_diff_lts(ns):
    l_conffp = ns.confs
    assert len(l_conffp) == 2
    openconf = lambda c: config.open_config(
        c, ex_defaults=[arguments.DEFAULT_CONFIG])
    conf1, conf2 = [openconf(c) for c in l_conffp]
    lv = logging.DEBUG if ns.debug else logging.INFO
    am_logger = logging.getLogger("amulog")
    config.set_common_logging(conf1, logger=[_logger, am_logger], lv=lv)

    from amulog import log_db
    ld = log_db.LogData(conf1)

    from . import comp_conf
    d_ltid = comp_conf.edge_diff_gid(conf1, conf2)
    for ltid, l_name in sorted(d_ltid.items(),
                               key=lambda x: len(x[1]),
                               reverse=True):
        print("{0}: {1} ({2})".format(len(l_name), ltid, ld.lt(ltid)))
        if len(l_name) < 100:
            print(l_name)
Example #24
def list_trouble_label(ns):
    conf = open_logdag_config(ns)
    from . import trouble
    dirname = conf.get("eval", "path")
    tm = trouble.TroubleManager(dirname)
    from amulog import log_db
    ld = log_db.LogData(conf)
    from amulog import lt_label
    ll = lt_label.init_ltlabel(conf)
    gid_name = conf.get("dag", "event_gid")

    for tr in tm:
        d_ev, d_gid, d_host = trouble.event_stat(tr, ld, gid_name)
        d_group = trouble.event_label(d_gid, ld, ll)

        buf = "{0} ({1}): ".format(tr.tid, tr.data["group"])
        for group, l_gid in sorted(d_group.items(),
                                   key=lambda x: len(x[1]),
                                   reverse=True):
            num = sum([d_gid[gid] for gid in l_gid])
            buf += "{0}({1},{2}) ".format(group, len(l_gid), num)
        print(buf)
Example #25
def all_args(conf):
    amulog_conf = config.open_config(conf["database_amulog"]["source_conf"])
    from amulog import log_db
    ld = log_db.LogData(amulog_conf)
    w_top_dt, w_end_dt = config.getterm(conf, "dag", "whole_term")
    term = config.getdur(conf, "dag", "unit_term")
    diff = config.getdur(conf, "dag", "unit_diff")

    l_args = []
    top_dt = w_top_dt
    while top_dt < w_end_dt:
        end_dt = top_dt + term
        l_area = config.getlist(conf, "dag", "area")
        if "each" in l_area:
            l_area.pop(l_area.index("each"))
            l_area += [
                "host_" + host for host in ld.whole_host(top_dt, end_dt)
            ]
        for area in l_area:
            l_args.append((conf, (top_dt, end_dt), area))
        top_dt = top_dt + diff
    return l_args
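
The sliding-window arithmetic is easiest to see in isolation: when unit_term is larger than unit_diff, consecutive windows overlap. A standalone sketch with hypothetical values:

import datetime

w_top_dt = datetime.datetime(2024, 1, 1, 0)
w_end_dt = datetime.datetime(2024, 1, 1, 6)
term = datetime.timedelta(hours=2)  # window length (unit_term)
diff = datetime.timedelta(hours=1)  # window step (unit_diff)

top_dt = w_top_dt
while top_dt < w_end_dt:
    print(top_dt, "-", top_dt + term)  # six 2-hour windows, 1 hour apart
    top_dt += diff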
Example #26
    def test_makedb_parallel(self):
        import copy
        conf = copy.copy(self._conf)
        conf["manager"]["n_process"] = "2"
        conf["log_template"]["lt_methods"] = "re"
        conf["log_template_re"]["variable_rule"] = \
            common.filepath_local(__file__, "test_re.conf")

        from amulog import __main__ as amulog_main
        targets = amulog_main.get_targets_conf(conf)
        manager.process_files_offline(conf,
                                      targets,
                                      reset_db=True,
                                      parallel=True)

        ld = log_db.LogData(self._conf)
        num = ld.count_lines()
        self.assertEqual(num, 6539, "not all logs added to database")
        ltg_num = len([gid for gid in ld.iter_ltgid()])
        self.assertTrue(3 < ltg_num < 20, ("log template generation fails? "
                                           "(groups: {0})".format(ltg_num)))
Example #27
def show_trouble_info(ns):
    conf = open_logdag_config(ns)
    amulog_conf = config.open_config(conf["database_amulog"]["source_conf"])
    tid = ns.tid

    from . import trouble
    tm = trouble.init_trouble_manager(conf)
    from amulog import log_db
    ld = log_db.LogData(amulog_conf)
    gid_name = conf.get("database_amulog", "event_gid")

    tr = tm[tid]
    d_ev, d_gid, d_host = trouble.event_stat(tr, ld, gid_name)
    d_group = trouble.event_label(d_gid, ld, gid_name)

    print(tr)
    print("{0} related events".format(len(d_ev)))
    print("{0} related hosts: {1}".format(len(d_host), sorted(d_host.keys())))
    print("{0} related templates: {1}".format(len(d_gid),
                                              sorted(d_gid.keys())))
    for group, l_gid in d_group.items():
        num = sum([d_gid[gid] for gid in l_gid])
        print("  group {0}: {1} messages, {2} templates {3}".format(
            group, num, len(l_gid), l_gid))
Example #28
def remake_ltgroup(conf):
    ld = log_db.LogData(conf, edit=True, reset_db=False)
    ltm = LTManager(conf, ld.db, ld.lttable, reset_db=False)
    ltm.remake_ltg()
    ltm.commit_db()
Example #29
ACCEPT_SYM = False

if len(sys.argv) < 4:
    sys.exit("usage: {0} CONFIG RULE1 RULE2".format(sys.argv[0]))

conf = config.open_config(sys.argv[1])
if ACCEPT_SYM:
    sym = conf.get("log_template", "variable_symbol")
else:
    sym = None
d_rule1 = parse_condition(sys.argv[2].split(","))
d_rule2 = parse_condition(sys.argv[3].split(","))
s_ltid1 = set()
s_ltid2 = set()

ld = log_db.LogData(conf)
for lm in ld.iter_lines(**d_rule1):
    s_ltid1.add(lm.lt.ltid)

for lm in ld.iter_lines(**d_rule2):
    s_ltid2.add(lm.lt.ltid)

common = s_ltid1 & s_ltid2
print("{0} common log template found... ".format(len(common)))

d_ed1 = {}
d_ed2 = {}
for key in common:
    d_ed1[key] = 0
    d_ed2[key] = 0
    s_ltid1.remove(key)
Example #30
    def __init__(self, dt_range, conf_fn, gid_name):
        self.conf = config.open_config(conf_fn)
        self._ld = log_db.LogData(self.conf)
        self._gid_name = gid_name
        self.dt_range = dt_range
        self._ll = None