Example #1
    def __init__(self,
                 log_glob=None,
                 log_map=None,
                 log_lookback=None,
                 refresh_minutes=10):
        """ Initialize the LogDataTable with a file glob pattern to collect the
        matching logs on this machine, a timespan to aggregate for, aggregation
        bucket in hours, a refresh interval for updating in minutes and a
        log_map of the structure [{
            "line_regex" : "python regex with groups to match columns",
            "num_buckets" : "number of buckets for this key",
            "bucket_size" : "size of a bucket",
            "bucket_type" : "type of buckets",
            "column_map" : [
                [group_number 1..n,
                "Column Name",
                "type one of _int,_float,_string,_date",
                "action one of key,avg,min,max,count(value),mode,median"],
                ...]},...]
        the key action is special and indicates that this is the bucket key for this type of line
        log_lookback is of the form [ days, hours, minutes ] all must be specified """
        self.log_glob = log_glob
        self.log_map = log_map
        self.log_lookback = log_lookback
        self.file_map = {}

        DataTable.__init__(
            self, None, "LogDataTable: %s, %d minutes refresh" %
            (self.log_glob, refresh_minutes), refresh_minutes)
        self.refresh()
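A minimal construction sketch for the class above. The import path and the access-log regex are illustrative assumptions, not taken from the source; date_type and float_type are the type constants used throughout these examples, and bucket_size is in minutes for date buckets (see the refresh() in Example #17).

# Hypothetical import path; adjust to wherever LogDataTable and the type
# constants actually live in this project.
from dashboard.datatable import LogDataTable, date_type, float_type

# One line_spec: bucket access-log-style lines into 15-minute date buckets,
# keep at most 96 buckets, and average the trailing response time per bucket.
access_map = [{
    "line_regex": r"\[(\d+/\w+/\d+:\d+:\d+:\d+)[^\]]*\].*\s(\d+\.\d+)$",
    "num_buckets": 96,
    "bucket_size": 15,                              # minutes per date bucket
    "bucket_type": date_type,
    "column_map": [
        [1, "Request Time", date_type, "key"],      # "key" marks the bucket key
        [2, "Response Time", float_type, "avg"],
    ],
}]

log_table = LogDataTable(log_glob="/var/log/nginx/access.log*",
                         log_map=access_map,
                         log_lookback=[1, 0, 0],    # [ days, hours, minutes ]
                         refresh_minutes=10)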
Example #2
 def __init__(self, num_hours=24, bucket_hours=1, refresh_minutes=10):
     """ Initialize the ProcDataTable to collect system process information, a timespan to aggregate for, aggregation bucket in hours, a refresh interval for updating in minutes """
     self.num_hours = num_hours
     self.bucket_hours = bucket_hours
     DataTable.__init__(
         self, None,
         "Proc Data: for the last %d hours in %d hour buckets, refreshed every %d minutes"
         % (self.num_hours, self.bucket_hours, refresh_minutes),
         refresh_minutes)
     self.refresh()
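A hedged usage sketch (the import path is a placeholder): aggregate the last 12 hours of system metrics into 1-hour buckets, refreshing every 5 minutes.

from dashboard.datatable import ProcDataTable   # hypothetical module path

proc_table = ProcDataTable(num_hours=12, bucket_hours=1, refresh_minutes=5)
print(proc_table.get_names())                   # columns filled in by refresh()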
Example #3
    def refresh(self):
        """ refresh or rebuild tables """

        es = Elasticsearch()

        result = es.search(index=self.es_index_pattern,
                           body=self.es_query_body)

        def match_fields(name, result):
            matches = []
            if isinstance(result, dict):
                for k in result:
                    full_name = (name + "." if name else "") + k
                    item = result[k]
                    for json_path, field_name, field_type in self.es_field_map:
                        if full_name == json_path:
                            matches.append((field_name, field_type, item))
                    if isinstance(item, dict) or isinstance(item, list):
                        matches += match_fields(full_name, item)
            elif isinstance(result, list):
                for k in result:
                    matches += match_fields(name, k)
            return matches

        matches = match_fields("", result)

        new_columns = {}
        for column_name, column_type, value in matches:
            if column_name not in new_columns:
                new_columns[column_name] = Column(name=column_name)
            c = new_columns[column_name]
            if column_type == "date":
                cc = Cell(date_type, datetime.fromtimestamp(value / 1000),
                          format_date)
            elif column_type == "int":
                cc = Cell(int_type, value, format_int)
            elif column_type == "float":
                cc = Cell(float_type, value, format_float)
            elif column_type == "str":
                cc = Cell(string_type, value, format_string)
            else:
                cc = Cell(string_type, str(value), format_string)
            c.put(c.size(), cc)

        for column_name in new_columns:
            if self.has_column(column_name):
                self.replace_column(self.map_column(column_name),
                                    new_columns[column_name])
            else:
                self.add_column(new_columns[column_name])

        self.changed()

        DataTable.refresh(self)
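The es_field_map drives this refresh: each ( json_path, field_name, field_type ) tuple is compared against the dotted path of every value in the search response, and list elements keep their parent's path rather than adding an index segment. A small illustrative sketch of what match_fields would collect from a cut-down response:

# Illustrative data only; no Elasticsearch connection needed to follow the logic.
sample_result = {
    "hits": {
        "total": {"value": 42},
        "hits": [{"_source": {"host": "web-1", "cpu": 0.73}}],
    }
}

es_field_map = [
    ("hits.total.value", "Hit Count", "int"),
    ("hits.hits._source.host", "Host", "str"),    # the list adds no path segment
    ("hits.hits._source.cpu", "CPU", "float"),
]

# match_fields("", sample_result) would then return:
#   [("Hit Count", "int", 42), ("Host", "str", "web-1"), ("CPU", "float", 0.73)]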
Example #4
 def __init__(self,
              ssh_spec=None,
              table_def=None,
              name=None,
              refresh_minutes=1):
     """ accepts an ssh_spec to connect to of the form ssh://username@server_name:port_number, a json string with the definition for the remote table, the local name for this table, and the number of minutes for refresh """
     DataTable.__init__(self, None, name, refresh_minutes)
     self.ssh_spec = ssh_spec
     self.table_def = table_def
     self.connection = None
     self.refresh()
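A hedged construction sketch. The class and module names below are placeholders (the excerpt does not show them), and only the "name" key of table_def is actually referenced by the refresh() in Example #12; the remaining keys depend on what the remote table server expects.

from dashboard.datatable import RemoteDataTable   # hypothetical class/module names

remote_def = {"name": "syslog"}                   # remote table definition (shape assumed)

remote_table = RemoteDataTable(ssh_spec="ssh://dashboard@monitor01:22",
                               table_def=remote_def,
                               name="Remote Syslog",
                               refresh_minutes=1)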
Example #5
 def __init__(self,
              refresh_minutes=1,
              csv_spec=None,
              csv_map=None,
              csv_name=None):
     """ Initialize the CSVDataTable object from the file named in csv_spec and extract the columns in the provided csv_map, name the table based on the name provided or extracted from the CSV """
     self.csv_spec = csv_spec
     self.csv_map = csv_map
     self.csv_name = csv_name
     DataTable.__init__(self, None, (csv_name if csv_name else None),
                        refresh_minutes)
     self.refresh()
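A hedged usage sketch (import path hypothetical). The structure of csv_map is not shown in this excerpt, so it is left as None here on the assumption that from_csv then takes the columns as-is.

from dashboard.datatable import CSVDataTable   # hypothetical module path

csv_table = CSVDataTable(refresh_minutes=5,
                         csv_spec="/tmp/metrics.csv",
                         csv_map=None,          # column selection/renaming not shown above
                         csv_name="CPU Metrics")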
Example #6
    def refresh(self):
        """ refresh the table by opening the csv file and loading it into a table """
        with open(self.csv_spec, "r") as csv_file:
            dt = from_csv(csv_file, self.name, self.csv_map)
        if dt:
            rows, cols = dt.get_bounds()
            for idx in range(cols):
                self.replace_column(idx, dt.get_column(idx))

            if dt.get_name():
                self.name = dt.get_name()

            self.changed()
            DataTable.refresh(self)
Example #7
    def refresh(self):
        """ refresh the table by opening the JSON file and loading it into a table """
        with open(self.json_spec, "r") as json_file:
            dt = from_json(json_file)
        if dt:
            rows, cols = dt.get_bounds()
            for idx in range(cols):
                self.replace_column(idx, dt.get_column(idx))

            if dt.get_name():
                self.name = dt.get_name()

            self.refresh_minutes = dt.refresh_minutes

            self.changed()
            DataTable.refresh(self)
Example #8
 def __init__(self,
              refresh_minutes=1,
              es_index_pattern=None,
              es_query_body=None,
              es_field_map=None):
     """ Initialize the ElasticsearchQueryTable pass in a refresh interval in minutes, the es_query_body dict representing the query json and the field map list of tuples [( json path, field name, field type )...]"""
     self.es_query_body = es_query_body
     self.es_field_map = es_field_map
     self.es_index_pattern = es_index_pattern
     DataTable.__init__(
         self, None,
         "Elasticsearch query:%s,index:%s,fieldmap:%s,refreshed every %d minutes"
         % (es_query_body, es_index_pattern, es_field_map, refresh_minutes),
         refresh_minutes)
     self.refresh()
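Putting the pieces together, a hedged construction sketch (import path hypothetical). Note that "date" fields are converted with datetime.fromtimestamp(value / 1000) in the refresh() of Example #3, so they are expected as epoch milliseconds.

from dashboard.datatable import ElasticsearchQueryTable   # hypothetical module path

es_table = ElasticsearchQueryTable(
    refresh_minutes=5,
    es_index_pattern="filebeat-*",
    es_query_body={"size": 100, "query": {"match_all": {}}},
    es_field_map=[
        ("hits.total.value", "Hit Count", "int"),
        ("hits.hits._source.message", "Message", "str"),
    ])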
Example #9
    def __init__(self,
                 refresh_minutes=1,
                 sql_spec=None,
                 sql_query=None,
                 sql_map=None):
        """ Initalize the ODBCDataTable object pass in a sql_spec to connect to the database of the form odbc://user@server/driver/database:port, a sql_query to be executed, and a field map of the form [[sql_column_name, data_table_column_name],..] indicating the columns to collect from the result """
        self.sql_spec = sql_spec
        self.sql_query = sql_query
        self.sql_map = sql_map
        DataTable.__init__(
            self, None,
            "ODBCDataTable query:%s,database:%s,fieldmap:%s,refreshed every %d minutes"
            % (sql_query, sql_spec, sql_map, refresh_minutes), refresh_minutes)

        self.refresh()
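A hedged construction sketch (import path hypothetical). The refresh() in Example #11 looks the password up with keyring.get_password(sql_spec, username), so the credential has to be stored under the full sql_spec beforehand.

import keyring

from dashboard.datatable import ODBCDataTable   # hypothetical module path

# odbc://user@server/driver/database:port
SPEC = "odbc://reporter@dbhost/FreeTDS/sales:1433"

# One-time setup so refresh() can find the password.
keyring.set_password(SPEC, "reporter", "s3cret")

orders = ODBCDataTable(
    refresh_minutes=15,
    sql_spec=SPEC,
    sql_query="SELECT order_date, total FROM orders",
    sql_map=[["order_date", "Order Date"], ["total", "Order Total"]])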
Example #10
 def __init__(self,
              syslog_glob="/var/log/syslog*",
              num_hours=24,
              bucket_hours=1,
              refresh_minutes=10,
              start_time=None):
     """ Initialize the SyslogDataTable with a file glob pattern to collect the syslogs on this machine, a timespan to aggregate for, aggregation bucket in hours, a refresh interval for updating in minutes """
     self.syslog_glob = syslog_glob
     self.num_hours = num_hours
     self.bucket_hours = bucket_hours
     self.start_time = start_time
     DataTable.__init__(
         self, None,
         "Syslog Data: %s for the last %d hours in %d hour buckets, refreshed every %d minutes"
         % (self.syslog_glob, self.num_hours, self.bucket_hours,
            refresh_minutes), refresh_minutes)
     self.refresh()
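A hedged usage sketch (import path hypothetical). start_time is optional and, per the refresh() in Example #16, takes the form [ year, month, day, hour, minute, second ]; it is mainly useful for pinning the aggregation window in tests.

from dashboard.datatable import SyslogDataTable   # hypothetical module path

syslog_table = SyslogDataTable(syslog_glob="/var/log/syslog*",
                               num_hours=24,
                               bucket_hours=1,
                               refresh_minutes=10)

pinned_table = SyslogDataTable(start_time=[2020, 8, 24, 9, 49, 0])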
Example #11
    def refresh(self):
        """ refresh the table from the query """
        username, server, driver, database, port = re.match(
            r"odbc://([a-z_][a-z0-9_-]*\${0,1})@([^/]*)/([^/]*)/([^:]*):{0,1}(\d*){0,1}",
            self.sql_spec).groups()

        password = keyring.get_password(self.sql_spec, username)
        if not password:
            return

        conn = pyodbc.connect(
            "DRIVER={%s};DATABASE=%s;UID=%s;PWD=%s;SERVER=%s;PORT=%s;" %
            (driver, database, username, password, server, port))
        if not conn:
            return

        result = conn.execute(self.sql_query)

        for row in result:
            for sql_column, data_column in self.sql_map:
                value = getattr(row, sql_column)
                if not self.has_column(data_column):
                    self.add_column(Column(name=data_column))
                c = self.get_column(data_column)
                if isinstance(value, datetime):
                    cc = Cell(date_type, value, format_date)
                elif isinstance(value, int):
                    cc = Cell(int_type, value, format_int)
                elif isinstance(value, float):
                    cc = Cell(float_type, value, format_float)
                elif isinstance(value, str):
                    cc = Cell(string_type, value, format_string)
                else:
                    cc = Cell(string_type, str(value), format_string)
                c.put(c.size(), cc)

        self.changed()
        DataTable.refresh(self)
Example #12
    def refresh(self):
        """ create a connection to the remote dashboard table server and refresh our internal state """

        if not self.connection:
            cm = get_connection_manager()
            connection = cm.connect(self.ssh_spec, self)
            if not connection:
                return
            self.connection = connection
            response = self.connection.table(json.dumps(self.table_def))
            if not response.startswith("loaded:%s" % self.table_def["name"]):
                return

        table_data = self.connection.get(self.table_def["name"])
        name, json_blob = table_data.split(":", 1)
        dt = from_json(StringIO(json_blob))

        rows, cols = dt.get_bounds()
        for idx in range(cols):
            self.replace_column(idx, dt.get_column(idx))

        self.changed()
        DataTable.refresh(self)
Example #13
def test_DataTable():
    column_names = [
        "Test Column 1", "Test Column 2", "Test Column 3", "Test Column 4",
        "Test Column 5"
    ]

    dt = DataTable(name="Test Data Table", refresh_minutes=0.0167)

    for cn in column_names:
        dt.add_column(Column(name=cn))

    for cn in column_names:
        assert dt.get_column(cn).get_name() == cn

    base_v = 0
    for cn in column_names:
        for v in range(0, 10):
            dt.put(v, cn, Cell(int_type, v + base_v, format_int))
        base_v += 11

    base_v = 0
    for cn in column_names:
        for v in range(0, 10):
            assert dt.get(v, cn).get_value() == v + base_v
        base_v += 11

    assert not dt.has_column("Bad Column")
    assert dt.has_column(column_names[1])
    assert dt.map_column(column_names[1]) == 1

    cols = dt.get_columns()
    for c in cols:
        assert c.get_name() in column_names
    assert len(cols) == len(column_names)

    names = dt.get_names()
    for cn in column_names:
        assert cn in names
    assert len(names) == len(column_names)

    rows, cols = dt.get_bounds()
    assert rows == 10 and cols == len(column_names)

    nc = Column(name="Test Column 2.5")
    for v in range(0, 10):
        nc.put(v, Cell(int_type, v, format_int))

    dt.insert_column(2, nc)
    rows, cols = dt.get_bounds()
    assert rows == 10 and cols == len(column_names) + 1
    assert dt.has_column("Test Column 2.5")
    assert dt.map_column("Test Column 2.5") == 2

    for v in range(0, 10):
        assert dt.get(v, "Test Column 2.5").get_value() == v

    nc = Column(name="Test Column 2.6")
    for v in range(20, 30):
        nc.put(v, Cell(int_type, v, format_int))

    dt.replace_column(2, nc)
    assert rows == 10 and cols == len(column_names) + 1
    assert not dt.has_column("Test Column 2.5")
    assert dt.has_column("Test Column 2.6")
    assert dt.map_column("Test Column 2.6") == 2

    for v in range(20, 30):
        assert dt.get(v, "Test Column 2.6").get_value() == v

    cl = dt.get_column("Test Column 2")
    assert cl.get_name() == "Test Column 2"

    v = 11
    for cell in ColumnIterator(cl):
        assert cell.get_value() == v
        v = v + 1

    test_DataTable.changed = False

    def change_listener(data_table):
        test_DataTable.changed = True

    dt.listen(change_listener)
    dt.changed()
    assert test_DataTable.changed
    test_DataTable.changed = False
    dt.unlisten(change_listener)
    dt.changed()
    assert not test_DataTable.changed

    dt.refresh()
    timestamp = dt.get_refresh_timestamp()

    dt.start_refresh()
    time.sleep(5)
    dt.stop_refresh()
    new_timestamp = dt.get_refresh_timestamp()

    assert timestamp != new_timestamp
Example #14
        def main(stdscr):
            screen_size(40,100)
            stdscr.clear()
            python_path = os.path.dirname(os.path.dirname(request.fspath))

            c_names = ["X-Series","Pie Labels","Metric 1","Metric 2","Metric 3","Metric 4","Metric 5","Metric 6"]
            d = DataTable()
            for c in c_names:
                d.add_column(Column(name=c))

            for idx in range(0,10):
                d.put(idx,"X-Series",Cell(int_type,idx*10,format_int))
                d.put(idx,"Pie Labels",Cell(string_type,"Group %d"%idx,format_string))
                d.put(idx,"Metric 1",Cell(float_type,50.0+(idx*20),format_float))
                d.put(idx,"Metric 2",Cell(float_type,75.0+(idx*30),format_float))
                d.put(idx,"Metric 3",Cell(float_type,100.0+(idx*40),format_float))
                d.put(idx,"Metric 4",Cell(float_type,123.0+(idx*23),format_float))
                d.put(idx,"Metric 5",Cell(float_type,143+(idx*33),format_float))
                d.put(idx,"Metric 6",Cell(float_type,171+(idx*51),format_float))

            c = canvas.Canvas(stdscr)
            max_x,max_y = c.get_maxxy()

            g = graph.BarGraph(d,"X-Series",["Metric 1","Metric 3","Metric 5"],"Metric Units",None,c,0,"Basic Bar Graph")
            g.render()
            dashboard_test_case(stdscr,"gr_basic_bargraph",python_path)
            c.clear()
            g = graph.LineGraph(d,"X-Series",["Metric 2","Metric 4","Metric 6"],"Metric Units",None,c,False,"Basic Line Graph")
            g.render()
            dashboard_test_case(stdscr,"gr_basic_linegraph",python_path)
            c.clear()
            g = graph.PieGraph(d,"Pie Labels",["Metric 3"],None,c,"Basic Pie Graph")
            g.render()
            dashboard_test_case(stdscr,"gr_basic_piegraph",python_path)
            c.clear()
            g = graph.PieGraph(d,"Pie Labels",["Metric 3","Metric 2","Metric 5","Metric 1","Metric 4"],None,c,"Five Pie Graph")
            g.render()
            dashboard_test_case(stdscr,"gr_five_piegraph",python_path)
            c.clear()
            g = graph.TableGraph(d,"Pie Labels",["Metric 1","Metric 2","Metric 3","Metric 4","Metric 5","Metric 6"],None,c,"Basic Table")
            g.render()
            dashboard_test_case(stdscr,"gr_basic_tablegraph",python_path)
            c.clear()
Example #15
        def main(stdscr):
            screen_size(40,100)
            stdscr.clear()
            stdscr.refresh()

            python_path = os.path.dirname(os.path.dirname(request.fspath))

            c_names = ["X-Series","Pie Labels","Metric 1","Metric 2","Metric 3","Metric 4","Metric 5","Metric 6"]
            d = DataTable()
            for c in c_names:
                d.add_column(Column(name=c))

            for idx in range(0,10):
                d.put(idx,"X-Series",Cell(int_type,idx*10,format_int))
                d.put(idx,"Pie Labels",Cell(string_type,"Group %d"%idx,format_string))
                d.put(idx,"Metric 1",Cell(float_type,50.0+(idx*20),format_float))
                d.put(idx,"Metric 2",Cell(float_type,75.0+(idx*30),format_float))
                d.put(idx,"Metric 3",Cell(float_type,100.0+(idx*40),format_float))
                d.put(idx,"Metric 4",Cell(float_type,123.0+(idx*23),format_float))
                d.put(idx,"Metric 5",Cell(float_type,143+(idx*33),format_float))
                d.put(idx,"Metric 6",Cell(float_type,171+(idx*51),format_float))


            c = canvas.Canvas(stdscr)
            max_x,max_y = c.get_maxxy()

            db = dashboard.Dashboard(stdscr,None,0)
            p = dashboard.Page(stdscr)
            pp = dashboard.Panel()

            g = graph.BarGraph(d,"X-Series",["Metric 1","Metric 3","Metric 5"],"Metric Units",None,c,0,"Basic Bar Graph")
            pp.add_graph(g)
            g = graph.LineGraph(d,"X-Series",["Metric 2","Metric 4","Metric 6"],"Metric Units",None,c,False,"Basic Line Graph")
            pp.add_graph(g)
            p.add_panel(pp)
            db.add_page(p)

            p = dashboard.Page(stdscr)
            pp = dashboard.Panel()
            g = graph.PieGraph(d,"Pie Labels",["Metric 3"],None,c,"Basic Pie Graph")
            pp.add_graph(g)
            g = graph.TableGraph(d,"Pie Labels",["Metric 1","Metric 2","Metric 3","Metric 4","Metric 5","Metric 6"],None,c,"Basic Table")
            pp.add_graph(g)
            p.add_panel(pp)
            db.add_page(p)

            # force the timestamp to be the same so the screen diffs will match
            d.refresh_timestamp = datetime(2020,8,24,9,49,0,0).timestamp()
            d.changed()

            db.main([])
            dashboard_test_case(stdscr,"db_basic_dashboard",python_path)
            db.main([curses.KEY_NPAGE])
            dashboard_test_case(stdscr,"db_basic_dashboard_1",python_path)
            db.main([9]) # tab
            dashboard_test_case(stdscr,"db_basic_dashboard_2",python_path)
            db.main([curses.KEY_HOME])
            dashboard_test_case(stdscr,"db_basic_dashboard_3",python_path)
            db.main([curses.KEY_ENTER])
            dashboard_test_case(stdscr,"db_basic_dashboard_4",python_path)
            db.main([27,-1]) # esc to exit zoom and redraw
            dashboard_test_case(stdscr,"db_basic_dashboard_5",python_path)
Example #16
    def refresh(self):
        """ refresh or rebuild tables """
        if self.start_time:
            year, month, day, hour, minute, second = self.start_time
            current_time = datetime(year, month, day, hour, minute, second)
        else:
            current_time = datetime.now()
        start_time = current_time - timedelta(hours=self.num_hours)
        syslog_files = glob.glob(self.syslog_glob)

        time_column = Column(name="Time Stamps")
        bucket_time = start_time
        idx = 0
        while bucket_time < current_time:
            time_column.put(idx, Cell(date_type, bucket_time, format_date))
            bucket_time = bucket_time + timedelta(hours=self.bucket_hours)
            idx += 1
        time_column.put(idx, Cell(date_type, current_time, format_date))

        def bucket_idx(timestamp):
            if timestamp < start_time or timestamp > current_time:
                return -1

            for idx in range(time_column.size()):
                if time_column.get(idx).get_value() >= timestamp:
                    return idx
            else:
                return -1

        errors_column = Column(name="Errors by Time")
        warnings_column = Column(name="Warnings by Time")
        messages_column = Column(name="Messages by Time")

        services_column = Column(name="Services")
        errors_service_column = Column(name="Errors by Service")
        warnings_service_column = Column(name="Warnings by Service")
        messages_service_column = Column(name="Messages by Service")

        def service_idx(service):
            for idx in range(services_column.size()):
                if services_column.get(idx).get_value() == service:
                    return idx
            else:
                return -1

        def put_or_sum(column, idx, value):
            current_value = 0
            if idx < column.size():
                c = column.get(idx)
                if c.get_type() != blank_type:
                    current_value = int(c.get_value())
            column.put(idx, Cell(int_type, current_value + value, format_int))

        for slf in syslog_files:
            if slf.endswith(".gz"):
                slf_f = gzip.open(slf, "rt", encoding="utf-8")
            else:
                slf_f = open(slf, "r", encoding="utf-8")

            for line in slf_f:
                line = line.strip()
                m = re.match(
                    r"(\w\w\w\s+\d+\s\d\d:\d\d:\d\d)\s[a-z0-9\-]*\s([a-zA-Z0-9\-\_\.]*)[\[\]0-9]*:\s*(.*)",
                    line)
                if m:
                    log_date = re.sub(r"\s+", " ",
                                      "%d " % current_time.year + m.group(1))
                    log_process = m.group(2)
                    log_message = m.group(3)
                    log_datetime = datetime.strptime(log_date,
                                                     "%Y %b %d %H:%M:%S")
                    b_idx = bucket_idx(log_datetime)
                    if b_idx >= 0:
                        s_idx = service_idx(log_process)
                        if s_idx < 0:
                            s_idx = services_column.size()
                            services_column.put(
                                s_idx,
                                Cell(string_type, log_process, format_string))
                        put_or_sum(messages_column, b_idx, 1)
                        put_or_sum(messages_service_column, s_idx, 1)
                        is_error = re.search(r"[Ee]rror|ERROR", log_message)
                        is_warning = re.search(r"[Ww]arning|WARNING",
                                               log_message)
                        error_count = 0
                        warning_count = 0
                        if is_error and not is_warning:
                            error_count = 1
                        elif is_warning:
                            warning_count = 1
                        put_or_sum(errors_column, b_idx, error_count)
                        put_or_sum(errors_service_column, s_idx, error_count)
                        put_or_sum(warnings_column, b_idx, warning_count)
                        put_or_sum(warnings_service_column, s_idx,
                                   warning_count)

            slf_f.close()

        columns = [
            time_column, errors_column, warnings_column, messages_column,
            services_column, errors_service_column, warnings_service_column,
            messages_service_column
        ]

        for c in columns:
            if self.has_column(c.get_name()):
                self.replace_column(self.map_column(c.get_name()), c)
            else:
                self.add_column(c)

        self.changed()

        DataTable.refresh(self)
Example #17
    def refresh(self):
        """ refresh or rebuild tables """
        def get_bucket(line_spec, value):
            if not self.has_column(value.column_name):
                self.add_column(Column(name=value.column_name))
            bc = self.get_column(value.column_name)
            for idx in range(bc.size()):
                if bc.get(idx).get_value() >= value.get_value():
                    break
            else:
                idx = bc.size()
            if idx < bc.size():
                if line_spec["bucket_type"] == string_type:
                    if bc.get(idx).get_value() != value.get_value():
                        bc.ins(
                            idx,
                            Cell(line_spec["bucket_type"], value.get_value(),
                                 format_map[line_spec["bucket_type"]]))
                return idx
            elif idx == 0 and bc.size() > 0:
                diff = bc.get(idx).get_value() - value.get_value()
                if line_spec["bucket_type"] == date_type:
                    while diff > timedelta(minutes=line_spec["bucket_size"]):
                        new_bucket = bc.get(idx).get_value() - timedelta(
                            minutes=line_spec["bucket_size"])
                        bc.ins(
                            idx,
                            Cell(line_spec["bucket_type"], new_bucket,
                                 format_map[line_spec["bucket_type"]]))
                        diff = bc.get(idx).get_value() - value.get_value()
                    return idx
                elif line_spec["bucket_type"] == string_type:
                    bc.ins(
                        idx,
                        Cell(line_spec["bucket_type"], value.get_value(),
                             format_map[line_spec["bucket_type"]]))
                    return idx
                else:
                    while diff > line_spec["bucket_size"]:
                        new_bucket = bc.get(
                            idx).get_value() - line_spec["bucket_size"]
                        bc.ins(
                            idx,
                            Cell(line_spec["bucket_type"], new_bucket,
                                 format_map[line_spec["bucket_type"]]))
                        diff = bc.get(idx).get_value() - value.get_value()
                    return idx
            elif idx == bc.size():
                if line_spec["bucket_type"] == string_type:
                    bc.put(
                        idx,
                        Cell(line_spec["bucket_type"], value.get_value(),
                             format_map[line_spec["bucket_type"]]))
                    return idx
                else:
                    while True:
                        if idx > 0:
                            prev_bucket = bc.get(idx - 1).get_value()
                        else:
                            prev_bucket = value.get_value()

                        if line_spec["bucket_type"] == date_type:
                            new_bucket = prev_bucket + timedelta(
                                minutes=line_spec["bucket_size"])
                        else:
                            new_bucket = prev_bucket + line_spec["bucket_size"]

                        bc.put(
                            idx,
                            Cell(line_spec["bucket_type"], new_bucket,
                                 format_map[line_spec["bucket_type"]]))
                        if value.get_value() < new_bucket:
                            return idx
                        idx = bc.size()

        def put_value(value, bidx):
            if not self.has_column(value.column_name):
                self.add_column(Column(name=value.column_name))
            cc = self.get_column(value.column_name)
            if bidx < cc.size():
                c = cc.get(bidx)
                if c.type == blank_type:
                    cc.put(bidx, value.to_cell())
                else:
                    cc.get(bidx).put_value(value.get_value())
            else:
                cc.put(bidx, value.to_cell())

        def prune_buckets(line_spec):
            for group, column_name, type, action in line_spec["column_map"]:
                if self.has_column(column_name):
                    cc = self.get_column(column_name)
                    while cc.size() > line_spec["num_buckets"]:
                        cc.delete(0)

        def top_buckets(line_spec):
            columns = []
            key_idx = None
            idx = 0
            for group, column_name, type, action in line_spec["column_map"]:
                columns.append(self.get_column(column_name))
                if action == "key":
                    key_idx = idx
                idx += 1

            sort_rows = []
            for idx in range(columns[key_idx].size()):
                values = []
                for cidx in range(len(columns)):
                    if cidx != key_idx:
                        values.append(columns[cidx].get(idx).get_value())
                values.append(idx)
                sort_rows.append(values)

            sort_rows.sort(reverse=True)
            new_columns = []
            for group, column_name, type, action in line_spec["column_map"]:
                new_columns.append(Column(name=column_name))

            for ridx in range(min(len(sort_rows), line_spec["num_buckets"])):
                for cidx in range(len(columns)):
                    new_columns[cidx].put(
                        sort_rows[ridx][-1],
                        columns[cidx].get(sort_rows[ridx][-1]))

            for c in new_columns:
                self.replace_column(self.map_column(c.get_name()), c)

        lb_days, lb_hours, lb_minutes = self.log_lookback
        start_time = datetime.now() - timedelta(
            days=lb_days, hours=lb_hours, minutes=lb_minutes)

        log_files = glob.glob(self.log_glob)

        for lf in log_files:
            lfp = 0
            stat = os.stat(lf)
            if stat.st_mtime < start_time.timestamp():
                continue

            if lf in self.file_map:
                lft, lfp = self.file_map[lf]
                if stat.st_mtime <= lft:
                    continue

            if lf.endswith(".gz"):
                lf_f = gzip.open(lf, "rt", encoding="utf-8")
            else:
                lf_f = open(lf, "r", encoding="utf-8")

            lf_f.seek(lfp, 0)

            for line in lf_f:
                line = line.strip()
                for line_spec in self.log_map:
                    m = re.match(line_spec["line_regex"], line)
                    if m:
                        values = []
                        key_idx = None
                        for group, column_name, type, action in line_spec[
                                "column_map"]:
                            values.append(
                                Value(column_name, type, action,
                                      m.group(group)))
                            if action == "key":
                                key_idx = len(values) - 1
                        bidx = get_bucket(line_spec, values[key_idx])
                        for v in values:
                            if v.action != "key":
                                put_value(v, bidx)
                        if values[key_idx].type != string_type:
                            prune_buckets(line_spec)

            self.file_map[lf] = (stat.st_mtime, lf_f.tell())
            lf_f.close()

        for line_spec in self.log_map:
            key_idx = None
            idx = 0
            for group, column_name, type, action in line_spec["column_map"]:
                if action == "key":
                    key_idx = idx
                    break
                idx += 1

            kg, kn, kt, ka = line_spec["column_map"][key_idx]
            kc = self.get_column(kn)
            for idx in range(kc.size()):
                for fg, fn, ft, fa in line_spec["column_map"]:
                    if fn != kn:
                        fc = self.get_column(fn)
                        cc = fc.get(idx)
                        if cc.type == blank_type:
                            fc.put(idx, ActionCell(ft, None, format_map[ft],
                                                   fa))

            if kt == string_type:
                top_buckets(line_spec)

        self.changed()

        DataTable.refresh(self)
Example #18
    def refresh(self):
        """ refresh or rebuild tables """

        current_time = datetime.now()

        column_names = [
            "Time Stamps", "CPU Percent", "Load Avg", "Total Virtual Memory",
            "Available Virtual Memory", "Filesystem Percent Full",
            "Filesystem Read Bytes", "Filesystem Write Bytes",
            "Network Sent Bytes", "Network Received Bytes",
            "Network Connections"
        ]

        def bucket_idx(timestamp, column):
            for idx in range(column.size()):
                if column.get(idx).get_value() >= timestamp:
                    return idx
            else:
                return -1

        def append_bucket(timestamp, column):
            column.put(
                column.size(),
                Cell(date_type, timestamp + timedelta(hours=self.bucket_hours),
                     format_date))
            if column.size() > self.num_hours / self.bucket_hours:
                for cn in column_names:
                    self.get_column(cn).delete(0)
            return column.size() - 1

        def add_average(column_name, idx, value):
            column = self.get_column(column_name)
            if not column.size() or idx >= column.size():
                column.put(idx, AverageCell(float_type, value, format_float))
            else:
                column.get(idx).put_value(value)

        bidx = 0
        for cn in column_names:
            if not self.has_column(cn):
                self.add_column(Column(name=cn))
            if cn == "Time Stamps":
                bidx = bucket_idx(current_time, self.get_column(cn))
                if bidx < 0:
                    bidx = append_bucket(current_time, self.get_column(cn))
            elif cn == "CPU Percent":
                add_average(cn, bidx, psutil.cpu_percent())
            elif cn == "Load Avg":
                add_average(cn, bidx, psutil.getloadavg()[2])
            elif cn == "Total Virtual Memory":
                add_average(cn, bidx, psutil.virtual_memory().total)
            elif cn == "Available Virtual Memory":
                add_average(cn, bidx, psutil.virtual_memory().available)
            elif cn == "Filesystem Percent Full":
                add_average(cn, bidx, psutil.disk_usage("/").percent)
            elif cn == "Filesystem Read Bytes":
                add_average(cn, bidx, psutil.disk_io_counters().read_bytes)
            elif cn == "Filesystem Write Bytes":
                add_average(cn, bidx, psutil.disk_io_counters().write_bytes)
            elif cn == "Network Sent Bytes":
                add_average(cn, bidx, psutil.net_io_counters().bytes_sent)
            elif cn == "Network Received Bytes":
                add_average(cn, bidx, psutil.net_io_counters().bytes_recv)
            elif cn == "Network Connections":
                add_average(cn, bidx, float(len(psutil.net_connections())))
        self.changed()

        DataTable.refresh(self)
Example #19
 def __init__(self, json_spec=None):
     """ Initialize the JSONDataTable object from the file named in json_spec, refresh minutes will come from the loaded json """
     self.json_spec = json_spec
     DataTable.__init__(self, None, "JSONDataTable", 120)
     self.refresh()
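A hedged usage sketch (import path hypothetical). The table name and the refresh interval come out of the loaded JSON (see the refresh() in Example #7), so only the file path is needed.

from dashboard.datatable import JSONDataTable   # hypothetical module path

json_table = JSONDataTable(json_spec="/var/lib/dashboard/summary.json")
print(json_table.get_refresh_timestamp())       # set by the initial refresh()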