예제 #1
0
        def top_buckets(line_spec):
            """Keep only the top-N rows of the mapped columns.

            Rows are ranked descending by their non-key values (key column
            excluded from the sort key, original row index appended as a
            tiebreaker), and only the first line_spec["num_buckets"] rows
            are copied into fresh replacement columns.
            """
            spec_columns = line_spec["column_map"]
            columns = [self.get_column(cname)
                       for _, cname, _, _ in spec_columns]
            key_idx = None
            for pos, (_, _, _, action) in enumerate(spec_columns):
                if action == "key":
                    key_idx = pos

            # one sortable entry per row: non-key values then the row index
            sort_rows = []
            for row in range(columns[key_idx].size()):
                entry = [columns[pos].get(row).get_value()
                         for pos in range(len(columns)) if pos != key_idx]
                entry.append(row)
                sort_rows.append(entry)
            sort_rows.sort(reverse=True)

            new_columns = [Column(name=cname)
                           for _, cname, _, _ in spec_columns]

            # copy the winning rows, preserving their original row indices
            for rank in range(min(len(sort_rows), line_spec["num_buckets"])):
                src_row = sort_rows[rank][-1]
                for pos in range(len(columns)):
                    new_columns[pos].put(src_row, columns[pos].get(src_row))

            for col in new_columns:
                self.replace_column(self.map_column(col.get_name()), col)
예제 #2
0
    def refresh(self):
        """ refresh or rebuild tables from an Elasticsearch query

        Runs self.es_query_body against self.es_index_pattern and walks
        the JSON response recursively; any value whose dotted path matches
        an entry in self.es_field_map is appended as a cell to the mapped
        column.  New columns replace same-named existing ones.
        """

        es = Elasticsearch()

        result = es.search(index=self.es_index_pattern,
                           body=self.es_query_body)

        def match_fields(name, node):
            """Collect (field_name, field_type, value) for every value in
            *node* whose dotted path appears in self.es_field_map."""
            matches = []
            if isinstance(node, dict):
                for k, item in node.items():
                    full_name = (name + "." if name else "") + k
                    for json_path, field_name, field_type in self.es_field_map:
                        if full_name == json_path:
                            matches.append((field_name, field_type, item))
                    if isinstance(item, (dict, list)):
                        matches += match_fields(full_name, item)
            elif isinstance(node, list):
                # list elements share their parent's dotted path
                for item in node:
                    matches += match_fields(name, item)
            return matches

        matches = match_fields("", result)

        new_columns = {}
        for column_name, column_type, value in matches:
            if column_name not in new_columns:
                new_columns[column_name] = Column(name=column_name)
            c = new_columns[column_name]
            if column_type == "date":
                # Elasticsearch date fields are epoch milliseconds
                cc = Cell(date_type, datetime.fromtimestamp(value / 1000),
                          format_date)
            elif column_type == "int":
                cc = Cell(int_type, value, format_int)
            elif column_type == "float":
                cc = Cell(float_type, value, format_float)
            elif column_type == "str":
                cc = Cell(string_type, value, format_string)
            else:
                # unknown field types fall back to their string repr
                cc = Cell(string_type, str(value), format_string)
            c.put(c.size(), cc)

        for column_name in new_columns:
            if self.has_column(column_name):
                self.replace_column(self.map_column(column_name),
                                    new_columns[column_name])
            else:
                self.add_column(new_columns[column_name])

        self.changed()

        DataTable.refresh(self)
예제 #3
0
 def put_value(value, bidx):
     """ store value into its column at bucket bidx, creating the column
     on first use; blank/missing cells are replaced with a fresh cell,
     live cells accumulate via the cell's own put_value """
     if not self.has_column(value.column_name):
         self.add_column(Column(name=value.column_name))
     column = self.get_column(value.column_name)
     if bidx >= column.size():
         # past the end: append as a fresh cell
         column.put(bidx, value.to_cell())
     elif column.get(bidx).type == blank_type:
         # placeholder cell: overwrite with a real one
         column.put(bidx, value.to_cell())
     else:
         # existing live cell: merge the new value in
         column.get(bidx).put_value(value.get_value())
예제 #4
0
        def main(stdscr):
            """Render each basic graph type against a fixture table and
            snapshot the screen after each render."""
            screen_size(40, 100)
            stdscr.clear()
            python_path = os.path.dirname(os.path.dirname(request.fspath))

            column_names = ["X-Series", "Pie Labels", "Metric 1", "Metric 2",
                            "Metric 3", "Metric 4", "Metric 5", "Metric 6"]
            d = DataTable()
            for name in column_names:
                d.add_column(Column(name=name))

            # (column, base, per-row step) for each metric series
            metric_specs = [
                ("Metric 1", 50.0, 20),
                ("Metric 2", 75.0, 30),
                ("Metric 3", 100.0, 40),
                ("Metric 4", 123.0, 23),
                ("Metric 5", 143, 33),
                ("Metric 6", 171, 51),
            ]
            for row in range(10):
                d.put(row, "X-Series", Cell(int_type, row * 10, format_int))
                d.put(row, "Pie Labels",
                      Cell(string_type, "Group %d" % row, format_string))
                for mname, base, step in metric_specs:
                    d.put(row, mname,
                          Cell(float_type, base + (row * step), format_float))

            c = canvas.Canvas(stdscr)
            max_x, max_y = c.get_maxxy()

            g = graph.BarGraph(d, "X-Series",
                               ["Metric 1", "Metric 3", "Metric 5"],
                               "Metric Units", None, c, 0, "Basic Bar Graph")
            g.render()
            dashboard_test_case(stdscr, "gr_basic_bargraph", python_path)
            c.clear()
            g = graph.LineGraph(d, "X-Series",
                                ["Metric 2", "Metric 4", "Metric 6"],
                                "Metric Units", None, c, False,
                                "Basic Line Graph")
            g.render()
            dashboard_test_case(stdscr, "gr_basic_linegraph", python_path)
            c.clear()
            g = graph.PieGraph(d, "Pie Labels", ["Metric 3"], None, c,
                               "Basic Pie Graph")
            g.render()
            dashboard_test_case(stdscr, "gr_basic_piegraph", python_path)
            c.clear()
            g = graph.PieGraph(d, "Pie Labels",
                               ["Metric 3", "Metric 2", "Metric 5",
                                "Metric 1", "Metric 4"], None, c,
                               "Five Pie Graph")
            g.render()
            dashboard_test_case(stdscr, "gr_five_piegraph", python_path)
            c.clear()
            g = graph.TableGraph(d, "Pie Labels",
                                 ["Metric 1", "Metric 2", "Metric 3",
                                  "Metric 4", "Metric 5", "Metric 6"],
                                 None, c, "Basic Table")
            g.render()
            dashboard_test_case(stdscr, "gr_basic_tablegraph", python_path)
            c.clear()
예제 #5
0
    def refresh(self):
        """ refresh the table from the query

        Parses self.sql_spec (odbc://user@server/driver/database[:port]),
        looks up the password in the keyring, runs self.sql_query and
        appends one cell per mapped column for every result row.
        Returns silently when no password is stored.
        """
        username, server, driver, database, port = re.match(
            r"odbc://([a-z_][a-z0-9_-]*\${0,1})@([^/]*)/([^/]*)/([^:]*):{0,1}(\d*){0,1}",
            self.sql_spec).groups()

        password = keyring.get_password(self.sql_spec, username)
        if not password:
            return

        conn = pyodbc.connect(
            "DRIVER={%s};DATABASE=%s;UID=%s;PWD=%s;SERVER=%s;PORT=%s;" %
            (driver, database, username, password, server, port))
        if not conn:
            return

        try:
            result = conn.execute(self.sql_query)

            for row in result:
                for sql_column, data_column in self.sql_map:
                    value = getattr(row, sql_column)
                    if not self.has_column(data_column):
                        self.add_column(Column(name=data_column))
                    c = self.get_column(data_column)
                    # map the python value onto the matching cell type
                    if isinstance(value, datetime):
                        cc = Cell(date_type, value, format_date)
                    elif isinstance(value, int):
                        cc = Cell(int_type, value, format_int)
                    elif isinstance(value, float):
                        cc = Cell(float_type, value, format_float)
                    elif isinstance(value, str):
                        cc = Cell(string_type, value, format_string)
                    else:
                        cc = Cell(string_type, str(value), format_string)
                    c.put(c.size(), cc)
        finally:
            # fix: the connection was previously never closed and leaked
            # on every refresh cycle
            conn.close()

        self.changed()
        DataTable.refresh(self)
예제 #6
0
    def refresh(self):
        """ refresh or rebuild tables

        Samples system metrics via psutil into time-bucketed averaging
        cells.  Each refresh finds (or appends) the bucket covering the
        current time, then folds one sample of every metric into that
        bucket's AverageCell.
        """

        current_time = datetime.now()

        column_names = [
            "Time Stamps", "CPU Percent", "Load Avg", "Total Virtual Memory",
            "Available Virtual Memory", "Filesystem Percent Full",
            "Filesystem Read Bytes", "Filesystem Write Bytes",
            "Network Sent Bytes", "Network Received Bytes",
            "Network Connections"
        ]

        def bucket_idx(timestamp, column):
            """Index of the first bucket ending at/after timestamp, -1 if none."""
            for pos in range(column.size()):
                if column.get(pos).get_value() >= timestamp:
                    return pos
            return -1

        def append_bucket(timestamp, column):
            """Append a bucket ending bucket_hours past timestamp; when the
            window is full, drop the oldest bucket from every column."""
            end_time = timestamp + timedelta(hours=self.bucket_hours)
            column.put(column.size(), Cell(date_type, end_time, format_date))
            if column.size() > self.num_hours / self.bucket_hours:
                for name in column_names:
                    self.get_column(name).delete(0)
            return column.size() - 1

        def add_average(column_name, idx, value):
            """Fold value into the AverageCell at idx, creating it if new."""
            column = self.get_column(column_name)
            if not column.size() or idx >= column.size():
                column.put(idx, AverageCell(float_type, value, format_float))
            else:
                column.get(idx).put_value(value)

        # one sampler per metric column; evaluated lazily per refresh
        samplers = {
            "CPU Percent": lambda: psutil.cpu_percent(),
            "Load Avg": lambda: psutil.getloadavg()[2],
            "Total Virtual Memory": lambda: psutil.virtual_memory().total,
            "Available Virtual Memory":
                lambda: psutil.virtual_memory().available,
            "Filesystem Percent Full":
                lambda: psutil.disk_usage("/").percent,
            "Filesystem Read Bytes":
                lambda: psutil.disk_io_counters().read_bytes,
            "Filesystem Write Bytes":
                lambda: psutil.disk_io_counters().write_bytes,
            "Network Sent Bytes":
                lambda: psutil.net_io_counters().bytes_sent,
            "Network Received Bytes":
                lambda: psutil.net_io_counters().bytes_recv,
            "Network Connections":
                lambda: float(len(psutil.net_connections())),
        }

        bidx = 0
        for cn in column_names:
            if not self.has_column(cn):
                self.add_column(Column(name=cn))
            if cn == "Time Stamps":
                # "Time Stamps" is first in column_names, so bidx is set
                # before any metric column is sampled
                bidx = bucket_idx(current_time, self.get_column(cn))
                if bidx < 0:
                    bidx = append_bucket(current_time, self.get_column(cn))
            else:
                add_average(cn, bidx, samplers[cn]())
        self.changed()

        DataTable.refresh(self)
예제 #7
0
        def get_bucket(line_spec, value):
            """Return the bucket index for *value* in its column.

            The column holds ordered bucket-boundary cells; a value maps to
            the first bucket whose boundary is >= the value.  Missing
            buckets are created on demand: string buckets are inserted or
            appended verbatim, date/numeric buckets are extended in steps
            of line_spec["bucket_size"] (minutes for date_type) until the
            value is covered.
            """
            if not self.has_column(value.column_name):
                self.add_column(Column(name=value.column_name))
            bc = self.get_column(value.column_name)
            # find the first existing boundary >= value; if none matched
            # (or the column is empty) fall through with idx == bc.size()
            for idx in range(bc.size()):
                if bc.get(idx).get_value() >= value.get_value():
                    break
            else:
                idx = bc.size()
            if idx < bc.size():
                if line_spec["bucket_type"] == string_type:
                    # string buckets are exact labels: insert a new one
                    # unless this value is already a bucket label
                    if bc.get(idx).get_value() != value.get_value():
                        bc.ins(
                            idx,
                            Cell(line_spec["bucket_type"], value.get_value(),
                                 format_map[line_spec["bucket_type"]]))
                return idx
            elif idx == 0 and bc.size() > 0:
                # NOTE(review): this branch appears unreachable — idx == 0
                # with a non-empty column is always handled by the
                # `idx < bc.size()` case above; kept byte-for-byte as-is.
                diff = bc.get(idx).get_value() - value.get_value()
                if line_spec["bucket_type"] == date_type:
                    # prepend earlier date buckets until the value fits
                    while diff > timedelta(minutes=line_spec["bucket_size"]):
                        new_bucket = bc.get(idx).get_value() - timedelta(
                            minutes=line_spec["bucket_size"])
                        bc.ins(
                            idx,
                            Cell(line_spec["bucket_type"], new_bucket,
                                 format_map[line_spec["bucket_type"]]))
                        diff = bc.get(idx).get_value() - value.get_value()
                    return idx
                elif line_spec["bucket_type"] == string_type:
                    bc.ins(
                        idx,
                        Cell(line_spec["bucket_type"], value.get_value(),
                             format_map[line_spec["bucket_type"]]))
                    return idx
                else:
                    # numeric buckets: prepend smaller boundaries
                    while diff > line_spec["bucket_size"]:
                        new_bucket = bc.get(
                            idx).get_value() - line_spec["bucket_size"]
                        bc.ins(
                            idx,
                            Cell(line_spec["bucket_type"], new_bucket,
                                 format_map[line_spec["bucket_type"]]))
                        diff = bc.get(idx).get_value() - value.get_value()
                    return idx
            elif idx == bc.size():
                # value lies beyond the last boundary (or the column is
                # empty): append buckets until one covers it
                if line_spec["bucket_type"] == string_type:
                    bc.put(
                        idx,
                        Cell(line_spec["bucket_type"], value.get_value(),
                             format_map[line_spec["bucket_type"]]))
                    return idx
                else:
                    while True:
                        if idx > 0:
                            prev_bucket = bc.get(idx - 1).get_value()
                        else:
                            # empty column: seed the series from the value
                            prev_bucket = value.get_value()

                        if line_spec["bucket_type"] == date_type:
                            new_bucket = prev_bucket + timedelta(
                                minutes=line_spec["bucket_size"])
                        else:
                            new_bucket = prev_bucket + line_spec["bucket_size"]

                        bc.put(
                            idx,
                            Cell(line_spec["bucket_type"], new_bucket,
                                 format_map[line_spec["bucket_type"]]))
                        if value.get_value() < new_bucket:
                            return idx
                        idx = bc.size()
예제 #8
0
def test_DataTable():
    """End-to-end exercise of DataTable: column creation, cell put/get,
    insert/replace of columns, change listeners, and background refresh."""
    column_names = [
        "Test Column 1", "Test Column 2", "Test Column 3", "Test Column 4",
        "Test Column 5"
    ]

    # refresh_minutes ~= 1 second so the background-refresh check below
    # can observe at least one refresh during the sleep(5)
    dt = DataTable(name="Test Data Table", refresh_minutes=0.0167)

    for cn in column_names:
        dt.add_column(Column(name=cn))

    for cn in column_names:
        assert dt.get_column(cn).get_name() == cn

    # fill each column with a distinct arithmetic series (offset by 11)
    base_v = 0
    for cn in column_names:
        for v in range(0, 10):
            dt.put(v, cn, Cell(int_type, v + base_v, format_int))
        base_v += 11

    base_v = 0
    for cn in column_names:
        for v in range(0, 10):
            assert dt.get(v, cn).get_value() == v + base_v
        base_v += 11

    assert not dt.has_column("Bad Column")
    assert dt.has_column(column_names[1])
    assert dt.map_column(column_names[1]) == 1

    cols = dt.get_columns()
    for c in cols:
        assert c.get_name() in column_names
    assert len(cols) == len(column_names)

    names = dt.get_names()
    for cn in column_names:
        assert cn in names
    assert len(names) == len(column_names)

    rows, cols = dt.get_bounds()
    assert rows == 10 and cols == len(column_names)

    # insert a new column in the middle and check remapping
    nc = Column(name="Test Column 2.5")
    for v in range(0, 10):
        nc.put(v, Cell(int_type, v, format_int))

    dt.insert_column(2, nc)
    rows, cols = dt.get_bounds()
    assert rows == 10 and cols == len(column_names) + 1
    assert dt.has_column("Test Column 2.5")
    assert dt.map_column("Test Column 2.5") == 2

    for v in range(0, 10):
        assert dt.get(v, "Test Column 2.5").get_value() == v

    nc = Column(name="Test Column 2.6")
    for v in range(20, 30):
        nc.put(v, Cell(int_type, v, format_int))

    dt.replace_column(2, nc)
    # NOTE(review): rows/cols are stale here (set before replace_column),
    # so this assertion re-tests the pre-replace bounds — presumably
    # get_bounds() should be called again; verify intended bounds after
    # a replace with a sparse 30-row column before changing it.
    assert rows == 10 and cols == len(column_names) + 1
    assert not dt.has_column("Test Column 2.5")
    assert dt.has_column("Test Column 2.6")
    assert dt.map_column("Test Column 2.6") == 2

    for v in range(20, 30):
        assert dt.get(v, "Test Column 2.6").get_value() == v

    cl = dt.get_column("Test Column 2")
    assert cl.get_name() == "Test Column 2"

    # column 2 holds 11..20 (second series, base offset 11)
    v = 11
    for cell in ColumnIterator(cl):
        assert cell.get_value() == v
        v = v + 1

    # listener flag is stashed on the function object to avoid a global
    test_DataTable.changed = False

    def change_listener(data_table):
        test_DataTable.changed = True

    dt.listen(change_listener)
    dt.changed()
    assert test_DataTable.changed
    test_DataTable.changed = False
    dt.unlisten(change_listener)
    dt.changed()
    assert not test_DataTable.changed

    dt.refresh()
    timestamp = dt.get_refresh_timestamp()

    # background refresh should fire at least once within 5 seconds
    dt.start_refresh()
    time.sleep(5)
    dt.stop_refresh()
    new_timestamp = dt.get_refresh_timestamp()

    assert timestamp != new_timestamp
예제 #9
0
def test_Column():
    """Exercise Column put/get, delete, insert, rename and sparse put."""
    col = Column(name="Test Column")
    for i in range(10):
        col.put(i, Cell(int_type, i, format_int))

    # every stored value reads back unchanged
    assert all(col.get(i).get_value() == i for i in range(10))

    assert col.size() == 10
    col.delete(5)
    # deletion shifts later cells down by one
    assert col.size() == 9
    assert col.get(5).get_value() == 6
    col.ins(8, Cell(int_type, 27, format_int))
    assert col.get(8).get_value() == 27
    assert col.get(9).get_value() == 9
    assert col.get_name() == "Test Column"
    col.set_name("New Name")
    assert col.get_name() == "New Name"
    # a sparse put past the end pads the gap with blank cells
    col.put(34, Cell(int_type, 100, format_int))
    assert col.get(34).get_value() == 100
    assert col.get(33) == blank_cell
    assert col.size() == 35
예제 #10
0
    def refresh(self):
        """ refresh or rebuild tables

        Scans every file matching self.syslog_glob (gzip or plain text),
        buckets matching syslog lines by timestamp and by service, and
        accumulates message/warning/error counts per bucket.
        """
        if self.start_time:
            # fixed start time: makes output reproducible (used by tests)
            year, month, day, hour, minute, second = self.start_time
            current_time = datetime(year, month, day, hour, minute, second)
        else:
            current_time = datetime.now()
        start_time = current_time - timedelta(hours=self.num_hours)
        syslog_files = glob.glob(self.syslog_glob)

        # bucket boundaries spanning [start_time, current_time]
        time_column = Column(name="Time Stamps")
        bucket_time = start_time
        idx = 0
        while bucket_time < current_time:
            time_column.put(idx, Cell(date_type, bucket_time, format_date))
            bucket_time = bucket_time + timedelta(hours=self.bucket_hours)
            idx += 1
        time_column.put(idx, Cell(date_type, current_time, format_date))

        def bucket_idx(timestamp):
            """Index of first bucket at/after timestamp, -1 if out of range."""
            if timestamp < start_time or timestamp > current_time:
                return -1

            for idx in range(time_column.size()):
                if time_column.get(idx).get_value() >= timestamp:
                    return idx
            return -1

        errors_column = Column(name="Errors by Time")
        warnings_column = Column(name="Warnings by Time")
        messages_column = Column(name="Messages by Time")

        services_column = Column(name="Services")
        errors_service_column = Column(name="Errors by Service")
        warnings_service_column = Column(name="Warnings by Service")
        messages_service_column = Column(name="Messages by Service")

        def service_idx(service):
            """Index of service in services_column, -1 if not yet seen."""
            for idx in range(services_column.size()):
                if services_column.get(idx).get_value() == service:
                    return idx
            return -1

        def put_or_sum(column, idx, value):
            """Add value to the int cell at idx, treating blanks as 0."""
            current_value = 0
            if idx < column.size():
                c = column.get(idx)
                if c.get_type() != blank_type:
                    current_value = int(c.get_value())
            column.put(idx, Cell(int_type, current_value + value, format_int))

        for slf in syslog_files:
            if slf.endswith(".gz"):
                slf_f = gzip.open(slf, "rt", encoding="utf-8")
            else:
                slf_f = open(slf, "r", encoding="utf-8")

            # fix: close each log file when done (handles were leaked before)
            with slf_f:
                for line in slf_f:
                    line = line.strip()
                    m = re.match(
                        r"(\w\w\w\s+\d+\s\d\d:\d\d:\d\d)\s[a-z0-9\-]*\s([a-zA-Z0-9\-\_\.]*)[\[\]0-9]*:\s*(.*)",
                        line)
                    if not m:
                        continue
                    # syslog lines carry no year; assume the current one
                    log_date = re.sub(r"\s+", " ",
                                      "%d " % current_time.year + m.group(1))
                    log_process = m.group(2)
                    log_message = m.group(3)
                    log_datetime = datetime.strptime(log_date,
                                                     "%Y %b %d %H:%M:%S")
                    b_idx = bucket_idx(log_datetime)
                    if b_idx < 0:
                        # outside the reporting window
                        continue
                    s_idx = service_idx(log_process)
                    if s_idx < 0:
                        # first sighting of this service: register it
                        s_idx = services_column.size()
                        services_column.put(
                            s_idx,
                            Cell(string_type, log_process, format_string))
                    put_or_sum(messages_column, b_idx, 1)
                    put_or_sum(messages_service_column, s_idx, 1)
                    is_error = re.search(r"[Ee]rror|ERROR", log_message)
                    is_warning = re.search(r"[Ww]arning|WARNING",
                                           log_message)
                    # a line mentioning both counts as a warning only
                    error_count = 0
                    warning_count = 0
                    if is_error and not is_warning:
                        error_count = 1
                    elif is_warning:
                        warning_count = 1
                    put_or_sum(errors_column, b_idx, error_count)
                    put_or_sum(errors_service_column, s_idx, error_count)
                    put_or_sum(warnings_column, b_idx, warning_count)
                    put_or_sum(warnings_service_column, s_idx,
                               warning_count)

        columns = [
            time_column, errors_column, warnings_column, messages_column,
            services_column, errors_service_column, warnings_service_column,
            messages_service_column
        ]

        for c in columns:
            if self.has_column(c.get_name()):
                self.replace_column(self.map_column(c.get_name()), c)
            else:
                self.add_column(c)

        self.changed()

        DataTable.refresh(self)
예제 #11
0
        def main(stdscr):
            """Drive a two-page dashboard and snapshot the screen after
            each simulated key interaction."""
            screen_size(40, 100)
            stdscr.clear()
            stdscr.refresh()

            python_path = os.path.dirname(os.path.dirname(request.fspath))

            column_names = ["X-Series", "Pie Labels", "Metric 1", "Metric 2",
                            "Metric 3", "Metric 4", "Metric 5", "Metric 6"]
            d = DataTable()
            for name in column_names:
                d.add_column(Column(name=name))

            # (column, base, per-row step) for each metric series
            metric_specs = [
                ("Metric 1", 50.0, 20),
                ("Metric 2", 75.0, 30),
                ("Metric 3", 100.0, 40),
                ("Metric 4", 123.0, 23),
                ("Metric 5", 143, 33),
                ("Metric 6", 171, 51),
            ]
            for row in range(10):
                d.put(row, "X-Series", Cell(int_type, row * 10, format_int))
                d.put(row, "Pie Labels",
                      Cell(string_type, "Group %d" % row, format_string))
                for mname, base, step in metric_specs:
                    d.put(row, mname,
                          Cell(float_type, base + (row * step), format_float))

            c = canvas.Canvas(stdscr)
            max_x, max_y = c.get_maxxy()

            db = dashboard.Dashboard(stdscr, None, 0)

            first_page = dashboard.Page(stdscr)
            panel = dashboard.Panel()
            panel.add_graph(graph.BarGraph(
                d, "X-Series", ["Metric 1", "Metric 3", "Metric 5"],
                "Metric Units", None, c, 0, "Basic Bar Graph"))
            panel.add_graph(graph.LineGraph(
                d, "X-Series", ["Metric 2", "Metric 4", "Metric 6"],
                "Metric Units", None, c, False, "Basic Line Graph"))
            first_page.add_panel(panel)
            db.add_page(first_page)

            second_page = dashboard.Page(stdscr)
            panel = dashboard.Panel()
            panel.add_graph(graph.PieGraph(
                d, "Pie Labels", ["Metric 3"], None, c, "Basic Pie Graph"))
            panel.add_graph(graph.TableGraph(
                d, "Pie Labels",
                ["Metric 1", "Metric 2", "Metric 3", "Metric 4", "Metric 5",
                 "Metric 6"], None, c, "Basic Table"))
            second_page.add_panel(panel)
            db.add_page(second_page)

            # force the timestamp to be the same so the screen diffs will match
            d.refresh_timestamp = datetime(2020, 8, 24, 9, 49, 0, 0).timestamp()
            d.changed()

            key_cases = [
                ([], "db_basic_dashboard"),
                ([curses.KEY_NPAGE], "db_basic_dashboard_1"),
                ([9], "db_basic_dashboard_2"),        # tab
                ([curses.KEY_HOME], "db_basic_dashboard_3"),
                ([curses.KEY_ENTER], "db_basic_dashboard_4"),
                ([27, -1], "db_basic_dashboard_5"),   # esc exits zoom, redraw
            ]
            for keys, case_name in key_cases:
                db.main(keys)
                dashboard_test_case(stdscr, case_name, python_path)