Example #1
    def infoSIndex(stats, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Secondary Index Information"
        column_names = ('node'
                        , ('indexname', 'Index Name')
                        ,('ns', 'Namespace')
                        , 'set'
                        , 'bins'
                        , 'num_bins'
                        , ('type', 'Bin Type')
                        , 'state'
                        , 'sync_state')

        t = Table(title, column_names, group_by=1)

        for node_key, n_stats in stats.iteritems():
            node = prefixes[node_key]
            for index_stats in n_stats:
                if isinstance(index_stats, Exception):
                    row = {}
                else:
                    row = index_stats

                row['node'] = node
                t.insertRow(row)

        print t
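
Taken together, the admin-console examples on this page exercise one inferred Table API. Below is a minimal sketch of the pattern, assuming (inferred from these examples, not confirmed by any single source) that Table accepts a title plus a tuple of column names or (key, display-name) pairs, that rows are plain dicts keyed by column name, and that printing the table renders it:

    # Sketch only: the exact Table signature is an assumption.
    columns = ('node', ('indexname', 'Index Name'), ('ns', 'Namespace'))
    t = Table("Secondary Index Information", columns, group_by=1)  # group rows by the second column
    t.insertRow({'node': 'n1', 'indexname': 'idx_age', 'ns': 'test'})
    print t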
Example #2
    def showConfig(title, service_configs, cluster, like=None, **ignore):
        prefixes = cluster.getPrefixes()

        column_names = set()

        for config in service_configs.itervalues():
            if isinstance(config, Exception):
                continue
            column_names.update(config.keys())

        column_names = sorted(column_names)
        if like:
            likes = CliView.compileLikes(like)

            column_names = filter(likes.search, column_names)

        if len(column_names) == 0:
            return ''

        column_names.insert(0, "NODE")

        t = Table(title
                  , column_names
                  , title_format=TitleFormats.noChange
                  , style=Styles.VERTICAL)

        row = None
        for node_id, row in service_configs.iteritems():
            if isinstance(row, Exception):
                row = {}

            row['NODE'] = prefixes[node_id]
            t.insertRow(row)

        print t
Example #3
def augment_data():
    new_train_path = "data/augmented_train.jsonl"
    error_cnt = 0
    with open(new_train_path, 'w') as f:
        for i in range(len(tables)):
            table_new = Table(tables[i]['id'], tables[i]['header'], tables[i]['types'], tables[i]['rows'])
            try:
                queries = table_new.generate_queries(DB.conn, n=10, max_tries=100, lower=True)
                for query in queries:
                    info = {}
                    sql = {}
                    sql['conds'] = query[0].conditions
                    sql['sel'] = query[0].sel_index
                    sql['agg'] = query[0].agg_index
                    sql['exec_answer'] = query[1]
                    info['sql'] = sql
                    info['phase'] = 2  # default
                    info['table_id'] = tables[i]['id']
                    info['question'] = ''
                    f.write(json.dumps(info) + "\n")
            except Exception:  # a bare except would also swallow KeyboardInterrupt
                error_cnt += 1
    print(error_cnt)
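
For reference, each line written by augment_data is a JSON object of the shape below (field values are illustrative, not from a real run):

    # {"sql": {"conds": [[3, 0, "south australia"]], "sel": 5, "agg": 0,
    #          "exec_answer": [...]},
    #  "phase": 2, "table_id": "1-1000181-1", "question": ""}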
Example #4
def test():
    # convert query dict to text (without correct column references)
    details = {"sel": 5, "conds": [[3, 0, "SOUTH AUSTRALIA"]], "agg": 0}
    test_str = Query(details["sel"], details["agg"], details["conds"])
    print(test_str)

    db = records.Database('sqlite:///data/train.db')
    conn = db.get_connection()

    # convert query dict to text with table reference (still does not give the correct columns)
    # because header is not supplied
    table = Table.from_db(conn, "1-1000181-1")
    print(table.query_str(test_str))

    # convert query dict to text with table reference after supplying headers
    table_data = {
        "id": "1-1000181-1",
        "header": [
            "State/territory", "Text/background colour", "Format",
            "Current slogan", "Current series", "Notes"
        ],
        "types": [],
        "rows": []
    }
    t = Table(table_data["id"], table_data["header"], table_data["types"],
              table_data["rows"])
    print(t.query_str(test_str))
Example #5
def toQueryStr(file_name, table_arr, type=0, test_batch_size=1000):
    path = os.path.join(DATA_DIR, '{}.jsonl'.format(file_name))
    print(path)
    with open(path, 'r') as pf:
        data = pf.readlines()
    
    idxs = np.arange(len(data))
    data = np.array(data, dtype=object)  # np.object was removed in NumPy 1.24

    np.random.seed(0)   # set random seed so that random things are reproducible
    np.random.shuffle(idxs)
    data = data[idxs]
    batched_data = chunked(data, test_batch_size)

    print("start processing")
    examples = []
    for batch_idx, batch_data in enumerate(batched_data):
        if len(batch_data) < test_batch_size:
            break # the last batch is smaller than the others, exclude.
        for d_idx, d in enumerate(batch_data): 
            line = json.loads(str(d))  # json.loads no longer accepts an encoding argument
            doc_token = line['question']
            code_arr = line['sql']
            query = Query(code_arr['sel'], code_arr['agg'], code_arr['conds'])

            id = line['table_id']
            code_str = ''
            for table in table_arr:
                if table.table_id == id:
                    code_str = table.query_str(query)
                    break
            isNegative = np.random.randint(2)
            code_token = code_str
            if isNegative == 0:
                # negative sample: pair the code with a random, unrelated question
                random_line_num = np.random.randint(len(data))
                line = json.loads(str(data[random_line_num]))
                doc_token = line['question']
            example = (str(isNegative), "nothing", "nothing", doc_token, code_token)
            example = '<CODESPLIT>'.join(example)
            examples.append(example)
    data_path = os.path.join(DATA_DIR, 'train_valid/wiki_sql')
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    
    output_file_name = "1.txt"
    if type == 0:
        output_file_name = 'train.txt'
    else:
        output_file_name = 'valid.txt'
    file_path = os.path.join(data_path, output_file_name)
    print(file_path)
    with open(file_path, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(examples))
Example #6
def test_get_column_dependencies(mocker):
    schema_mock = mocker.patch('lib.schema_parser.Schema')
    schema_mock.return_value.parse_create_table.return_value = OrderedDict([
        (1, Column('bla', True, [], 0.5)),
        (2, Column('choose_from_list a.b.c', True, [], 0.5)),
        (3, Column('bleh', True, [], 0.5))
    ])

    table = Table(schema_path='foobar.sql', scaler=0.42)
    deps = table.get_column_dependencies()
    assert deps == set((('a.b', 'c'), ))
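
A hedged note on running this test: the mocker fixture comes from the pytest-mock plugin, so the test runs under pytest (for example, pip install pytest pytest-mock, then pytest -k get_column_dependencies). Because lib.schema_parser.Schema is patched, no real foobar.sql file needs to exist.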
Example #7
    def infoNetwork(stats, hosts, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Network Information"
        column_names = ('node', 'node_id', 'fqdn', 'ip',
                        ('client_connections', 'Client Conns'), 'current-time',
                        ('heartbeat_received_self', 'HB Self'),
                        ('heartbeat_received_foreign', 'HB Foreign'))

        t = Table(title, column_names)

        t.addCellAlert('node_id',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        t.addCellAlert('node',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            if isinstance(n_stats, Exception):
                n_stats = {}
            node = cluster.getNode(node_key)[0]
            row = n_stats
            row['node'] = prefixes[node_key]
            row['real_node_id'] = node.node_id
            row['node_id'] = node.node_id if node.node_id != principal else "*%s" % (
                node.node_id)
            row['fqdn'] = hosts[node_key].sockName(use_fqdn=True)
            row['ip'] = hosts[node_key].sockName(use_fqdn=False)
            t.insertRow(row)
        print t
Example #8
        def __init__(self):
            self.tables = {
                'a': Table(schema_path='bla/a.sql', scaler=1),
                'b': Table(schema_path='bleh/b.sql', scaler=10),
                'c': Table(schema_path='bleh/c.sql', scaler=0.1)
            }

            self.graph = {
                'a': ['c', 'b'],
                'b': [],
                'c': ['b'],
            }

            self.entrypoint = 'a'
Example #9
    def showConfig(title,
                   service_configs,
                   cluster,
                   like=None,
                   diff=None,
                   **ignore):
        prefixes = cluster.getPrefixes()
        column_names = set()

        if diff and service_configs:
            config_sets = (set(service_configs[d].iteritems())
                           for d in service_configs if service_configs[d])
            union = set.union(*config_sets)
            # Regenerating generator expression for config_sets.
            config_sets = (set(service_configs[d].iteritems())
                           for d in service_configs if service_configs[d])
            intersection = set.intersection(*config_sets)
            column_names = dict(union - intersection).keys()
        else:
            for config in service_configs.itervalues():
                if isinstance(config, Exception):
                    continue
                column_names.update(config.keys())

        column_names = sorted(column_names)
        if like:
            likes = CliView.compileLikes(like)

            column_names = filter(likes.search, column_names)

        if len(column_names) == 0:
            return ''

        column_names.insert(0, "NODE")

        t = Table(title,
                  column_names,
                  title_format=TitleFormats.noChange,
                  style=Styles.VERTICAL)

        row = None
        for node_id, row in service_configs.iteritems():
            if isinstance(row, Exception):
                row = {}

            row['NODE'] = prefixes[node_id]
            t.insertRow(row)

        print t
Example #10
    def test_addRowAndCell(self):
        """
		Tests adding one Row with one Cell."""
        from lib.table import Table, Row, Cell
        cell = Cell()
        row = Row()
        table = Table()
        table.test = "addRowAndCellTest"
        cell.text = "Test"
        row.addCell(cell)
        table.addRow(row)  # NOTE: BUG found: does something to trip up tableHasRows.
        textFoundInTableCell = table.rows[0].cells[0].text
        self.assertEqual(textFoundInTableCell, "Test")
        dprint("Table address:", table)
Example #11
    def get_query_from_json(self, json_line):
        """Return the Table and the Query object parsed from one json line."""
        q = Query.from_dict(json_line["sql"])
        t_id = json_line["table_id"]
        table = self.table_map[t_id]
        t = Table("", table["header"], table["types"], table["rows"])
        return t, q
Example #12
    def basicTestTable(self):
        from lib.table import Table, Row, Cell
        return Table(rows=[
            Row(cells=[Cell("1", isHeader=True), Cell("2", isHeader=True)]),
            Row(cells=[Cell("A1"), Cell("A2")]),
            Row(cells=[Cell("B1"), Cell("B2")]),
        ])
Example #13
def read_tableDic(file_name, test_batch_size=1000):
    path = os.path.join(DATA_DIR, '{}.jsonl'.format(file_name))
    print(path)
    with open(path, 'r') as pf:
        data = pf.readlines()

    idxs = np.arange(len(data))
    data = np.array(data, dtype=object)  # np.object was removed in NumPy 1.24

    np.random.seed(0)  # set random seed so that random things are reproducible
    np.random.shuffle(idxs)
    data = data[idxs]
    batched_data = chunked(data, test_batch_size)

    print("start processing")
    table_dic = dict()
    for batch_idx, batch_data in enumerate(batched_data):
        if len(batch_data) < test_batch_size:
            break  # the last batch is smaller than the others, exclude.
        for d_idx, d in enumerate(batch_data):
            line = json.loads(str(d))  # json.loads no longer accepts an encoding argument
            id = line['id']
            header = line['header']
            types = line['types']
            rows = line['rows']
            table = Table(id, header, types, rows)
            table_dic[id] = table
    return table_dic
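
A hedged sketch of how the two helpers above might be combined, assuming WikiSQL-style train.jsonl and train.tables.jsonl files under DATA_DIR and that Table exposes the table_id attribute that toQueryStr matches on:

    # Build the id -> Table map, then emit train.txt with positive/negative pairs.
    table_dic = read_tableDic('train.tables')
    toQueryStr('train', list(table_dic.values()), type=0)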
Example #14
    def showConfig(title, service_configs, cluster, like=None, diff=None, **ignore):
        prefixes = cluster.getPrefixes()
        column_names = set()

        if diff and service_configs:
            config_sets = (set(service_configs[d].iteritems())
                           for d in service_configs if service_configs[d])
            union = set.union(*config_sets)
            # Regenerating generator expression for config_sets.
            config_sets = (set(service_configs[d].iteritems())
                           for d in service_configs if service_configs[d])
            intersection = set.intersection(*config_sets)
            column_names = dict(union - intersection).keys()
        else:
            for config in service_configs.itervalues():
                if isinstance(config, Exception):
                    continue
                column_names.update(config.keys())

        column_names = sorted(column_names)
        if like:
            likes = CliView.compileLikes(like)

            column_names = filter(likes.search, column_names)

        if len(column_names) == 0:
            return ''

        column_names.insert(0, "NODE")

        t = Table(title
                  , column_names
                  , title_format=TitleFormats.noChange
                  , style=Styles.VERTICAL)

        row = None
        for node_id, row in service_configs.iteritems():
            if isinstance(row, Exception):
                row = {}

            row['NODE'] = prefixes[node_id]
            t.insertRow(row)

        print t
Example #15
    def infoSIndex(stats, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Secondary Index Information"
        column_names = ('node', ('indexname', 'Index Name'),
                        ('ns', 'Namespace'), 'set', 'bins', 'num_bins',
                        ('type', 'Bin Type'), 'state', 'sync_state')

        t = Table(title, column_names, group_by=1)

        for node_key, n_stats in stats.iteritems():
            node = prefixes[node_key]
            for index_stats in n_stats:
                if isinstance(index_stats, Exception):
                    row = {}
                else:
                    row = index_stats

                row['node'] = node
                t.insertRow(row)

        print t
Example #16
    def render(s):
        (w, l, h) = s.getDim()
        seat_l = 53

        base = Cube([2, l, 2])
        pole = Cube([2, 2, h])
        bar = Cube([87, 1, 1], color=(193, 209, 193))
        seat = Table([12, seat_l, 26])
        s.place(base, rel_to='BL')
        s.place(base, rel_to='BR')
        s.place(pole, rel_to='BL', offset=[0, 0.8 * l, 0])
        s.place(pole, rel_to='BR', offset=[0, 0.8 * l, 0])
        s.place(bar, rel_to='CX', offset=[0, 0.8 * l, h])
        s.place(seat, rel_to='CX', offset=[0, (l / 2.0) - seat_l, 0])
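
Note that Table here (and in Example #25 below) is a different class from the tabular-report and WikiSQL Table seen elsewhere on this page: a 3-D scene primitive built from [width, length, height] dimensions and placed alongside Cube objects.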
Example #17
    def showLatency(latency, cluster, like=None, **ignore):
        prefixes = cluster.getPrefixes()

        if like:
            likes = CliView.compileLikes(like)

            histograms = set(filter(likes.search, latency.keys()))
        else:
            histograms = set(latency.keys())

        for hist_name, node_data in sorted(latency.iteritems()):
            if hist_name not in histograms:
                continue

            title = "%s Latency" % (hist_name)
            all_columns = set()

            for _, (columns, _) in node_data.iteritems():
                for column in columns:
                    if column[0] == '>':
                        column = int(column[1:-2])
                        all_columns.add(column)

            all_columns = [">%sms" % (c) for c in sorted(all_columns)]
            all_columns.insert(0, 'ops/sec')
            all_columns.insert(0, 'Time Span')
            all_columns.insert(0, 'node')

            t = Table(title, all_columns)

            for node_id, (columns, data) in node_data.iteritems():
                node_data = dict(itertools.izip(columns, data))
                node_data['node'] = prefixes[node_id]
                t.insertRow(node_data)

            print t
Example #18
    def showDistribution(title
                         , histogram
                         , unit
                         , hist
                         , cluster
                         , like=None
                         , **ignore):
        prefixes = cluster.getPrefixes()

        likes = CliView.compileLikes(like)

        columns = ["%s%%"%(n) for n in xrange(10,110, 10)]
        percentages = columns[:]
        columns.insert(0, 'node')
        description = "Percentage of records having %s less than or "%(hist) + \
                      "equal to value measured in %s"%(unit)

        namespaces = set(filter(likes.search, histogram.keys()))

        for namespace, node_data in histogram.iteritems():
            if namespace not in namespaces:
                continue

            t = Table("%s - %s in %s"%(namespace, title, unit)
                      , columns
                      , description=description)
            for node_id, data in node_data.iteritems():
                percentiles = data['percentiles']
                row = {}
                row['node'] = prefixes[node_id]
                for percent in percentages:
                    row[percent] = percentiles.pop(0)

                t.insertRow(row)

            print t
Example #19
    def showDistribution(title,
                         histogram,
                         unit,
                         hist,
                         cluster,
                         like=None,
                         **ignore):
        prefixes = cluster.getPrefixes()

        likes = CliView.compileLikes(like)

        columns = ["%s%%" % (n) for n in xrange(10, 110, 10)]
        percentages = columns[:]
        columns.insert(0, 'node')
        description = "Percentage of records having %s less than or "%(hist) + \
                      "equal to value measured in %s"%(unit)

        namespaces = set(filter(likes.search, histogram.keys()))

        for namespace, node_data in histogram.iteritems():
            if namespace not in namespaces:
                continue

            t = Table("%s - %s in %s" % (namespace, title, unit),
                      columns,
                      description=description)
            for node_id, data in node_data.iteritems():
                percentiles = data['percentiles']
                row = {}
                row['node'] = prefixes[node_id]
                for percent in percentages:
                    row[percent] = percentiles.pop(0)

                t.insertRow(row)

            print t
Example #20
    def showLatency(latency, cluster, like=None, **ignore):
        prefixes = cluster.getPrefixes()

        if like:
            likes = CliView.compileLikes(like)

            histograms = set(filter(likes.search, latency.keys()))
        else:
            histograms = set(latency.keys())

        for hist_name, node_data in sorted(latency.iteritems()):
            if hist_name not in histograms:
                continue

            title = "%s Latency"%(hist_name)
            all_columns = set()

            for _, (columns, _) in node_data.iteritems():
                for column in columns:
                    if column[0] == '>':
                        column = int(column[1:-2])
                        all_columns.add(column)

            all_columns = [">%sms"%(c) for c in sorted(all_columns)]
            all_columns.insert(0, 'ops/sec')
            all_columns.insert(0, 'Time Span')
            all_columns.insert(0, 'node')

            t = Table(title, all_columns)

            for node_id, (columns, data) in node_data.iteritems():
                node_data = dict(itertools.izip(columns, data))
                node_data['node'] = prefixes[node_id]
                t.insertRow(node_data)

            print t
Example #21
    def infoNetwork(stats, hosts, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Network Information"
        column_names = ('node'
                        , 'node_id'
                        , 'fqdn'
                        , 'ip'
                        , ('client_connections', 'Client Conns')
                        , 'current-time'
                        , ('heartbeat_received_self', 'HB Self')
                        , ('heartbeat_received_foreign', 'HB Foreign'))

        t = Table(title, column_names)

        t.addCellAlert('node_id'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        t.addCellAlert('node'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            if isinstance(n_stats, Exception):
                n_stats = {}
            node = cluster.getNode(node_key)[0]
            row = n_stats
            row['node'] = prefixes[node_key]
            row['real_node_id'] = node.node_id
            row['node_id'] = node.node_id if node.node_id != principal else "*%s"%(node.node_id)
            row['fqdn'] = hosts[node_key].sockName(use_fqdn=True)
            row['ip'] = hosts[node_key].sockName(use_fqdn=False)
            t.insertRow(row)
        print t
Example #22
from lib.table import Table

TABLES = {
    'public.a': Table(schema_path='examples/dependencies/a.sql', scaler=1),
    'public.b': Table(schema_path='examples/dependencies/b.sql', scaler=10)
}

GRAPH = {'public.a': ['public.b'], 'public.b': []}

ENTRYPOINT = 'public.a'
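
A hedged reading of this layout (inferred, not stated in the source): GRAPH maps each table to the tables it depends on, and generation starts from ENTRYPOINT. A minimal sketch of a dependency-order walk over such a graph:

    # Visit dependencies before the tables that need them.
    def walk(graph, start, seen=None):
        seen = set() if seen is None else seen
        for dep in graph.get(start, []):
            if dep not in seen:
                walk(graph, dep, seen)
        if start not in seen:
            seen.add(start)
            print(start)

    walk(GRAPH, ENTRYPOINT)  # prints public.b, then public.a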
Example #23
    def infoXDR(stats, builds, xdr_enable, cluster, **ignore):
        if not max(xdr_enable.itervalues()):
            return

        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "XDR Information"
        column_names = ('node', 'build', ('_bytes-shipped', 'Data Shipped'),
                        '_free-dlog-pct', ('_lag-secs', 'Lag (sec)'),
                        '_req-outstanding', '_req-relog', '_req-shipped',
                        'cur_throughput',
                        ('latency_avg_ship', 'Avg Latency (ms)'),
                        '_xdr-uptime')

        t = Table(title, column_names)

        t.addDataSource('_xdr-uptime',
                        Extractors.timeExtractor(('xdr-uptime', 'xdr_uptime')))

        t.addDataSource(
            '_bytes-shipped',
            Extractors.byteExtractor(
                ('esmt-bytes-shipped', 'esmt_bytes_shipped')))

        t.addDataSource('_lag-secs',
                        Extractors.timeExtractor('timediff_lastship_cur_secs'))

        t.addDataSource('_req-outstanding',
                        Extractors.sifExtractor('stat_recs_outstanding'))

        t.addDataSource('_req-relog',
                        Extractors.sifExtractor('stat_recs_relogged'))

        t.addDataSource('_req-shipped',
                        Extractors.sifExtractor('stat_recs_shipped'))

        # Highlight red if lag is 300 seconds (5 minutes) or more
        t.addCellAlert(
            '_lag-secs',
            lambda data: int(data['timediff_lastship_cur_secs']) >= 300)

        t.addCellAlert('node',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        row = None
        for node_key, row in stats.iteritems():
            if isinstance(row, Exception):
                row = {}

            node = cluster.getNode(node_key)[0]
            if xdr_enable[node_key]:
                if row:
                    row['build'] = builds[node_key]
                    if 'free_dlog_pct' in row:
                        row['_free-dlog-pct'] = row['free_dlog_pct'][:-1]
                    else:
                        row['_free-dlog-pct'] = row['free-dlog-pct'][:-1]
                else:
                    row = {}
                    row['node-id'] = node.node_id
                row['real_node_id'] = node.node_id
            else:
                continue

            row['node'] = prefixes[node_key]

            t.insertRow(row)
        print t
Example #24
    def test_tableHasHeaders(self):
        from lib.table import Table
        table = self.basicTestTable()  # call the helper; the bare attribute is just a bound method
        self.assertTrue(table.hasHeaders)
        self.assertFalse(Table().hasHeaders)
Example #25
    def render(s):
        s.place(Table([48, 48, 27]), rel_to='BL', offset=[32, 32, 0])

        return None
Example #26
from lib.dbengine import DBEngine
from lib.table import Table
#from lib.query import Query
import json
import jsonlines

# create the database engine
DB = DBEngine("data/train.db")

# load the tables
tables = [json.loads(x) for x in open("data/train.tables.jsonl")]
print(len(tables))

# create a Table from the first entry
table = Table(tables[0]['id'], tables[0]['header'], tables[0]['types'], tables[0]['rows'])
print(table)

# sample some queries
print(table.generate_queries(DB.conn, n=5, max_tries=5, lower=True))
queries = table.generate_queries(DB.conn, n=5, max_tries=5, lower=True)
print(str(queries[0][0]))
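
Judging from the query[0].sel_index and query[1] indexing in augment_data (Example #3), each element returned by generate_queries is a (Query, execution result) pair.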
Example #27
#-*- coding:utf-8 -*-
from params.utrgParams import UtrgParams
from params.utrmParams import UtrmProParams
from params.utrmParams import UtrmPackParams
from lib.sqlUtil import SqlUtil
from lib.pro import Pro
from lib.pack import Pack
from lib.table import Table
from lib.utrRequest import UtrRequest
from time import sleep

SqlUtil.init()
tablePro = Table('test-utrm.xlsx')
tableProG = Table('test-utrg.xlsx')
pro = Pro()
pro.param = tablePro.param

utrmProParams = UtrmProParams(tablePro)
utrgParams = UtrgParams(tableProG)

tablePack = Table('test-utrms.xlsx')
pack = Pack()
utrmPackParams = UtrmPackParams(tablePack)

utrR = UtrRequest(pro=pro)
utrRpack = UtrRequest(pack=pack)
userGroup = 'test01,test02'  # accounts to register; separate multiple users with commas

def test():
	utrR.login(utrgParams.utrInterFaceAddress['login'],utrgParams.loginParam)
	utrR.pro.proId = utrR.utrRequest(utrgParams.newPro)
Example #28
from lib.table import Table

TABLES = {
    'public.a': Table(schema_path='examples/simple/a.sql', scaler=1),
    'public.b': Table(schema_path='examples/simple/b.sql', scaler=10)
}

GRAPH = {
    'public.a': [
        'public.b'
    ],
    'public.b': []
}

ENTRYPOINT = 'public.a'
Example #29
    def infoNamespace(stats, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Namespace Information"
        column_names = ('node', 'namespace', ('available_pct', 'Avail%'),
                        ('evicted-objects', 'Evictions'),
                        ('_master-objects', 'Master Objects'),
                        ('_prole-objects', 'Replica Objects'), 'repl-factor',
                        'stop-writes', ('_used-bytes-disk', 'Disk Used'),
                        ('_used-disk-pct', 'Disk Used%'),
                        ('high-water-disk-pct', 'HWM Disk%'),
                        ('_used-bytes-memory', 'Mem Used'),
                        ('_used-mem-pct', 'Mem Used%'),
                        ('high-water-memory-pct', 'HWM Mem%'),
                        ('stop-writes-pct', 'Stop Writes%'))

        t = Table(title, column_names, group_by=1)
        t.addDataSource('_used-bytes-disk',
                        Extractors.byteExtractor('used-bytes-disk'))
        t.addDataSource('_used-bytes-memory',
                        Extractors.byteExtractor('used-bytes-memory'))

        t.addDataSource('_master-objects',
                        Extractors.sifExtractor('master-objects'))

        t.addDataSource('_prole-objects',
                        Extractors.sifExtractor('prole-objects'))

        t.addDataSource('_used-disk-pct',
                        lambda data: 100 - int(data['free-pct-disk']))

        t.addDataSource('_used-mem-pct',
                        lambda data: 100 - int(data['free-pct-memory']))

        t.addCellAlert('available_pct',
                       lambda data: int(data['available_pct']) <= 10)

        t.addCellAlert('stop-writes',
                       lambda data: data['stop-writes'] != 'false')

        t.addCellAlert(
            '_used-disk-pct', lambda data: int(data['_used-disk-pct']) >= int(
                data['high-water-disk-pct']))

        t.addCellAlert(
            '_used-mem-pct', lambda data: (100 - int(data['free-pct-memory']))
            >= int(data['high-water-memory-pct']))

        t.addCellAlert(
            '_used-disk-pct', lambda data: (100 - int(data['free-pct-disk']))
            >= int(data['high-water-disk-pct']))

        t.addCellAlert('node',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            node = cluster.getNode(node_key)[0]
            if isinstance(n_stats, Exception):
                t.insertRow({
                    'real_node_id': node.node_id,
                    'node': prefixes[node_key]
                })
                continue

            for ns, ns_stats in n_stats.iteritems():
                if isinstance(ns_stats, Exception):
                    row = {}
                else:
                    row = ns_stats

                row['namespace'] = ns
                row['real_node_id'] = node.node_id
                row['node'] = prefixes[node_key]
                t.insertRow(row)
        print t
Example #30
    def infoXDR(stats, builds, xdr_enable, cluster, **ignore):
        if not max(xdr_enable.itervalues()):
            return

        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "XDR Information"
        column_names = ('node'
                        ,'build'
                        ,('_bytes-shipped', 'Data Shipped')
                        ,'_free-dlog-pct'
                        ,('_lag-secs', 'Lag (sec)')
                        ,'_req-outstanding'
                        ,'_req-relog'
                        ,'_req-shipped'
                        ,'cur_throughput'
                        ,('latency_avg_ship', 'Avg Latency (ms)')
                        ,'_xdr-uptime')

        t = Table(title, column_names)

        t.addDataSource('_xdr-uptime', Extractors.timeExtractor('xdr-uptime'))

        t.addDataSource('_bytes-shipped',
                        Extractors.byteExtractor('esmt-bytes-shipped'))

        t.addDataSource('_lag-secs',
                        Extractors.timeExtractor('timediff_lastship_cur_secs'))

        t.addDataSource('_req-outstanding',
                        Extractors.sifExtractor('stat_recs_outstanding'))

        t.addDataSource('_req-relog',
                        Extractors.sifExtractor('stat_recs_relogged'))

        t.addDataSource('_req-shipped',
                        Extractors.sifExtractor('stat_recs_shipped'))

        # Highlight red if lag is 300 seconds (5 minutes) or more
        t.addCellAlert('_lag-secs'
                       , lambda data: int(data['timediff_lastship_cur_secs']) >= 300)

        t.addCellAlert('node'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        row = None
        for node_key, row in stats.iteritems():
            if isinstance(row, Exception):
                row = {}

            node = cluster.getNode(node_key)[0]
            if xdr_enable[node_key]:
                if row:
                    row['build'] = builds[node_key]
                    row['_free-dlog-pct'] = row['free-dlog-pct'][:-1]
                else:
                    row = {}
                    row['node-id'] = node.node_id
                row['real_node_id'] = node.node_id
            else:
                continue

            row['node'] = prefixes[node_key]

            t.insertRow(row)
        print t
Example #31
    def infoService(stats, builds, visibilities, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Service Information"
        column_names = ('node', 'build', 'cluster_size', 'cluster_visibility',
                        '_cluster_integrity', ('free-pct-disk', 'Free Disk%'),
                        ('free-pct-memory', 'Free Mem%'),
                        ('_migrates', 'Migrates (tx,rx,q)'),
                        ('_paxos_principal', 'Principal'), '_objects',
                        '_uptime')

        t = Table(title, column_names)
        t.addDataSource(
            '_migrates',
            lambda data: "(%s,%s,%s)" % (
                data.get('migrate_tx_partitions_remaining', False)
                or data['migrate_progress_send'],
                data.get('migrate_rx_partitions_remaining', False)
                or data['migrate_progress_recv'],
                int(data.get('migrate_progress_send', 0))
                + int(data.get('migrate_progress_recv', 0))))
        t.addDataSource('_objects', Extractors.sifExtractor('objects'))
        t.addDataSource('_cluster_integrity',
                        lambda data: data['cluster_integrity'] == 'true')
        t.addDataSource('_uptime', Extractors.timeExtractor('uptime'))

        t.addCellAlert('cluster_visibility',
                       lambda data: data['cluster_visibility'] is not True)

        t.addCellAlert('_cluster_integrity',
                       lambda data: data['cluster_integrity'] != 'true')

        t.addCellAlert('free-pct-disk',
                       lambda data: int(data['free-pct-disk']) < 40)

        t.addCellAlert('free-pct-memory',
                       lambda data: int(data['free-pct-memory']) < 40)

        t.addCellAlert('node',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            if isinstance(n_stats, Exception):
                n_stats = {}

            node = cluster.getNode(node_key)[0]
            row = n_stats
            row['real_node_id'] = node.node_id
            row['node'] = prefixes[node_key]
            try:
                paxos_node = cluster.getNode(row['paxos_principal'])[0]
                row['_paxos_principal'] = prefixes[paxos_node.key]
            except KeyError:
                # The principal is a node we currently do not know about
                # So return the principal ID
                try:
                    row['_paxos_principal'] = row['paxos_principal']
                except KeyError:
                    pass

            build = builds[node_key]
            if not isinstance(build, Exception):
                row['build'] = build

            if node_key in visibilities:
                row['cluster_visibility'] = visibilities[node_key]

            t.insertRow(row)

        print t
Example #32
    def infoNamespace(stats, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Namespace Information"
        column_names = ('node'
                        ,'namespace'
                        ,('available_pct', 'Avail%')
                        ,('evicted-objects', 'Evictions')
                        ,'_objects'
                        ,'repl-factor'
                        ,'stop-writes'
                        ,('_used-bytes-disk', 'Disk Used')
                        ,('_used-disk-pct', 'Disk Used%')
                        ,('high-water-disk-pct', 'HWM Disk%')
                        ,('_used-bytes-memory', 'Mem Used')
                        ,('_used-mem-pct', 'Mem Used%')
                        ,('high-water-memory-pct', 'HWM Mem%')
                        ,('stop-writes-pct', 'Stop Writes%'))

        t = Table(title, column_names, group_by=1)
        t.addDataSource('_used-bytes-disk'
                        ,Extractors.byteExtractor('used-bytes-disk'))
        t.addDataSource('_used-bytes-memory'
                        ,Extractors.byteExtractor(
                            'used-bytes-memory'))
        t.addDataSource('_objects'
                        ,Extractors.sifExtractor('objects'))

        t.addDataSource('_used-disk-pct'
                        , lambda data: 100 - int(data['free-pct-disk']))

        t.addDataSource('_used-mem-pct'
                        , lambda data: 100 - int(data['free-pct-memory']))

        t.addCellAlert('available_pct'
                       , lambda data: int(data['available_pct']) <= 10)

        t.addCellAlert('stop-writes'
                       , lambda data: data['stop-writes'] != 'false')

        t.addCellAlert('_used-disk-pct'
                       , lambda data: int(data['_used-disk-pct']) >= int(data['high-water-disk-pct']))

        t.addCellAlert('_used-mem-pct'
                       , lambda data: (100 - int(data['free-pct-memory'])) >= int(data['high-water-memory-pct']))

        t.addCellAlert('_used-disk-pct'
                       , lambda data: (100 - int(data['free-pct-disk'])) >= int(data['high-water-disk-pct']))

        t.addCellAlert('node'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            node = cluster.getNode(node_key)[0]
            if isinstance(n_stats, Exception):
                t.insertRow({'real_node_id':node.node_id
                             , 'node':prefixes[node_key]})
                continue

            for ns, ns_stats in n_stats.iteritems():
                if isinstance(ns_stats, Exception):
                    row = {}
                else:
                    row = ns_stats

                row['namespace'] = ns
                row['real_node_id'] = node.node_id
                row['node'] = prefixes[node_key]
                t.insertRow(row)
        print t
Example #33
    def infoService(stats, builds, visibilities, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Service Information"
        column_names = ('node'
                        , 'build'
                        , 'cluster_size'
                        , 'cluster_visibility'
                        , '_cluster_integrity'
                        , ('free-pct-disk', 'Free Disk%')
                        , ('free-pct-memory', 'Free Mem%')
                        , '_migrates'
                        , ('_paxos_principal', 'Principal')
                        , '_objects'
                        , '_uptime')

        t = Table(title, column_names)
        t.addDataSource('_migrates'
                        , lambda data: "(%s,%s)" % (data['migrate_progress_send']
                                                    , data['migrate_progress_recv']))
        t.addDataSource('_objects'
                        , Extractors.sifExtractor('objects'))
        t.addDataSource('_cluster_integrity'
                        , lambda data: data['cluster_integrity'] == 'true')
        t.addDataSource('_uptime', Extractors.timeExtractor('uptime'))

        t.addCellAlert('cluster_visibility'
                       , lambda data: data['cluster_visibility'] is not True)

        t.addCellAlert('_cluster_integrity'
                       ,lambda data: data['cluster_integrity'] != 'true')

        t.addCellAlert('free-pct-disk'
                       ,lambda data: int(data['free-pct-disk']) < 40)

        t.addCellAlert('free-pct-memory'
                       ,lambda data: int(data['free-pct-memory']) < 40)

        t.addCellAlert('node'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            if isinstance(n_stats, Exception):
                n_stats = {}

            node = cluster.getNode(node_key)[0]
            row = n_stats
            row['real_node_id'] = node.node_id
            row['node'] = prefixes[node_key]
            try:
                paxos_node = cluster.getNode(row['paxos_principal'])[0]
                row['_paxos_principal'] = prefixes[paxos_node.key]
            except KeyError:
                # The principal is a node we currently do not know about
                # So return the principal ID
                try:
                    row['_paxos_principal'] = row['paxos_principal']
                except KeyError:
                    pass

            build = builds[node_key]
            if not isinstance(build, Exception):
                row['build'] = build

            if node_key in visibilities:
                row['cluster_visibility'] = visibilities[node_key]

            t.insertRow(row)

        print t