Example #1
    def infoService(stats, builds, visibilities, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Service Information"
        column_names = ('node'
                        , 'build'
                        , 'cluster_size'
                        , 'cluster_visibility'
                        , '_cluster_integrity'
                        , ('free-pct-disk', 'Free Disk%')
                        , ('free-pct-memory', 'Free Mem%')
                        , '_migrates'
                        , ('_paxos_principal', 'Principal')
                        , '_objects'
                        , '_uptime')

        t = Table(title, column_names)
        t.addDataSource('_migrates'
                        ,lambda data:
                        "(%s,%s)"%(data['migrate_progress_send']
                                   ,data['migrate_progress_recv']))
        t.addDataSource('_objects'
                        ,Extractors.sifExtractor('objects'))
        t.addDataSource('_cluster_integrity'
                        , lambda data:
                        data['cluster_integrity'] == 'true')
        t.addDataSource('_uptime', Extractors.timeExtractor('uptime'))

        t.addCellAlert('cluster_visibility'
                       , lambda data: data['cluster_visibility'] is not True)

        t.addCellAlert('_cluster_integrity'
                       ,lambda data: data['cluster_integrity'] != 'true')

        t.addCellAlert('free-pct-disk'
                       ,lambda data: int(data['free-pct-disk']) < 40)

        t.addCellAlert('free-pct-memory'
                       ,lambda data: int(data['free-pct-memory']) < 40)

        t.addCellAlert('node'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            if isinstance(n_stats, Exception):
                n_stats = {}

            node = cluster.getNode(node_key)[0]
            row = n_stats
            row['real_node_id'] = node.node_id
            row['node'] = prefixes[node_key]
            try:
                paxos_node = cluster.getNode(row['paxos_principal'])[0]
                row['_paxos_principal'] = prefixes[paxos_node.key]
            except KeyError:
                # The principal is a node we currently do not know about
                # So return the principal ID
                try:
                    row['_paxos_principal'] = row['paxos_principal']
                except KeyError:
                    pass

            build = builds[node_key]
            if not isinstance(build, Exception):
                row['build'] = build

            if node_key in visibilities:
                row['cluster_visibility'] = visibilities[node_key]

            t.insertRow(row)

        print t
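
The Table, Extractors, and terminal helpers referenced above come from the surrounding asadm code base and are not shown on this page. The snippet below is a minimal, self-contained sketch of the callback contract these examples appear to assume: columns prefixed with '_' are filled in by a data-source callable that receives the row dictionary, and cell alerts are per-row predicates. SketchTable and its render method are hypothetical names used only for illustration, not the real Table implementation.

    # Minimal sketch of the assumed Table callback contract (illustrative only,
    # not the asadm implementation).
    class SketchTable(object):
        def __init__(self, title, column_names):
            self.title = title
            self.column_names = column_names
            self.data_sources = {}  # column -> callable(row_dict) -> value
            self.cell_alerts = {}   # column -> callable(row_dict) -> bool
            self.rows = []

        def addDataSource(self, column, func):
            # '_'-prefixed columns are derived from the raw stats at render time.
            self.data_sources[column] = func

        def addCellAlert(self, column, func, color=None):
            # Predicate evaluated against each row; True marks the cell.
            self.cell_alerts[column] = func

        def insertRow(self, row):
            self.rows.append(row)

        def render(self):
            flagged = []
            for row in self.rows:
                # The row itself is what gets passed to each callback, which is
                # why the lambdas above should read from 'data', not 'row'.
                for column, func in self.data_sources.items():
                    row[column] = func(row)
                for column, func in self.cell_alerts.items():
                    if func(row):
                        flagged.append((row.get('node'), column))
            return "%s: %d row(s), %d flagged cell(s)" % (
                self.title, len(self.rows), len(flagged))

    t = SketchTable("Service Information", ('node', '_uptime'))
    t.addDataSource('_uptime', lambda data: int(data['uptime']) // 3600)
    t.addCellAlert('_uptime', lambda data: int(data['uptime']) < 60)
    t.insertRow({'node': 'n1', 'uptime': '7200'})
    print(t.render())  # Service Information: 1 row(s), 0 flagged cell(s)
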
Example #2
    def infoXDR(stats, builds, xdr_enable, cluster, **ignore):
        if not max(xdr_enable.itervalues()):
            return

        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "XDR Information"
        column_names = ('node'
                        ,'build'
                        ,('_bytes-shipped', 'Data Shipped')
                        ,'_free-dlog-pct'
                        ,('_lag-secs', 'Lag (sec)')
                        ,'_req-outstanding'
                        ,'_req-relog'
                        ,'_req-shipped'
                        ,'cur_throughput'
                        ,('latency_avg_ship', 'Avg Latency (ms)')
                        ,'_xdr-uptime')

        t = Table(title, column_names)

        t.addDataSource('_xdr-uptime', Extractors.timeExtractor('xdr-uptime'))

        t.addDataSource('_bytes-shipped',
                        Extractors.byteExtractor('esmt-bytes-shipped'))

        t.addDataSource('_lag-secs',
                        Extractors.timeExtractor('timediff_lastship_cur_secs'))

        t.addDataSource('_req-outstanding',
                        Extractors.sifExtractor('stat_recs_outstanding'))

        t.addDataSource('_req-relog',
                        Extractors.sifExtractor('stat_recs_relogged'))

        t.addDataSource('_req-shipped',
                        Extractors.sifExtractor('stat_recs_shipped'))

        # Highlight in red if the lag is 5 minutes (300 seconds) or more
        t.addCellAlert('_lag-secs'
                       , lambda data: int(data['timediff_lastship_cur_secs']) >= 300)

        t.addCellAlert('node'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        row = None
        for node_key, row in stats.iteritems():
            if isinstance(row, Exception):
                row = {}

            node = cluster.getNode(node_key)[0]
            if xdr_enable[node_key]:
                if row:
                    row['build'] = builds[node_key]
                    row['_free-dlog-pct'] = row['free-dlog-pct'][:-1]
                else:
                    row = {}
                    row['node-id'] = node.node_id
                row['real_node_id'] = node.node_id
            else:
                continue

            row['node'] = prefixes[node_key]

            t.insertRow(row)
        print t
Example #3
    def infoNamespace(stats, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Namespace Information"
        column_names = ('node'
                        ,'namespace'
                        ,('available_pct', 'Avail%')
                        ,('evicted-objects', 'Evictions')
                        ,'_objects'
                        ,'repl-factor'
                        ,'stop-writes'
                        ,('_used-bytes-disk', 'Disk Used')
                        ,('_used-disk-pct', 'Disk Used%')
                        ,('high-water-disk-pct', 'HWM Disk%')
                        ,('_used-bytes-memory', 'Mem Used')
                        ,('_used-mem-pct', 'Mem Used%')
                        ,('high-water-memory-pct', 'HWM Mem%')
                        ,('stop-writes-pct', 'Stop Writes%'))

        t = Table(title, column_names, group_by=1)
        t.addDataSource('_used-bytes-disk'
                        ,Extractors.byteExtractor('used-bytes-disk'))
        t.addDataSource('_used-bytes-memory'
                        ,Extractors.byteExtractor('used-bytes-memory'))
        t.addDataSource('_objects'
                        ,Extractors.sifExtractor('objects'))

        t.addDataSource('_used-disk-pct'
                        , lambda data: 100 - int(data['free-pct-disk']))

        t.addDataSource('_used-mem-pct'
                        , lambda data: 100 - int(data['free-pct-memory']))

        t.addCellAlert('available_pct'
                       , lambda data: int(data['available_pct']) <= 10)

        t.addCellAlert('stop-writes'
                       , lambda data: data['stop-writes'] != 'false')

        t.addCellAlert('_used-disk-pct'
                       , lambda data: int(data['_used-disk-pct']) >= int(data['high-water-disk-pct']))

        t.addCellAlert('_used-mem-pct'
                       , lambda data: (100 - int(data['free-pct-memory'])) >= int(data['high-water-memory-pct']))

        t.addCellAlert('_used-disk-pct'
                       , lambda data: (100 - int(data['free-pct-disk'])) >= int(data['high-water-disk-pct']))

        t.addCellAlert('node'
                       ,lambda data: data['real_node_id'] == principal
                       , color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            node = cluster.getNode(node_key)[0]
            if isinstance(n_stats, Exception):
                t.insertRow({'real_node_id':node.node_id
                             , 'node':prefixes[node_key]})
                continue

            for ns, ns_stats in n_stats.iteritems():
                if isinstance(ns_stats, Exception):
                    row = {}
                else:
                    row = ns_stats

                row['namespace'] = ns
                row['real_node_id'] = node.node_id
                row['node'] = prefixes[node_key]
                t.insertRow(row)
        print t
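
A quick worked check of the namespace disk alert in infoNamespace, using made-up values: with 42% free disk the derived used percentage is 58, which meets a 50% high-water mark, so the cell would be flagged.

    # Worked check of the disk high-water-mark alert (values are made up).
    ns_row = {'free-pct-disk': '42', 'high-water-disk-pct': '50'}
    used_disk_pct = 100 - int(ns_row['free-pct-disk'])            # 58
    print(used_disk_pct >= int(ns_row['high-water-disk-pct']))    # True -> cell flagged
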
Example #4
    def infoService(stats, builds, visibilities, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Service Information"
        column_names = ('node', 'build', 'cluster_size', 'cluster_visibility',
                        '_cluster_integrity',
                        ('free-pct-disk', 'Free Disk%'),
                        ('free-pct-memory', 'Free Mem%'),
                        ('_migrates', 'Migrates (tx,rx,q)'),
                        ('_paxos_principal', 'Principal'),
                        '_objects', '_uptime')

        t = Table(title, column_names)
        t.addDataSource(
            '_migrates',
            lambda data: "(%s,%s,%s)" % (
                data.get('migrate_tx_partitions_remaining', False)
                or data['migrate_progress_send'],
                data.get('migrate_rx_partitions_remaining', False)
                or data['migrate_progress_recv'],
                int(data.get('migrate_progress_send', 0))
                + int(data.get('migrate_progress_recv', 0))))
        t.addDataSource('_objects', Extractors.sifExtractor('objects'))
        t.addDataSource('_cluster_integrity',
                        lambda data: data['cluster_integrity'] == 'true')
        t.addDataSource('_uptime', Extractors.timeExtractor('uptime'))

        t.addCellAlert('cluster_visibility',
                       lambda data: data['cluster_visibility'] is not True)

        t.addCellAlert('_cluster_integrity',
                       lambda data: data['cluster_integrity'] != 'true')

        t.addCellAlert('free-pct-disk',
                       lambda data: int(data['free-pct-disk']) < 40)

        t.addCellAlert('free-pct-memory',
                       lambda data: int(data['free-pct-memory']) < 40)

        t.addCellAlert('node',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            if isinstance(n_stats, Exception):
                n_stats = {}

            node = cluster.getNode(node_key)[0]
            row = n_stats
            row['real_node_id'] = node.node_id
            row['node'] = prefixes[node_key]
            try:
                paxos_node = cluster.getNode(row['paxos_principal'])[0]
                row['_paxos_principal'] = prefixes[paxos_node.key]
            except KeyError:
                # The principal is a node we currently do not know about
                # So return the principal ID
                try:
                    row['_paxos_principal'] = row['paxos_principal']
                except KeyError:
                    pass

            build = builds[node_key]
            if not isinstance(build, Exception):
                row['build'] = build

            if node_key in visibilities:
                row['cluster_visibility'] = visibilities[node_key]

            t.insertRow(row)

        print t
Example #5
    def infoXDR(stats, builds, xdr_enable, cluster, **ignore):
        if not max(xdr_enable.itervalues()):
            return

        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "XDR Information"
        column_names = ('node', 'build',
                        ('_bytes-shipped', 'Data Shipped'),
                        '_free-dlog-pct',
                        ('_lag-secs', 'Lag (sec)'),
                        '_req-outstanding', '_req-relog', '_req-shipped',
                        'cur_throughput',
                        ('latency_avg_ship', 'Avg Latency (ms)'),
                        '_xdr-uptime')

        t = Table(title, column_names)

        t.addDataSource('_xdr-uptime',
                        Extractors.timeExtractor(('xdr-uptime', 'xdr_uptime')))

        t.addDataSource(
            '_bytes-shipped',
            Extractors.byteExtractor(
                ('esmt-bytes-shipped', 'esmt_bytes_shipped')))

        t.addDataSource('_lag-secs',
                        Extractors.timeExtractor('timediff_lastship_cur_secs'))

        t.addDataSource('_req-outstanding',
                        Extractors.sifExtractor('stat_recs_outstanding'))

        t.addDataSource('_req-relog',
                        Extractors.sifExtractor('stat_recs_relogged'))

        t.addDataSource('_req-shipped',
                        Extractors.sifExtractor('stat_recs_shipped'))

        # Highlight in red if the lag is 5 minutes (300 seconds) or more
        t.addCellAlert(
            '_lag-secs',
            lambda data: int(data['timediff_lastship_cur_secs']) >= 300)

        t.addCellAlert('node',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        row = None
        for node_key, row in stats.iteritems():
            if isinstance(row, Exception):
                row = {}

            node = cluster.getNode(node_key)[0]
            if xdr_enable[node_key]:
                if row:
                    row['build'] = builds[node_key]
                    if 'free_dlog_pct' in row:
                        row['_free-dlog-pct'] = row['free_dlog_pct'][:-1]
                    else:
                        row['_free-dlog-pct'] = row['free-dlog-pct'][:-1]
                else:
                    row = {}
                    row['node-id'] = node.node_id
                row['real_node_id'] = node.node_id
            else:
                continue

            row['node'] = prefixes[node_key]

            t.insertRow(row)
        print t
Example #6
    def infoNamespace(stats, cluster, **ignore):
        prefixes = cluster.getPrefixes()
        principal = cluster.getExpectedPrincipal()

        title = "Namespace Information"
        column_names = ('node', 'namespace',
                        ('available_pct', 'Avail%'),
                        ('evicted-objects', 'Evictions'),
                        ('_master-objects', 'Master Objects'),
                        ('_prole-objects', 'Replica Objects'),
                        'repl-factor', 'stop-writes',
                        ('_used-bytes-disk', 'Disk Used'),
                        ('_used-disk-pct', 'Disk Used%'),
                        ('high-water-disk-pct', 'HWM Disk%'),
                        ('_used-bytes-memory', 'Mem Used'),
                        ('_used-mem-pct', 'Mem Used%'),
                        ('high-water-memory-pct', 'HWM Mem%'),
                        ('stop-writes-pct', 'Stop Writes%'))

        t = Table(title, column_names, group_by=1)
        t.addDataSource('_used-bytes-disk',
                        Extractors.byteExtractor('used-bytes-disk'))
        t.addDataSource('_used-bytes-memory',
                        Extractors.byteExtractor('used-bytes-memory'))

        t.addDataSource('_master-objects',
                        Extractors.sifExtractor('master-objects'))

        t.addDataSource('_prole-objects',
                        Extractors.sifExtractor('prole-objects'))

        t.addDataSource('_used-disk-pct',
                        lambda data: 100 - int(data['free-pct-disk']))

        t.addDataSource('_used-mem-pct',
                        lambda data: 100 - int(data['free-pct-memory']))

        t.addCellAlert('available_pct',
                       lambda data: int(data['available_pct']) <= 10)

        t.addCellAlert('stop-writes',
                       lambda data: data['stop-writes'] != 'false')

        t.addCellAlert(
            '_used-disk-pct', lambda data: int(data['_used-disk-pct']) >= int(
                data['high-water-disk-pct']))

        t.addCellAlert(
            '_used-mem-pct', lambda data: (100 - int(data['free-pct-memory']))
            >= int(data['high-water-memory-pct']))

        t.addCellAlert(
            '_used-disk-pct', lambda data: (100 - int(data['free-pct-disk']))
            >= int(data['high-water-disk-pct']))

        t.addCellAlert('node',
                       lambda data: data['real_node_id'] == principal,
                       color=terminal.fg_green)

        for node_key, n_stats in stats.iteritems():
            node = cluster.getNode(node_key)[0]
            if isinstance(n_stats, Exception):
                t.insertRow({
                    'real_node_id': node.node_id,
                    'node': prefixes[node_key]
                })
                continue

            for ns, ns_stats in n_stats.iteritems():
                if isinstance(ns_stats, Exception):
                    row = {}
                else:
                    row = ns_stats

                row['namespace'] = ns
                row['real_node_id'] = node.node_id
                row['node'] = prefixes[node_key]
                t.insertRow(row)
        print t
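
The Extractors helpers (byteExtractor, timeExtractor, sifExtractor) are likewise defined elsewhere in asadm. The stand-ins below only illustrate the behavior these examples appear to assume, namely that each helper returns a callable which pulls a raw stat out of the row and formats it for display; they are not the real implementations, and the real helpers in Example #5 can also accept a tuple of alternative stat names.

    # Illustrative stand-ins for the Extractors helpers used above (assumed
    # behavior only, not the asadm implementation).
    def byte_extractor(key):
        def extract(data):
            n = float(data[key])
            for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
                if n < 1024.0:
                    return "%.2f %s" % (n, unit)
                n /= 1024.0
            return "%.2f PB" % n
        return extract

    def time_extractor(key):
        def extract(data):
            secs = int(data[key])
            return "%02d:%02d:%02d" % (secs // 3600, (secs % 3600) // 60, secs % 60)
        return extract

    print(byte_extractor('used-bytes-disk')({'used-bytes-disk': '1073741824'}))  # 1.00 GB
    print(time_extractor('uptime')({'uptime': '3661'}))  # 01:01:01
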