def do(sshc, pid_group_count):
    processes = ParserProcesses(sshc).get2()
    cpus = {}

    # group the PIDs so that several of them can be sent in one command
    for i in range(0, len(processes), pid_group_count):
        pids = [p['PID'] for p in processes[i:i + pid_group_count]]

        for stat in ParserProcessStat(sshc).get(pids):
            if stat['current_CPU'] not in cpus: cpus[stat['current_CPU']] = []
            cpus[stat['current_CPU']].append("'%s'[%s](%i)" % (
                stat['name'],
                stat['state'],
                stat['PID'],
            ))

    etime = ParserCurrentTime(sshc).get()

    for cpu in sorted(cpus.keys()):
        print "[%s] CPU#%i: %s" % (
            etime,
            cpu,
            " ".join(cpus[cpu]),
        )

    sys.stdout.flush()
    return etime
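
# A minimal standalone sketch (not part of the original tool) of the PID-grouping
# idea used above: split the process list into fixed-size chunks so several PIDs
# can be queried with a single remote command. The names mirror those in do().
def group_pids(processes, pid_group_count):
    groups = []
    for i in range(0, len(processes), pid_group_count):
        groups.append([p['PID'] for p in processes[i:i + pid_group_count]])
    return groups

# e.g. group_pids([{'PID': n} for n in range(5)], 2) -> [[0, 1], [2, 3], [4]]
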
def do(sshc, known_sessions, status, direction, max_age):
    etime = ParserCurrentTime(sshc).get()

    matched_sessions = {}

    latest_sessions = ParserIPSecGW(sshc).get()
    for gw in latest_sessions:
        if gw['cookie_local'] == None or gw['cookie_remote'] == None: continue
        if status != None and gw['status'] not in status:
            #print "ignored [status] for '%s'" % (gw['name'],)
            continue
        if direction != None and gw['direction'] not in direction:
            #print "ignored [direction] for '%s'" % (gw['name'],)
            continue

        cookie = "%s/%s" % (
            gw['cookie_local'],
            gw['cookie_remote'],
        )
        src = "%s:%i" % (
            gw['source_ip'],
            gw['source_port'],
        )
        dst = "%s:%i" % (
            gw['destination_ip'],
            gw['destination_port'],
        )
        session = "%-16s %-20s %-34s %-22s -> %-22s %14s %14s" % (
            gw['vdom'],
            gw['name'],
            cookie,
            src,
            dst,
            gw['direction'],
            gw['status'],
        )

        if session not in known_sessions:
            if max_age == None or gw['created_ago'] <= max_age:
                print prepend_timestamp(
                    "{new}     " + session + " %10i" % (gw['created_ago'], ),
                    etime, 'ikegw')

            known_sessions[session] = {
                'created_ago': gw['created_ago'],
                'name': gw['name']
            }

        matched_sessions[session] = True

    # manage removed sessions
    for known in known_sessions.keys():
        if known not in matched_sessions:
            if max_age == None or known_sessions[known]['created_ago'] <= max_age:
                print prepend_timestamp(
                    "{deleted} " + known + " %10i" %
                    (known_sessions[known]['created_ago'], ), etime, 'ikegw')

            del known_sessions[known]
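
# A minimal sketch (not part of the original script) of the session tracking used
# above: compare the sessions seen in this cycle against the remembered ones to
# find which appeared and which disappeared. Plain sets stand in for the
# known_sessions/matched_sessions dictionaries.
def session_changes(previous, current):
    previous = set(previous)
    current = set(current)
    added = current - previous
    removed = previous - current
    return added, removed

# e.g. session_changes(['gw-a', 'gw-b'], ['gw-b', 'gw-c']) -> ({'gw-c'}, {'gw-a'})
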
def do(sshc, info, show_raw):
    etime = ParserCurrentTime(sshc).get()

    s = ParserGenericLineSplit(sshc).get("\s*:\s*",
                                         "diag vpn ike stats",
                                         vdom="",
                                         key_index=0,
                                         value_index=1,
                                         value_type=int)
    ikesa_add = s['ha.ike.sa.add.tx.queued']
    ikesa_del = s['ha.ike.sa.del.tx']
    ipsecsa_add = s['ha.ipsec.sa.add.tx']
    ipsecsa_del = s['ha.ipsec.sa.del.tx']

    if 'ikesa_add' in info and 'ikesa_del' in info:
        prev_ike_add = info['ikesa_add']
        prev_ike_del = info['ikesa_del']
        prev_ipsec_add = info['ipsecsa_add']
        prev_ipsec_del = info['ipsecsa_del']

        diff_ike_add = ikesa_add - prev_ike_add
        diff_ike_del = ikesa_del - prev_ike_del
        diff_ipsec_add = ipsecsa_add - prev_ipsec_add
        diff_ipsec_del = ipsecsa_del - prev_ipsec_del

        if diff_ike_add > 0 or diff_ike_del > 0 or diff_ipsec_add > 0 or diff_ipsec_del > 0:
            if show_raw:
                print prepend_timestamp(
                    "IKE SAs: deleted %5i added %5i (raw deleted %8i added %8i), IPSEC SAs: deleted %5i added %5i (raw deleted %8i added %8i)"
                    % (
                        diff_ike_del,
                        diff_ike_add,
                        ikesa_del,
                        ikesa_add,
                        diff_ipsec_del,
                        diff_ipsec_add,
                        ipsecsa_del,
                        ipsecsa_add,
                    ), etime, 'vpnsa')
            else:
                print prepend_timestamp(
                    "IKE SAs: deleted %5i added %5i, IPSEC SAs: deleted %5i added %5i"
                    % (
                        diff_ike_del,
                        diff_ike_add,
                        diff_ipsec_del,
                        diff_ipsec_add,
                    ), etime, 'vpnsa')

    info['ikesa_add'] = ikesa_add
    info['ikesa_del'] = ikesa_del
    info['ipsecsa_add'] = ipsecsa_add
    info['ipsecsa_del'] = ipsecsa_del
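
# A minimal sketch (not part of the original script) of the delta reporting done
# above: keep the previously seen counter values in a dictionary and report only
# the difference since the last cycle. The counter names here are arbitrary examples.
def counter_deltas(info, current):
    deltas = {}
    for name, value in current.items():
        if name in info:
            deltas[name] = value - info[name]
        info[name] = value
    return deltas

# First call only fills the cache and returns {}, later calls return the increments:
#   counter_deltas(cache, {'ike_sa_add': 10})  -> {}
#   counter_deltas(cache, {'ike_sa_add': 13})  -> {'ike_sa_add': 3}
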
Example #4
def do(sshc, process_re):
    etime = ParserCurrentTime(sshc).get()

    processes = ParserProcesses(sshc).get()
    for process in processes:
        if not process_re.search(process['cmd']): continue
        print simple_command_with_timestamp(
            sshc, etime, "fnsysctl cat /proc/%i/stack" % (process['PID'], ),
            "'%s':%i:%s" % (
                process['cmd'],
                process['PID'],
                process['state'],
            ))
        sys.stdout.flush()
Example #5
def start(sshc, commands, vdom, outs):
    etime = ParserCurrentTime(sshc).get()

    for command in commands:
        g = re.search('^<(.*?)>\s*(.*?)\s*$', command)
        if not g:
            this_vdom = vdom
            this_command = command
        else:
            this_vdom = g.group(1)
            this_command = g.group(2)
            if this_vdom.lower() == 'global': this_vdom = None

        out = sshc.clever_exec(this_command, this_vdom)
        for line in out.split("\n"):
            for stream in outs:
                print >> stream, prepend_timestamp(line, etime, this_command)
                stream.flush()
Example #6
def do(sshc, silent, show_cpus, desc, show_time, colors):
    etime = ParserCurrentTime(sshc).get()

    # colors disabled?
    if not colors:
        global colours
        colours = {}

    # get the collected information:
    # 'cpus' is the total number of CPUs, 'irqs' is a map keyed by the IRQ number,
    # where each value is another map with 'cpus' (list of CPU numbers the IRQ
    # currently runs on) and 'handlers' (list of handler name strings)
    cpus, irqs = get(sshc, silent, colors)
    irqs_on_all_cpus = []

    # print one CPU on a line
    for cpu in range(cpus):
        # do we want to display only some cpus?
        if show_cpus != None and cpu not in show_cpus: continue

        line = "CPU %3d:" % (cpu, )

        # go through all the irqs and find those that run
        # on the current cpu id
        hcount = 0
        for irq in irqs.keys():
            if cpu not in irqs[irq]['cpus']: continue

            # ignore irqs that are currently handled by all the cpus
            if len(irqs[irq]['cpus']) == cpus:
                if irq not in irqs_on_all_cpus: irqs_on_all_cpus.append(irq)
                continue

            # get nicely formatted handlers
            pline, handlers = join_handlers(irqs[irq]['handlers'],
                                            irqs[irq]['cpus'], desc, True)
            line += pline
            hcount += handlers

        if desc == None or hcount > 0:
            if show_time: print prepend_timestamp(line, etime, "cpuint")
            else: print line
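
# An illustrative example (not taken from a real device) of the structure that
# get() is described as returning above: 'cpus' is the CPU count and 'irqs' maps
# each IRQ number to the CPUs it runs on and its handler names.
example_cpus = 4
example_irqs = {
    16: {'cpus': [0, 1, 2, 3], 'handlers': ['ehci_hcd:usb1']},
    25: {'cpus': [2], 'handlers': ['eth0-rx', 'eth0-tx']},
}
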
def do(sshc, ifaces, spec, info):
    change_view(info)

    etime = ParserCurrentTime(sshc).get()

    # if no interfaces were specified, select all of them
    if ifaces == None:
        check_ifaces = spec.ports.keys()
    else:
        check_ifaces = ifaces

    # select only those that are up
    up_ifaces = ParserSystemInterfaceList(sshc).get2(None, True)

    for iface in up_ifaces.keys():
        if iface not in check_ifaces: del up_ifaces[iface]

    # get counters
    buf_np6 = {}
    buf_counters = {}
    stats = {}
    for iface in up_ifaces:
        stats[iface] = {'collected_on': None}

        for t in ('front', 'npu', 'kernel'):
            # get counters
            if spec.ports[iface][t] == None:
                stats[iface][t] = None
            elif spec.ports[iface][t].source == spec.ports[iface][t].SRC_HWNIC:
                if iface not in buf_counters:
                    buf_counters[iface] = ParserNIC2(sshc).get(iface)
                stats[iface][t] = buf_counters[iface]['counters'][
                    spec.ports[iface][t].counter]
                stats[iface]['collected_on'] = buf_counters[iface][
                    'collected_on']
            elif spec.ports[iface][t].source == spec.ports[iface][
                    t].SRC_NP6_PORTSTATS:
                (npid, cname) = spec.ports[iface][t].counter.split('/', 1)
                # collect only necessary np6 and only once
                npid = int(npid)
                if npid not in buf_np6:
                    buf_np6[npid] = ParserNP6Links(sshc).get(npid)
                #
                stats[iface][t] = buf_np6[npid][cname]['counters']
                stats[iface]['collected_on'] = buf_np6[npid][cname][
                    'collected_on']
            else:
                stats[iface][t] = None

            # get speed
            if spec.ports[iface][t] == None:
                stats[iface][t] = None
            elif spec.ports[iface][t].maxspeed == spec.ports[iface][t].SPD_S1G:
                stats[iface][t]['speed'] = 1000
            elif spec.ports[iface][t].maxspeed == spec.ports[iface][
                    t].SPD_S10G:
                stats[iface][t]['speed'] = 10000
            elif spec.ports[iface][t].maxspeed == spec.ports[iface][
                    t].SPD_IFACE:
                if iface not in buf_counters:
                    buf_counters[iface] = ParserNIC2(sshc).get(iface)
                stats[iface][t]['speed'] = buf_counters[iface]['speed']
            else:
                stats[iface][t]['speed'] = 0

    # check if we have previous value
    if 'prev' not in info:
        info['prev'] = stats
        return
    else:
        prev = info['prev']

    # calculate diff
    diff = {}
    for iface in up_ifaces:
        diff[iface] = {}

        for t in ('front', 'npu', 'kernel'):
            try:
                diff[iface][t] = diff_counters(
                    prev[iface][t], stats[iface][t],
                    stats[iface]['collected_on'] - prev[iface]['collected_on'])
            except KeyError:
                del diff[iface]
                break

    # calculate npu drops
    buf_np6drops = {}
    for iface in diff.keys():
        if spec.ports[iface]['npudrops'] == None:
            diff[iface]['npudrops'] = None
        elif spec.ports[iface]['npudrops'].source == spec.ports[iface][
                'npudrops'].SRC_NP6_DROPS:
            npuid = spec.ports[iface]['npudrops'].npuid
            if npuid not in buf_np6drops:
                buf_np6drops[npuid] = ParserNP6Drops(sshc).get(npuid)
            diff[iface]['npudrops'] = {
                'dce': buf_np6drops[npuid]['dce']['summary'],
                'dce_changed': 0,
                'ano': buf_np6drops[npuid]['ano']['summary'],
                'ano_changed': 0,
                'hrx': buf_np6drops[npuid]['hrx']['summary'],
                'hrx_changed': 0,
            }

        if info['clear_drops'] and 'npudrops' in prev[iface] and prev[iface]['npudrops'] != None:
            for dc in ('dce', 'ano', 'hrx'):
                prev[iface]['npudrops'][dc] = 0
                prev[iface]['npudrops'][dc + "_changed"] = 0

        if (diff[iface]['npudrops'] != None) and (
                'npudrops' in prev[iface] and prev[iface]['npudrops'] != None):
            for dc in ('dce', 'ano', 'hrx'):
                if diff[iface]['npudrops'][dc] > 0:
                    diff[iface]['npudrops'][dc] += prev[iface]['npudrops'][dc]
                    diff[iface]['npudrops'][dc + "_changed"] = time.time()
                else:
                    diff[iface]['npudrops'][dc] = prev[iface]['npudrops'][dc]
                    diff[iface]['npudrops'][
                        dc + "_changed"] = prev[iface]['npudrops'][dc +
                                                                   "_changed"]

        stats[iface]['npudrops'] = diff[iface]['npudrops']

    info['clear_drops'] = False

    # calculate offloaded
    for iface in diff.keys():
        if ('front' not in diff[iface] or diff[iface]['front'] == None) or (
                'kernel' not in diff[iface] or diff[iface]['kernel'] == None):
            diff[iface]['offloaded'] = None
            continue
        else:
            diff[iface]['offloaded'] = {}

        for c in diff[iface]['front']:
            try:
                diff[iface]['offloaded'][c] = int(
                    round(100 - float(diff[iface]['kernel'][c] * 100) /
                          float(diff[iface]['front'][c])))
                if diff[iface]['offloaded'][c] < 0:
                    diff[iface]['offloaded'][c] = 0
            except ZeroDivisionError:
                diff[iface]['offloaded'][c] = 0

    # show
    show(diff, info, etime)

    # save for next cycle
    info['prev'] = stats
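
# A minimal sketch (not part of the original script) of the offload calculation
# above: the share of traffic the kernel never saw, i.e. how much was offloaded
# to the NPU, derived from the front-panel and kernel counters.
def offloaded_percent(front, kernel):
    try:
        pct = int(round(100 - float(kernel * 100) / float(front)))
    except ZeroDivisionError:
        return 0
    return max(pct, 0)

# e.g. offloaded_percent(1000, 250) -> 75 (3/4 of the packets bypassed the kernel)
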
Example #8
def do(sshc, cache, history, hz, raw, show_cpu):
	frags = ParserFragmentation(sshc).get()
	usage = ParserProcessCPU(sshc).get([])
	etime = ParserCurrentTime(sshc).get()
	
	if 'last' not in cache:
		cache['last'] = {
			'collected_on': etime.as_timestamp(),
			'frags': frags,
			'cpu' : usage,
		}
		return

	time_difference = etime.as_timestamp() - cache['last']['collected_on']

	overall_cpus = {}
	for tmp in ['user', 'system', 'idle', 'iowait', 'irq', 'softirq']:
		overall_cpus[tmp] = int(round(((usage['global'][tmp] - cache['last']['cpu']['global'][tmp])*100)/(time_difference*hz)))

	pdiff = {}
	for p in frags['frags']:
		# a key present now but missing in the previous snapshot cannot be diffed
		if p not in cache['last']['frags']['frags']:
			print >>sys.stderr, 'Error: fragmentation key %s missing in previous statistics' % (p,)
			return

		if raw:
			pdiff[p] = frags['frags'][p] - cache['last']['frags']['frags'][p]
		else:
			pdiff[p] = int(round((((frags['frags'][p] - cache['last']['frags']['frags'][p]))/(time_difference))))

	if os.name == 'nt':
		os.system('cls')
		print "Packet fragmentation    (written by Ondrej Holecek <*****@*****.**>)"
	else:
		print "\x1b[2J\x1b[H\033[1mPacket fragmentation   (written by Ondrej Holecek <*****@*****.**>)\033[0m"

	
	filters_applied = "Applied filters: "
	if raw: filters_applied += "CNTS[raw] "
	else: filters_applied += "CNTS[diff] "
	filters_applied += "HIST[%i] " % (history,)

	print prepend_timestamp("Overall CPU utilization: %3.1f %% user, %3.1f %% system, %3.1f %% idle" % (
		overall_cpus['user'], overall_cpus['system'], overall_cpus['idle'],
	), etime, 'fragtop')
	print prepend_timestamp("Overall CPU utilization: %3.1f %% iowait, %3.1f %% irq, %3.1f %% softirq" % (
		overall_cpus['iowait'], overall_cpus['irq'], overall_cpus['softirq'],
	), etime, 'fragtop')
	print prepend_timestamp(filters_applied, etime, 'fragtop')

	prehdr = "         |     Received fragments reassembly counters    |  Outgoing fragmentation counters  |"
	if show_cpu: prehdr += "   Historical CPU percentage    |"
	print prepend_timestamp(prehdr, etime, 'fragtop')
	hdr = " %7s | %9s | %9s | %9s | %9s | %9s | %9s | %9s |" % ("history", "fragments", "packets", "timeout", "error", "packets", "fragments", "unable",)
	if show_cpu: hdr += " %8s | %8s | %8s |" % ("system%", "irq%", "softirq%",)
	print prepend_timestamp(hdr, etime, 'fragtop')

	# current line
	current_line = " %7i " % ( 0, )
	for k in ('ReasmReqds', 'ReasmOKs', 'ReasmTimeout', 'ReasmFails', 'FragOKs', 'FragCreates', 'FragFails'):
		current_line += "| %9i " % (pdiff[k],)
	current_line += "|"
	if show_cpu: current_line += " %8i | %8i | %8i |" % (overall_cpus['system'], overall_cpus['irq'], overall_cpus['softirq'],)
	print prepend_timestamp(current_line, etime, 'fragtop')

	# older lines
	for odata in cache['history']:
		old_line = " %7i " % ( -int(round(etime.as_timestamp()-odata[0])),)
		for k in ('ReasmReqds', 'ReasmOKs', 'ReasmTimeout', 'ReasmFails', 'FragOKs', 'FragCreates', 'FragFails'):
			old_line += "| %9i " % (odata[1][k],)
		old_line += "|"
		if show_cpu: old_line += " %8i | %8i | %8i |" % (odata[2], odata[3], odata[4],)
		print prepend_timestamp(old_line, etime, 'fragtop')
		
	cache['history'].insert(0, (etime.as_timestamp(), pdiff, overall_cpus['system'], overall_cpus['irq'], overall_cpus['softirq'],) )
	if len(cache['history']) > history: cache['history'] = cache['history'][:history]
	cache['last']['frags'] = frags
	cache['last']['cpu'] = usage
	cache['last']['collected_on'] = etime.as_timestamp()

	sys.stdout.flush()
	return etime
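
# A minimal sketch (not part of the original tool) of the CPU utilization formula
# used above: the counters from /proc are kept in jiffies, so the difference
# between two readings divided by (elapsed_seconds * HZ) gives the fraction of
# time spent in that state, and *100 turns it into a percentage.
def jiffies_to_percent(current, previous, elapsed_seconds, hz=100):
    return int(round(((current - previous) * 100) / float(elapsed_seconds * hz)))

# e.g. 250 jiffies of 'system' time over 5 seconds at HZ=100 is 50 %:
#   jiffies_to_percent(10250, 10000, 5) -> 50
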
Example #9
def do(sshc, cache, history, hz, raw, percentage):
    packets = ParserPacketDistribution(sshc).get()
    usage = ParserProcessCPU(sshc).get([])
    etime = ParserCurrentTime(sshc).get()

    if 'last' not in cache:
        cache['last'] = {
            'collected_on': etime.as_timestamp(),
            'packets': packets,
            'cpu': usage,
        }
        return

    time_difference = etime.as_timestamp() - cache['last']['collected_on']

    overall_cpus = {}
    for tmp in ['user', 'system', 'idle', 'iowait', 'irq', 'softirq']:
        overall_cpus[tmp] = int(
            round(
                ((usage['global'][tmp] - cache['last']['cpu']['global'][tmp]) *
                 100) / (time_difference * hz)))

    pdiff = {}
    for p in packets['packets']:
        # a key present now but missing in the previous snapshot cannot be diffed
        if p not in cache['last']['packets']['packets']:
            print >> sys.stderr, 'Error: packet distribution key %s missing in previous statistics' % (
                p, )
            return

        if raw:
            pdiff[p] = packets['packets'][p] - cache['last']['packets'][
                'packets'][p]
        else:
            pdiff[p] = int(
                round((((packets['packets'][p] -
                         cache['last']['packets']['packets'][p])) /
                       (time_difference))))

    total = sum(pdiff[x] for x in pdiff.keys())
    if percentage and total > 0:
        for p in pdiff.keys():
            pdiff[p] = int(round((float(pdiff[p]) * 100) / total))

    if os.name == 'nt':
        os.system('cls')
        print "Packet size distribution   (written by Ondrej Holecek <*****@*****.**>)"
    else:
        print "\x1b[2J\x1b[H\033[1mPacket size distribution   (written by Ondrej Holecek <*****@*****.**>)\033[0m"

    filters_applied = "Applied filters: "
    if raw: filters_applied += "CNTS[raw] "
    else: filters_applied += "CNTS[diff] "
    if percentage: filters_applied += "PERC[yes] "
    else: filters_applied += "PERC[no] "
    filters_applied += "HIST[%i] " % (history, )

    print prepend_timestamp(
        "Overall CPU utilization: %3.1f %% user, %3.1f %% system, %3.1f %% idle"
        % (
            overall_cpus['user'],
            overall_cpus['system'],
            overall_cpus['idle'],
        ), etime, 'pkttop')
    print prepend_timestamp(
        "Overall CPU utilization: %3.1f %% iowait, %3.1f %% irq, %3.1f %% softirq"
        % (
            overall_cpus['iowait'],
            overall_cpus['irq'],
            overall_cpus['softirq'],
        ), etime, 'pkttop')
    print prepend_timestamp(filters_applied, etime, 'pkttop')

    # header
    skeys = sorted(pdiff.keys())
    hdr = " history "
    for k in skeys:
        left = k[0]
        right = k[1]
        c = ""
        if left == None:
            c = " <= %i" % (right, )
        elif right == None:
            c = " >= %i" % (left, )
        else:
            c = "<%i, %i>" % (
                left,
                right,
            )

        hdr += "| %12s " % (c, )
    hdr += "| %12s |" % ('total pkts', )
    print prepend_timestamp(hdr, etime, 'pkttop')

    # current line
    current_line = " %7i " % (0, )
    for k in skeys:
        current_line += "| %12i " % (pdiff[k], )
    current_line += "| %12i |" % (total, )
    print prepend_timestamp(current_line, etime, 'pkttop')

    # older lines
    for odata in cache['history']:
        old_line = " %7i " % (-int(round(etime.as_timestamp() - odata[0])), )
        for k in skeys:
            old_line += "| %12i " % (odata[1][k], )
        old_line += "| %12i |" % (odata[2], )
        print prepend_timestamp(old_line, etime, 'pkttop')

    cache['history'].insert(0, (etime.as_timestamp(), pdiff, total))
    if len(cache['history']) > history:
        cache['history'] = cache['history'][:history]
    cache['last']['packets'] = packets
    cache['last']['cpu'] = usage
    cache['last']['collected_on'] = etime.as_timestamp()

    sys.stdout.flush()
    return etime
Example #10
def do(sshc, info):
	etime = ParserCurrentTime(sshc).get()
	
	# calculate the expected number of processing engines
	# (this is to recognize when the sessions output is incomplete, which can happen when somebody else is debugging for example)
	if info['engines'] == None:
		ipssum = ParserIPSSummary(sshc).get()
		engines = 0
		for ipse in ipssum:
			if ipse['cfg'] == False:
				engines += 1
		info['engines'] = engines

	#
	ipss = ParserIPSSessionsStat(sshc).get()
	if (len(ipss.keys()) != info['engines']):
		print >>sys.stderr, "Error in collected outputs - expected %i engines but got %i, isn't somebody else also debugging?" % (info['engines'], len(ipss.keys()),)
		return { "error" : True }


	if info['cycles'] == 0 or (info['repeat_header'] > 0 and (info['cycles'] % info['repeat_header'] == 0)): 
		show_header(ipss, etime, only_total=info['only_total'])

	if info['show']['sessions_in_use']:
		show_numbers(ipss, etime, 'ses_in_use', lambda x: x['sessions']['total']['inuse'], only_total=info['only_total'])
	if info['show']['recent_pps']:
		show_numbers(ipss, etime, 'rec_packps', lambda x: x['pps'], only_total=info['only_total'])
	if info['show']['recent_bps']:
		show_numbers(ipss, etime, 'rec_bitps', lambda x: x['bps'], human=True, only_total=info['only_total'])
	if info['show']['tcp_sessions_in_use']:
		show_numbers(ipss, etime, 'tcp_in_use', lambda x: x['sessions']['tcp']['inuse'], only_total=info['only_total'])
	if info['show']['udp_sessions_in_use']:
		show_numbers(ipss, etime, 'udp_in_use', lambda x: x['sessions']['udp']['inuse'], only_total=info['only_total'])
	if info['show']['icmp_sessions_in_use']:
		show_numbers(ipss, etime, 'icmp_in_use', lambda x: x['sessions']['icmp']['inuse'], only_total=info['only_total'])
	if info['show']['ip_sessions_in_use']:
		show_numbers(ipss, etime, 'ip_in_use', lambda x: x['sessions']['ip']['inuse'], only_total=info['only_total'])
	if info['show']['tcp_sessions_active']:
		show_numbers(ipss, etime, 'tcp_active', lambda x: x['sessions']['tcp']['active'], only_total=info['only_total'])
	if info['show']['udp_sessions_active']:
		show_numbers(ipss, etime, 'udp_active', lambda x: x['sessions']['udp']['active'], only_total=info['only_total'])
	if info['show']['icmp_sessions_active']:
		show_numbers(ipss, etime, 'icmp_active', lambda x: x['sessions']['icmp']['active'], only_total=info['only_total'])
	if info['show']['ip_sessions_active']:
		show_numbers(ipss, etime, 'ip_active', lambda x: x['sessions']['ip']['active'], only_total=info['only_total'])
	if info['show']['tcp_sessions_per_second'] and info['previous'] != None:
		tmp = diff_per_interval(ipss, info['previous'], lambda x: x['sessions']['tcp']['total'])
		show_numbers(tmp, etime, 'tcp_s_p_sec', lambda x: x['result'], only_total=info['only_total'])
	if info['show']['udp_sessions_per_second'] and info['previous'] != None:
		tmp = diff_per_interval(ipss, info['previous'], lambda x: x['sessions']['udp']['total'])
		show_numbers(tmp, etime, 'udp_s_p_sec', lambda x: x['result'], only_total=info['only_total'])
	if info['show']['icmp_sessions_per_second'] and info['previous'] != None:
		tmp = diff_per_interval(ipss, info['previous'], lambda x: x['sessions']['icmp']['total'])
		show_numbers(tmp, etime, 'icmp_s_p_sec', lambda x: x['result'], only_total=info['only_total'])
	if info['show']['ip_sessions_per_second'] and info['previous'] != None:
		tmp = diff_per_interval(ipss, info['previous'], lambda x: x['sessions']['ip']['total'])
		show_numbers(tmp, etime, 'ip_s_p_sec', lambda x: x['result'], only_total=info['only_total'])
	if info['show']['all_sessions_per_second'] and info['previous'] != None:
		tmp = diff_per_interval(ipss, info['previous'], lambda x: x['sessions']['calculated_total']['total'])
		show_numbers(tmp, etime, 'all_s_p_sec', lambda x: x['result'], only_total=info['only_total'])
	
	
	info['cycles']   += 1
	info['previous'] = ipss

	if info['empty_line']: print ""
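
# A minimal sketch (not part of the original script) of the per-second rate used
# for the "*_per_second" rows above: take the total session counters from two
# consecutive cycles and divide the increase by the elapsed time.
def per_second(previous_total, current_total, elapsed_seconds):
    if elapsed_seconds <= 0:
        return 0
    return int(round((current_total - previous_total) / float(elapsed_seconds)))

# e.g. per_second(120000, 126000, 30) -> 200 new sessions per second
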
Example #11
def do(sshc, cycle_time):
    info = {'info': {'last_time': ParserCurrentTime(sshc).get()}}
    sshc.continuous_exec("diag sys mpstat %i" % (cycle_time, ), divide, result,
                         finished, info)
Example #12
def do(sshc, cache, max_lines, display_type, hz, soft, hard, show_zeros, description):
	ints  = ParserInterrupts(sshc).get(soft=soft, hard=hard, description=description)
	usage = ParserProcessCPU(sshc).get([])
	etime = ParserCurrentTime(sshc).get()
	
	if 'last' not in cache:
		cache['last'] = {
			'interrupts': ints,
			'cpu' : usage,
		}
		return

	time_difference = ints['collected_on'] - cache['last']['interrupts']['collected_on']

	overall_cpus = {}
	for tmp in ['user', 'system', 'idle', 'iowait', 'irq', 'softirq']:
		overall_cpus[tmp] = int(round(((usage['global'][tmp] - cache['last']['cpu']['global'][tmp])*100)/(time_difference*hz)))

	diff = difference_per_second(cache['last']['interrupts'], ints, time_difference)
	diff_sorted_keys = sort_interrupts(diff, display_type, not show_zeros)
	if max_lines != 0: diff_sorted_keys = diff_sorted_keys[:max_lines]

	total_ticks_soft = sum([diff[x]['total'] for x in diff.keys() if diff[x]['source'] == 'soft'])
	total_ticks_hard = sum([diff[x]['total'] for x in diff.keys() if diff[x]['source'] == 'hard'])

	if os.name == 'nt':
		os.system('cls')
		print "Interrupt lines utilization    (written by Ondrej Holecek <*****@*****.**>)"
	else:
		print "\x1b[2J\x1b[H\033[1mInterrupt lines utilization    (written by Ondrej Holecek <*****@*****.**>)\033[0m"

	
	filters_applied = "Applied filters: "
	if type(display_type) == tuple: filters_applied += "CPU[" + ",".join([str(x) for x in sorted(display_type)]) + "] "
	elif display_type == 'total': filters_applied += "CPU[total] "
	elif display_type == 'each': filters_applied += "CPU[separate] "
	if max_lines != 0: filters_applied += "TOP[%i] " % (max_lines,)
	if soft and hard: filters_applied += "TYPE[soft,hard] "
	elif soft: filters_applied += "TYPE[soft] "
	elif hard: filters_applied += "TYPE[hard] "
	if show_zeros: filters_applied += "ZERO[yes] "
	else: filters_applied += "ZERO[no] "
	if description != None: filters_applied += "DESC[%s] " % (description,)

	print prepend_timestamp("Overall CPU utilization: %3.1f %% user, %3.1f %% system, %3.1f %% idle" % (
		overall_cpus['user'], overall_cpus['system'], overall_cpus['idle'],
	), etime, 'inttop')
	print prepend_timestamp("Overall CPU utilization: %3.1f %% iowait, %3.1f %% irq, %3.1f %% softirq" % (
		overall_cpus['iowait'], overall_cpus['irq'], overall_cpus['softirq'],
	), etime, 'inttop')
	print prepend_timestamp(filters_applied, etime, 'inttop')
	print prepend_timestamp("%-11s %5s %9s %10s %4s  %s" % ("LINE", "SOURCE", "CPU(s)", "RUNS", "PERC", "DESCRIPTION",), etime, 'inttop')

	for k in diff_sorted_keys:
		((iname, itype), iticks) = k
		source = ints['interrupts'][iname]['source']
		desc = ints['interrupts'][iname]['description']
		if len(desc) > 30: desc = desc[:25] + "[...]"
		if type(itype) == tuple: itype = 'selected'

		if source == 'soft':
			if total_ticks_soft > 0: perc = (iticks*100)/total_ticks_soft
			else: perc = 0
			source_a = 'S'
		elif source == 'hard':
			if total_ticks_hard > 0: perc = (iticks*100)/total_ticks_hard
			else: perc = 0
			source_a = 'H'

		print prepend_timestamp("%-16s %1s %9s %10i %4i  %s" % (iname, source_a, itype, iticks, perc, desc,), etime, 'inttop')
	
	cache['last'] = {
		'interrupts': ints,
		'cpu': usage,
	}
	sys.stdout.flush()
	return etime
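
# A minimal sketch (not part of the original tool) of the PERC column above: each
# interrupt line's share of all ticks seen for its source (soft or hard) during
# the interval.
def interrupt_share(line_ticks, total_ticks):
    if total_ticks <= 0:
        return 0
    return (line_ticks * 100) // total_ticks

# e.g. interrupt_share(300, 1200) -> 25
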
def do(sshc, known, status, direction, phase, age, use, colors, repeat_header):
    if os.name == 'nt': colors = False

    etime = ParserCurrentTime(sshc).get()
    latest = ParserIPSecGW(sshc).get()

    # select only the counters user is interested in
    counter_names = []
    for cntr in all_counters:
        accept = True

        if phase != None:
            if cntr.startswith("IKE_") and '1' not in phase: accept = False
            if cntr.startswith("IPSEC_") and '2' not in phase: accept = False

        if age != None:
            if cntr.endswith("_current") and 'current' not in age:
                accept = False
            if cntr.endswith("_all") and 'total' not in age: accept = False

        if use != None:
            if "_created_" in cntr and 'created' not in use: accept = False
            if "_established_" in cntr and 'established' not in use:
                accept = False

        if accept:
            counter_names.append(cntr)

    #counter_names= ('IKE_SA_created_current', 'IKE_SA_established_current', 'IKE_SA_created_all', 'IKE_SA_established_all', 'IPSEC_SA_created_current', 'IPSEC_SA_established_current', 'IPSEC_SA_created_all', 'IPSEC_SA_established_all')
    #counter_names= ('IKE_SA_created_current', 'IKE_SA_created_all', 'IKE_SA_established_current', 'IKE_SA_established_all', 'IPSEC_SA_created_current', 'IPSEC_SA_created_all', 'IPSEC_SA_established_current', 'IPSEC_SA_established_all')
    #counter_names= ('IKE_SA_created_all', 'IKE_SA_established_all', 'IPSEC_SA_created_all', 'IPSEC_SA_established_all')

    to_print = []

    for gw in latest:
        # do we want this one?
        if status != None and gw['status'] not in status: continue
        if direction != None and gw['direction'] not in direction: continue

        # retrieve current counters
        this = {}
        for t in counter_names:
            this[t] = gw[t]

        # retrieve previous counters
        try:
            prev = known[(gw['vdom'], gw['name'])]
        except KeyError:
            prev = {}
            for t in counter_names:
                prev[t] = 0

        # count difference
        diff = {}
        non_zero = []
        for t in counter_names:
            diff[t] = this[t] - prev[t]
            if diff[t] != 0: non_zero.append(t)

        # prepare lines to print
        if len(non_zero) > 0:
            line = "%-16s %-20s %-10s %-12s" % (gw['vdom'], gw['name'],
                                                gw['direction'], gw['status'])

            for t in counter_names:
                if colors:
                    if diff[t] == 0: line += '\33[2m'
                    elif diff[t] > 0: line += '\33[0;32;40m'
                    elif diff[t] < 0: line += '\33[0;31;40m'

                if diff[t] == 0:
                    line += " %10i" % (0, )
                else:
                    line += " %+10i" % (diff[t], )

                if colors: line += '\33[0m'

            # save number of appearences of this p1
            if (gw['vdom'], gw['name']) not in known['appeared']:
                known['appeared'][(gw['vdom'], gw['name'])] = 0
            known['appeared'][(gw['vdom'], gw['name'])] += 1

            # add number of appearances to the output line
            if known['iters'] > 0:
                repeat_perc = int(
                    round(
                        float(known['appeared'][(gw['vdom'], gw['name'])] *
                              100) / float(known['iters'])))
            else:
                repeat_perc = 100

            if colors:
                if repeat_perc > 50: line += '\33[0;31;40m'
                elif repeat_perc > 20: line += '\33[0;33;40m'

            line += " %7i" % (repeat_perc, )

            if colors: line += '\33[0m'

            # save line to be printed later
            to_print.append(prepend_timestamp(line, etime, 'ipsec_rekeys'))

        # save ...
        known[(gw['vdom'], gw['name'])] = this

    # prepare header
    if known['header'] == None:
        header = ''
        if colors: header += '\033[1m'

        header += "%-16s %-20s %-10s %-12s" % (
            'vdom',
            'name',
            'direction',
            'status',
        )
        for t in counter_names:
            if t == 'IKE_SA_created_all': header += " %10s" % ('P1:CreAll', )
            elif t == 'IKE_SA_established_all':
                header += " %10s" % ('P1:EstAll', )
            elif t == 'IKE_SA_created_current':
                header += " %10s" % ('P1:CreCur', )
            elif t == 'IKE_SA_established_current':
                header += " %10s" % ('P1:EstCur', )
            elif t == 'IPSEC_SA_created_all':
                header += " %10s" % ('P2:CreAll', )
            elif t == 'IPSEC_SA_established_all':
                header += " %10s" % ('P2:EstAll', )
            elif t == 'IPSEC_SA_created_current':
                header += " %10s" % ('P2:CreCur', )
            elif t == 'IPSEC_SA_established_current':
                header += " %10s" % ('P2:EstCur', )

        header += " %7s" % ('repeat%', )
        if colors: header += '\033[0m'
        known['header'] = header

    # do not show the first round but show header
    known['iters'] += 1
    if known['iters'] == 1:
        known['appeared'] = {}
        print prepend_timestamp(known['header'], etime, 'ipsec_rekeys')
        return

    # print data
    for p in to_print:
        if repeat_header != 0 and known[
                'printed'] != 0 and known['printed'] % repeat_header == 0:
            print prepend_timestamp(known['header'], etime, 'ipsec_rekeys')

        print p
        known['printed'] += 1
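
# A minimal sketch (not part of the original script) of the repeat% column above:
# how often a given phase1 gateway showed some counter change, as a percentage of
# all iterations observed so far.
def repeat_percent(appearances, iterations):
    if iterations <= 0:
        return 100
    return int(round(float(appearances * 100) / float(iterations)))

# e.g. a gateway that rekeyed in 3 out of 20 cycles: repeat_percent(3, 20) -> 15
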
Example #14
def do(sshc, cache, pid_group_count, max_lines, sort_by, process_name, cpus,
       states, hz, ppid, tpid, negate):
    # filter only desired process (or all if process == None)
    if process_name != None: process_re = re.compile(process_name)
    else: process_re = None

    processes = []
    for process in ParserProcesses(sshc).get2():
        if process_re == None or process_re.search(process['cmd']):
            processes.append(process)

    # save information about applied filters
    filters_applied = "Applied filters: "
    if cpus != None:
        filters_applied += "CPU[%s] " % (",".join(str(cpu) for cpu in cpus))
    if states != None:
        filters_applied += "STATE[%s] " % (",".join(
            str(state) for state in states))
    if process_name != None: filters_applied += "NAME[%s] " % (process_name, )
    if max_lines != 0: filters_applied += "TOP[%i] " % (max_lines, )
    if sort_by != None: filters_applied += "SORT[%s] " % (sort_by, )

    #
    previous = cache['previous']
    etime = ParserCurrentTime(sshc).get()

    # group the PIDs so that several of them can be sent in one command
    current = []
    for i in range(0, len(processes), pid_group_count):
        pids = [p['PID'] for p in processes[i:i + pid_group_count]]

        current.append(ParserProcessCPU(sshc).get(pids))

    if previous != None:
        overall_cpus = {
            'user': 0,
            'system': 0,
            'idle': 0,
            'iowait': 0,
            'irq': 0,
            'softirq': 0
        }
        util = {}
        for i in range(len(previous)):
            diff_overall, diff_processes, diff_time = ParserProcessCPU(
                sshc).diff(previous[i], current[i])
            for pid in diff_processes.keys():
                if cpus != None and diff_processes[pid]['last_cpu'] not in cpus:
                    continue
                if states != None and diff_processes[pid][
                        'last_state'] not in states:
                    continue

                show = False
                if (tpid == None and ppid == None): show = True
                elif (ppid != None and diff_processes[pid]['parent'] in ppid):
                    show = True
                elif (tpid != None and pid in tpid):
                    show = True
                if (not negate) and (not show): continue
                elif (negate) and (show): continue

                util[pid] = {}
                util[pid]['name'] = diff_processes[pid]['name']
                util[pid]['pid'] = pid
                util[pid]['parent'] = diff_processes[pid]['parent']
                util[pid]['last_cpu'] = diff_processes[pid]['last_cpu']
                util[pid]['last_state'] = diff_processes[pid]['last_state']

                if diff_overall['user'] != 0:
                    util[pid]['user'] = (float(diff_processes[pid]['user']) *
                                         100) / diff_overall['user']
                else:
                    util[pid]['user'] = float(0.0)

                if diff_overall['system'] != 0:
                    util[pid]['system'] = (float(diff_processes[pid]['system'])
                                           * 100) / diff_overall['system']
                else:
                    util[pid]['system'] = float(0.0)

                if diff_time != 0:
                    util[pid]['global_user'] = (float(
                        diff_processes[pid]['user']) * 100) / (diff_time * hz)
                    util[pid]['global_system'] = (
                        float(diff_processes[pid]['system']) *
                        100) / (diff_time * hz)
                else:
                    util[pid]['global_user'] = float(0.0)
                    util[pid]['global_system'] = float(0.0)

                util[pid]['total'] = util[pid]['user'] + util[pid]['system']

            # overall - we will count an average
            for tmp in ['user', 'system', 'idle', 'iowait', 'irq', 'softirq']:
                overall_cpus[tmp] += (float(diff_overall[tmp]) *
                                      100) / (diff_time * hz)

        # make an average and convert to percentages
        for tmp in overall_cpus.keys():
            if len(previous) > 0:
                overall_cpus[tmp] = overall_cpus[tmp] / len(previous)
            else:
                overall_cpus[tmp] = 0

        print_formatted(util, overall_cpus, max_lines, etime, sort_by,
                        filters_applied)

        sys.stdout.flush()
        cache['previous'] = current
        return etime

    cache['previous'] = current
    return None
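
# A minimal sketch (not part of the original tool) of the two utilization views
# computed above: a process's user time as a share of all user time spent in the
# interval, and the same time expressed against wall clock time (jiffies / HZ).
def process_user_percent(proc_user_jiffies, overall_user_jiffies,
                         elapsed_seconds, hz=100):
    if overall_user_jiffies != 0:
        relative = (float(proc_user_jiffies) * 100) / overall_user_jiffies
    else:
        relative = 0.0
    if elapsed_seconds > 0:
        global_ = (float(proc_user_jiffies) * 100) / (elapsed_seconds * hz)
    else:
        global_ = 0.0
    return relative, global_

# e.g. 50 jiffies out of a 200-jiffy user total over 2 s at HZ=100:
#   process_user_percent(50, 200, 2) -> (25.0, 25.0)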