Code Example #1
import os
import sys

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'config'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'lib'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'vendor'))

import csv
import time
import tempfile

import common
import csv_configurator
import backend


if __name__ == "__main__":
	parser = common.get_default_argument_parser("Script for importing a device list into the monitoring process")
	parser.add_argument("location_file", help="CSV file that contains a list of locations/networks that should be included into the monitoring process")

	args = parser.parse_args()

	# prepare the target database
	dst_db = backend.databackend.getBackendObject(
		args.backend, args.dst_host, args.dst_port,
		args.dst_user, args.dst_password, args.dst_database)

	measurement_map_filename = os.path.join(os.path.dirname(__file__), "..", "config", "monitoring_devices.csv")
	for name, fields in csv_configurator.read_field_dict_from_csv(args.backend, measurement_map_filename).items():
		dst_db.prepareCollection(name, fields)

	location_table = dst_db.getCollection("location_table")
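
The excerpt stops right after location_table is fetched, before args.location_file is read. Purely as an illustration, the rows of that CSV could be pushed into the collection with the same update()/flushCache() idiom that Code Example #5 uses; the column names location_id, network and description are assumptions and are not taken from the original script.

# Hypothetical continuation, not part of the original script: import each row
# of the location CSV into location_table. Column names are assumed.
with open(args.location_file) as location_csv:
	for row in csv.DictReader(location_csv):
		location_table.update(
			{"location_id": row["location_id"]},
			{"$set": {"network": row["network"], "description": row.get("description", "")}})
location_table.flushCache()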
Code Example #2
	queries. It takes a list of tuples of (ip_list, community_string)
	that can be used to query the device for the OID ???
	"""
	snmpwalk_pipe = subprocess.Popen(
		[os.path.join(os.path.dirname(__file__), '..', 'tools', 'snmpwalk-worker'), output_dir, config.snmp_oid_file],
		stdout=subprocess.PIPE, stdin=subprocess.PIPE)

	input_for_snmpwalk_worker = ""
	for ip in ip_list_community_strings:
		community_string = ip_list_community_strings[ip]['community_string']
		input_for_snmpwalk_worker += ip + " " + community_string + "\n"

	output = snmpwalk_pipe.communicate(input=input_for_snmpwalk_worker)[0].split('\n')
	# TODO: decide on what to do with the output. do we want to dump it?
	

if __name__ == "__main__":
	parser = common.get_default_argument_parser("Tool for performing live checks on the devices that require monitoring")

	args = parser.parse_args()

	dst_db = backend.databackend.getBackendObject(
		args.backend, args.dst_host, args.dst_port,
		args.dst_user, args.dst_password, args.dst_database, "INSERT")

	measurement_map_filename = os.path.join(os.path.dirname(__file__), "..", "config", "monitoring_devices.csv")
	for name, fields in csv_configurator.read_field_dict_from_csv(args.backend, measurement_map_filename).items():
		dst_db.prepareCollection(name, fields)


	device_table = dst_db.getCollection("device_table")
	snmp_availability_table = dst_db.getCollection("snmp_availability")
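
The listing ends once device_table and snmp_availability_table have been fetched; how the SNMP check results are stored is not shown. As a rough sketch only, one availability entry per queried device could be written with the update()/flushCache() idiom from Code Example #5. The field names device_ip, timestamp and available, and the check_device() helper, are assumptions rather than part of the original tool.

import time

# Hypothetical sketch, not from the original tool: store one availability
# entry per device. Field names and check_device() are assumed.
timestamp = int(time.time())
for ip in ip_list_community_strings:
	reachable = check_device(ip)  # placeholder for the actual SNMP reachability check
	snmp_availability_table.update(
		{"device_ip": ip, "timestamp": timestamp},
		{"$set": {"available": reachable}})
snmp_availability_table.flushCache()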
Code Example #3
def main():
	doc = {}

	parser = common.get_default_argument_parser("Parse SNMP data files and import data to database")

	parser.add_argument("data_path", help="Path to the data that should be inserted. This must be a file if neither -d or -r are given.")
	parser.add_argument("-d", "--directory", action="store_true", help="Parse directory instead of a single file. The directory will be scanned for <directory>/*.txt:")
	parser.add_argument("-r", "--recursive", action="store_true", help="Recurse direcory, i.e. expecting files in <directory>/*/*.txt")
	args = parser.parse_args()

	dst_db = backend.databackend.getBackendObject(
		args.backend, args.dst_host, args.dst_port,
		args.dst_user, args.dst_password, args.dst_database, "INSERT")

	if args.clear_database:
		dst_db.clearDatabase()

	collections = prepare_snmp_collections(dst_db, args.backend)
	
	# TODO: hacky ... make something more general ...
	if args.backend == "mongo":
		db = pymongo.Connection(args.dst_host, args.dst_port)[args.dst_database]
		collection = db["snmp_raw"]
		collection.ensure_index([("router", pymongo.ASCENDING), ("if_number", pymongo.ASCENDING), ("timestamp", pymongo.ASCENDING), ("type", pymongo.ASCENDING)])
		collection.ensure_index([("router", pymongo.ASCENDING), ("if_ip", pymongo.ASCENDING), ("timestamp", pymongo.ASCENDING), ("type", pymongo.ASCENDING)])
		collection.ensure_index([("ip_src", pymongo.ASCENDING), ("ip_dst", pymongo.ASCENDING), ("timestamp", pymongo.ASCENDING), ("type", pymongo.ASCENDING)])
		collection.ensure_index([("ip_src", pymongo.ASCENDING), ("ip_dst", pymongo.ASCENDING), ("mask_dst", pymongo.ASCENDING), ("ip_gtw", pymongo.ASCENDING), ("timestamp", pymongo.ASCENDING), ("type", pymongo.ASCENDING)])

		# restore generic backend collection
		collection = dst_db.getCollection("snmp_raw")
	else: 
	#	collection.createIndex("router")
	#	collection.createIndex("if_number")
	#	collection.createIndex("timestamp")
	#	collection.createIndex("type")
	#	collection.createIndex("ip_src")
	#	collection.createIndex("ip_dst")
		pass

	# environment settings
	cache_threshold = 10000000

	# TODO: this implies a precedence of -d over -r; maybe something better can be done here
	if args.directory:
		files = glob.glob(args.data_path + "/*.txt")
	elif args.recursive:
		files = glob.glob(args.data_path + "/*/*.txt")
	else:
		files = [ args.data_path ]
	
	# statistical counters
	time_begin = time.time()
	time_last = time_begin
	counter = 0

	# local document storage
	lines_since_commit = 0
	timestamps = set()
	
	# loop over all files
	for file in files:
		(read_lines, timestamp, doc) = parse_snmp_file(file, doc)
		lines_since_commit += read_lines
		counter += read_lines
		timestamps.add(timestamp)

		# files are committed after parse_snmp_file is done, so each file is committed at once
		if lines_since_commit > cache_threshold:
			commit_doc(doc, collections)
			lines_since_commit = 0

		# print progress statistics every few seconds
		time_current = time.time()
		if time_current - time_last > 5:
			print "Processed %s lines in %s seconds (%s lines per second)" % (
				counter, time_current - time_begin, counter / (time_current - time_begin))
			time_last = time_current

	
	#	print "counter: %s" % counter


	# commit local doc to databackend in the end
	
	commit_doc(doc, collections)

	for collection in collections.itervalues():
		collection.flushCache()

	# do some statistics in the end
	time_current = time.time()
	print "Processed %s lines in %s seconds (%s lines per second)" % (
			counter, time_current - time_begin, counter / (time_current - time_begin))
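
Both this listing and the next one call commit_doc() and parse_snmp_file(), but neither helper appears anywhere in these excerpts. The sketch below is only a guess at what commit_doc() might do, assuming doc maps a collection name to a list of (query, update) pairs buffered by parse_snmp_file() and that each collection exposes the same update() method seen in Code Example #5.

def commit_doc(doc, collections):
	# Hypothetical sketch, not the original implementation: flush everything
	# that parse_snmp_file() buffered since the last commit.
	for name, entries in doc.items():
		for query, update in entries:
			collections[name].update(query, update)
		del entries[:]  # keep the key but drop the committed entries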
Code Example #4
def main():
    doc = {}

    parser = common.get_default_argument_parser("Parse SNMP data files and import data to database")

    parser.add_argument(
        "data_path", help="Path to the data that should be inserted. This must be a file if neither -d or -r are given."
    )
    parser.add_argument(
        "-d",
        "--directory",
        action="store_true",
        help="Parse directory instead of a single file. The directory will be scanned for <directory>/*.txt:",
    )
    parser.add_argument(
        "-r", "--recursive", action="store_true", help="Recurse direcory, i.e. expecting files in <directory>/*/*.txt"
    )
    args = parser.parse_args()

    dst_db = backend.databackend.getBackendObject(
        args.backend, args.dst_host, args.dst_port, args.dst_user, args.dst_password, args.dst_database, "INSERT"
    )

    if args.clear_database:
        dst_db.clearDatabase()

    collections = prepare_snmp_collections(dst_db, args.backend)

    cache_threshold = 10000000

    # TODO: this implies a precedence of -d over -r; maybe something better can be done here
    if args.directory:
        files = glob.glob(args.data_path + "/*.txt")
    elif args.recursive:
        files = glob.glob(args.data_path + "/*/*.txt")
    else:
        files = [args.data_path]

    # sort by timestamp, then router
    files.sort(key=lambda file: os.path.basename(file).rstrip(".txt").split("-")[2:0:-1])

    # statistical counters
    time_begin = time.time()
    time_last = time_begin
    counter = 0

    # local document storage
    lines_since_commit = 0

    if len(files) == 0:
        sys.exit(1)
    (last_router, last_timestamp) = os.path.basename(files[0]).rstrip(".txt").split("-")[1:3]

    # loop over all files
    for file in files:
        # timestamp and router of current file
        (router, timestamp) = os.path.basename(file).rstrip(".txt").split("-")[1:3]

        # files are committed after parse_snmp_file is done, so each file is committed at once
        # to merge ifXTable and interface_phy we need to ensure that all files for one timestamp and router are committed at once
        # files are sorted by timestamp and router in the beginning, so a new timestamp or a new router means it's safe to commit now
        if lines_since_commit > cache_threshold and (timestamp != last_timestamp or router != last_router):
            commit_doc(doc, collections)
            lines_since_commit = 0

        (read_lines, doc) = parse_snmp_file(file, doc)
        lines_since_commit += read_lines
        counter += read_lines
        last_router = router
        last_timestamp = timestamp

        # do statistical calculation
        time_current = time.time()
        if time_current - time_last > 5:
            print "Processed %s lines in %s seconds (%s lines per second)" % (
                counter,
                time_current - time_begin,
                counter / (time_current - time_begin),
            )
            time_last = time_current

    # print "counter: %s" % counter

    # commit local doc to databackend in the end
    commit_doc(doc, collections)

    for collection in collections.itervalues():
        collection.flushCache()

    # do some statistics in the end
    time_current = time.time()
    print "Processed %s lines in %s seconds (%s lines per second)" % (
        counter,
        time_current - time_begin,
        counter / (time_current - time_begin),
    )
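
The sort key and the later filename splits look cryptic. The walkthrough below uses an invented filename; only the <prefix>-<router>-<timestamp>.txt layout is implied by the code, the concrete prefix, router and timestamp values are made up.

import os

# Worked example with an invented filename; only the <prefix>-<router>-<timestamp>.txt
# layout is implied by the code above.
name = os.path.basename("/data/snmp-routerA-1371556800.txt")  # "snmp-routerA-1371556800.txt"
stem = name.rstrip(".txt")                                    # "snmp-routerA-1371556800"
# note: rstrip() strips characters, not a suffix; it works here because the timestamp is numeric
parts = stem.split("-")                                       # ["snmp", "routerA", "1371556800"]
print parts[2:0:-1]  # ["1371556800", "routerA"] -> sort by timestamp, then router
print parts[1:3]     # ["routerA", "1371556800"] -> (router, timestamp) for each file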
Code Example #5
			doc = dict()
			entry = dict()
			if rtt == -1:
				entry['success'] = False
				entry['rtt'] = 0
			else:
				entry['success'] = True
				entry['rtt'] = rtt
			doc["$set"] = entry
			result_collection.update({'location_id': loc_id, 'timestamp': timestamp}, doc)
	result_collection.flushCache()

	

if __name__ == "__main__":
	parser = common.get_default_argument_parser("Tool for performing RTT measurements on the monitored locations")

	args = parser.parse_args()

	dst_db = backend.databackend.getBackendObject(
		args.backend, args.dst_host, args.dst_port,
		args.dst_user, args.dst_password, args.dst_database, "INSERT")

	measurement_map_filename = os.path.join(os.path.dirname(__file__), "..", "config", "monitoring_devices.csv")
	for name, fields in csv_configurator.read_field_dict_from_csv(args.backend, measurement_map_filename).items():
		dst_db.prepareCollection(name, fields)


	location_table = dst_db.getCollection("location_table")
	results_table  = dst_db.getCollection("location_results")
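
Where the rtt value consumed above comes from is outside this excerpt; only the convention rtt == -1 for a failed probe is visible. Below is one hedged way such a value could be produced with a single system ping; the measure_rtt() helper and the Linux ping flags are assumptions, not part of the original tool.

import os
import subprocess
import time

def measure_rtt(ip):
	# Hypothetical helper, not from the original tool: send one ICMP echo via the
	# system ping binary (Linux flags assumed) and return the round-trip time in
	# seconds, or -1 on failure, matching the rtt == -1 convention used above.
	devnull = open(os.devnull, "w")
	start = time.time()
	returncode = subprocess.call(["ping", "-c", "1", "-W", "2", ip], stdout=devnull, stderr=devnull)
	devnull.close()
	if returncode != 0:
		return -1
	return time.time() - start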
Code Example #6
                                     stdout=subprocess.PIPE,
                                     stdin=subprocess.PIPE)

    input_for_snmpwalk_worker = ""
    for ip in ip_list_community_strings:
        community_string = ip_list_community_strings[ip]['community_string']
        input_for_snmpwalk_worker += ip + " " + community_string + "\n"

    output = snmpwalk_pipe.communicate(
        input=input_for_snmpwalk_worker)[0].split('\n')
    # TODO: decide on what to do with the output. do we want to dump it?


if __name__ == "__main__":
    parser = common.get_default_argument_parser(
        "Tool for performing live checks on the devices that require the monitoring"
    )

    args = parser.parse_args()

    dst_db = backend.databackend.getBackendObject(args.backend, args.dst_host,
                                                  args.dst_port, args.dst_user,
                                                  args.dst_password,
                                                  args.dst_database, "INSERT")

    measurement_map_filename = os.path.join(os.path.dirname(__file__), "..",
                                            "config", "monitoring_devices.csv")
    for name, fields in csv_configurator.read_field_dict_from_csv(
            args.backend, measurement_map_filename).items():
        dst_db.prepareCollection(name, fields)