def main(): pass if len(sys.argv) == 1: continous_measuring() return elif sys.argv[1] == "scan": rsa_dir = lib.search_dir(".", "ssh_needs", 2) if rsa_dir is not None: rsa_file = str(rsa_dir) + "/id_rsa" lib.Connection.connection_builder = \ lib.ConnectionBuilder(slice_name, rsa_file, None) else: logger.info("RSA key not found!") return node_states = lib.get_collection("node_state") def stdout_proc(node): stdout = node["stdout"] if "release" in stdout: stdout = stdout.replace("\r", "") os = stdout.split("\n")[0] result = {"ip": node["ip"], "ts": time.time(), "os": os} node_states.insert_one(result) node["stdout"] = os print "Good node: ip=%s, os=%s" % (node["ip"], os) else: node["error"] = "not valid response: " + stdout node["stderr"] = stdout def stderr_proc(node): node["error"] = node["error"].replace(node["ip"], "*ip*") node["error"] = node["error"].replace(node["dns"], "*dns*") return node["error"] args = { "cmd": "cat /etc/issue", "save_result": False, "do_statistics": True, "stdout_proc": stdout_proc, "stderr_proc": stderr_proc } # nodes=None, cmd=None, stdout_proc=None, stderr_proc=None, # timeout=10, save_erroneous=True, # save_stdout=True, save_stderr=True, # node_script=scan_script, save_result=True start = time.time() print "scan started at: ", start scan(**args) end = time.time() print "scan ended at: ", end print 'scan duration: %.2f seconds' % (end - start) return if sys.argv[1] == "dev": print "\n".join(get_good_nodes()) return
def main(): # data_vis = {"nodes": [{"group": 0, "id": 0, "label": "0"}, ...], # "edges": [{"from": 1, "to": 0}, ...]} # data_vivagraph = {"nodes":[{"name": "test", "group":1}, ...], # "links":[{"source":1,"target":0,"value":1},... ]} # as_data = { # "asn": "123", # "core_ips": [ip2, ip3, ...], # "gateways_to_as": { # "321": [(ip1, ip2), ...], # "65": [], # ... # } # } as_collection = lib.get_collection("as_graph", True) mongo_filter = {"asn": {"$gt": 0, "$ne": None}} print "get as numbers" as_numbers = as_collection.find(mongo_filter).distinct("asn") nodes = [{ "group": int(asn), "id": int(asn), "label": str(asn) } for asn in as_numbers] print "get links between as-es" as_neighbours = as_collection.find(mongo_filter, {"core_ips": False}) edges = [] for asn in as_neighbours: for neighbour in asn["gateways_to_as"].iterkeys(): edges.append({ "from": int(asn["asn"]), "to": int(neighbour), "value": len(asn["gateways_to_as"][neighbour]) }) print "write them out" data = {"nodes": nodes, "edges": edges} from collections import Counter link_weights = Counter() for edge in edges: link_weights[edge["value"]] += 1 print link_weights.most_common() with open("data.js", "w") as f: f.write("function getData(){ return " + json.dumps(data, indent=2) + ";}")
def get_good_nodes():
    """Return the distinct ips of nodes seen healthy in the last 6 hours.

    A node counts as "good" when the "node_state" collection holds an
    entry for it with a timestamp newer than now - 21600 s (6 h).
    """
    node_states = lib.get_collection("node_state")
    six_hours_ago = int(time.time() - 21600)  # 6 * 60 * 60 seconds
    mongo_filter = {"ts": {"$gt": six_hours_ago}}
    ips = node_states.find(mongo_filter, {"ip": True}).distinct("ip")
    # list(...) instead of the original identity comprehension.
    return list(ips)
def get_good_nodes():
    """Return the distinct ips of nodes seen healthy in the last 6 hours.

    "Good" means the "node_state" collection has an entry for the ip with
    a timestamp newer than now - 21600 s (6 h).
    """
    node_states = lib.get_collection("node_state")
    cutoff = int(time.time() - 21600)  # 6 hours, in seconds
    mongo_filter = {"ts": {"$gt": cutoff}}
    # list(...) instead of the original identity comprehension.
    return list(node_states.find(mongo_filter, {"ip": True}).distinct("ip"))
def scan(nodes=None, cmd=None, stdout_proc=None, stderr_proc=None, timeout=10, save_erroneous=True, do_statistics=True, save_stdout=True, save_stderr=True, node_script=scan_script, save_result=True): global node_len log = logging.getLogger().info if nodes is None: log("get planet lab ip list") # nodes = lib.getPlanetLabNodes(slice_name) nodes = lib.getBestNodes()[:5] node_len = len(nodes) log("start scanning them ") node_calls = [{"cmd": cmd, "timeout": timeout, "ip": ip} for ip in nodes] def orchestrate(args): res = node_script(args) if stdout_proc is not None and "stdout" in res: res["data"] = stdout_proc(res) if stderr_proc is not None and \ "error" in res and \ res["error"] is not None: res["error"] = stderr_proc(res) return res nodes = utils.thread_map(orchestrate, node_calls, used_threads) log("filter not needed informations") if not save_erroneous: new_list = [] for node in nodes: if "error" not in node and \ node["online"] == "online": new_list.append(node) nodes = new_list if not save_stderr: for node in nodes: node.pop("stderr", None) if not save_stdout: for node in nodes: node.pop("stdout", None) if save_result: log("write out the results") with open("results/scan.json", "w") as f: f.write(json.dumps(nodes)) if do_statistics: log("calculate statistics") stats = scan_statistics(nodes) stats["ts"] = time.time() node_statistics = lib.get_collection("node_statistics") tmp = stats.copy() node_statistics.insert_one(tmp) print json.dumps(stats, indent=2)
def main(): pass if len(sys.argv) == 1: continous_measuring() return elif sys.argv[1] == "scan": rsa_dir = lib.search_dir(".", "ssh_needs", 2) if rsa_dir is not None: rsa_file = str(rsa_dir) + "/id_rsa" lib.Connection.connection_builder = \ lib.ConnectionBuilder(slice_name, rsa_file, None) else: logger.info("RSA key not found!") return node_states = lib.get_collection("node_state") def stdout_proc(node): stdout = node["stdout"] if "release" in stdout: stdout = stdout.replace("\r", "") os = stdout.split("\n")[0] result = { "ip": node["ip"], "ts": time.time(), "os": os } node_states.insert_one(result) node["stdout"] = os print "Good node: ip=%s, os=%s" % (node["ip"], os) else: node["error"] = "not valid response: " + stdout node["stderr"] = stdout def stderr_proc(node): node["error"] = node["error"].replace(node["ip"], "*ip*") node["error"] = node["error"].replace(node["dns"], "*dns*") return node["error"] args = { "cmd": "cat /etc/issue", "save_result": False, "do_statistics": True, "stdout_proc": stdout_proc, "stderr_proc": stderr_proc } # nodes=None, cmd=None, stdout_proc=None, stderr_proc=None, # timeout=10, save_erroneous=True, # save_stdout=True, save_stderr=True, # node_script=scan_script, save_result=True start = time.time() print "scan started at: ", start scan(**args) end = time.time() print "scan ended at: ", end print 'scan duration: %.2f seconds' % (end - start) return if sys.argv[1] == "dev": print "\n".join(get_good_nodes()) return
def scan(nodes=None, cmd=None, stdout_proc=None, stderr_proc=None, timeout=10, save_erroneous=True, do_statistics=True, save_stdout=True, save_stderr=True, node_script=scan_script, save_result=True): global node_len log = logging.getLogger().info if nodes is None: log("get planet lab ip list") # nodes = lib.getPlanetLabNodes(slice_name) nodes = lib.getBestNodes()[:5] node_len = len(nodes) log("start scanning them ") node_calls = [{ "cmd": cmd, "timeout": timeout, "ip": ip } for ip in nodes] def orchestrate(args): res = node_script(args) if stdout_proc is not None and "stdout" in res: res["data"] = stdout_proc(res) if stderr_proc is not None and \ "error" in res and \ res["error"] is not None: res["error"] = stderr_proc(res) return res nodes = utils.thread_map(orchestrate, node_calls, used_threads) log("filter not needed informations") if not save_erroneous: new_list = [] for node in nodes: if "error" not in node and \ node["online"] == "online": new_list.append(node) nodes = new_list if not save_stderr: for node in nodes: node.pop("stderr", None) if not save_stdout: for node in nodes: node.pop("stdout", None) if save_result: log("write out the results") with open("results/scan.json", "w") as f: f.write(json.dumps(nodes)) if do_statistics: log("calculate statistics") stats = scan_statistics(nodes) stats["ts"] = time.time() node_statistics = lib.get_collection("node_statistics") tmp = stats.copy() node_statistics.insert_one(tmp) print json.dumps(stats, indent=2)