def main():
    global results_queue
    global event_loop
    global pinger

    args = parse_args()
    log_format = '%(asctime)s %(levelname)s:%(module)s:%(funcName)s# ' \
        + '%(message)s'
    if args.foreground:
        logging.basicConfig(format=log_format, level=args.log_level)
    else:
        log_filename = env.get_env_string('PROBER_LOG_FILE')
        logging.basicConfig(filename=log_filename, format=log_format,
                            level=args.log_level)

    setup_signal_handler()

    results_queue = TQueue()
    unconfirmed_list = []
    hosts = []

    logging.info("Starting ping thread")
    pinger = Pinger(hosts, output=results_queue)
    pinger.start()

    logging.info("Starting event loop")
    event_loop = asyncio.get_event_loop()
    main_task = maintain_collector_connection(results_queue, unconfirmed_list)
    try:
        event_loop.run_until_complete(main_task)
    except KeyboardInterrupt:
        logging.debug("Caught KeyboardInterrupt in main()")
        shutdown()
def main():
    p = Pinger(timeout=5)
    p.loop()
    while True:
        try:
            sleep(1)
        except KeyboardInterrupt:
            break
    p.stop()
def monitor():
    ping = Pinger()
    ping.thread_count = 4
    while True:
        ping.hosts = pingips()
        nodes = ping.start()
        for ip in nodes['dead']:
            updatedead(ip)
        time.sleep(1)
        os.system("clear")
        select()
def updateScilentVm(self):
    from pinger import Pinger

    alivestatus = self.engine.getModel('AliveStatus')
    ne_server = self.engine.getModel('NEServer')
    dtnow = datetime.now()
    interval = dtnow - timedelta(minutes=self.interval)
    ping = Pinger()
    hosts = []
    for alive_rs in self.db_session.query(alivestatus).filter(
            alivestatus.updated_at < interval):
        ne_rs = self.db_session.query(ne_server).filter(
            ne_server.uuid == alive_rs.uuid).one()
        if ping.ping(ne_rs.nic2):
            # Refresh updated_at for this host's row (uuid taken from alive_rs).
            self.db_session.query(alivestatus).filter_by(
                uuid=alive_rs.uuid).update({'updated_at': datetime.now()})
            self.db_session.commit()
                    help='Ping the specified host until stopped.')
# Number of pings
parser.add_argument('-n', dest='count', type=int,
                    help='Number of echo requests to send.')
# Interval between pings (for Continuous is True or Count > 1)
parser.add_argument(
    '-i', dest='interval', type=int, default=defaults.interval,
    help='Wait interval seconds between sending each packet.')

args = parser.parse_args()

# If multiple pings, use Pinger()
if args.continuous or args.count:
    sys.exit(
        Pinger(args.hostname, args.size, args.timeout, args.continuous,
               args.count, args.interval))
# Otherwise, just use a single ping
else:
    p = Ping(args.hostname, args.size, args.timeout)
    delay = p.do()
    print p
    print delay
def reportAlive(self):
    aliveinfo = AliveInfo()
    ping = Pinger()

    hosts = []
    hosts.append(self.api_conf['public_router'])
    hosts.append(self.api_conf['public_gateway'])

    ex_hosts = []
    ex_hosts.append(self.api_conf['aws'])
    ex_hosts.append(self.api_conf['kt'])
    ex_hosts.append(self.api_conf['legacy'])

    ping.hosts = hosts
    status = ping.start()
    status2 = ping.start()

    if self.api_conf['public_router'] in status['alive'] or self.api_conf[
            'public_router'] in status2['alive']:
        aliveinfo.normal_public_fx = True
        aliveinfo.normal_public_fx_latency = status['alive'][
            self.api_conf['public_router']]
    else:
        aliveinfo.normal_public_fx = False
        aliveinfo.normal_public_fx_latency = status['dead'][
            self.api_conf['public_router']]

    if self.api_conf['public_gateway'] in status['alive'] or self.api_conf[
            'public_gateway'] in status2['alive']:
        aliveinfo.normal_public_fl = True
        aliveinfo.normal_public_fl_latency = status['alive'][
            self.api_conf['public_gateway']]
    else:
        ping.hosts = ex_hosts
        ex_status = ping.start()
        if len(ex_status['alive']) > 0:
            aliveinfo.normal_public_fl = True
            if self.api_conf['aws'] in ex_status['alive']:
                aliveinfo.normal_public_fl_latency = ex_status['alive'][
                    self.api_conf['aws']]
            elif self.api_conf['kt'] in ex_status['alive']:
                aliveinfo.normal_public_fl_latency = ex_status['alive'][
                    self.api_conf['kt']]
            elif self.api_conf['legacy'] in ex_status['alive']:
                aliveinfo.normal_public_fl_latency = ex_status['alive'][
                    self.api_conf['legacy']]
            else:
                aliveinfo.normal_public_fl_latency = -1
        else:
            aliveinfo.normal_public_fl = False
            aliveinfo.normal_public_fl_latency = ex_status['dead'][
                self.api_conf['aws']]

    aliveinfo.wmi_private_fx = None
    aliveinfo.wmi_private_fl = None
    aliveinfo.wmi_public_fx = None
    aliveinfo.wmi_public_fl = None

    # influx_container = ("tc_npath_monitor,host=%s normal_public_fx=%d,"
    #                     "normal_public_fl=%d,normal_public_fx_latency=%.4f,"
    #                     "normal_public_fl_latency=%.4f"
    #                     % (self.data['name'], aliveinfo.normal_public_fx,
    #                        aliveinfo.normal_public_fl,
    #                        aliveinfo.normal_public_fx_latency,
    #                        aliveinfo.normal_public_fl_latency))

    metric = "normal_npath"
    json_body = [{
        "measurement": metric,
        "tags": {
            "host": self.data['name'],
        },
        "fields": {
            "normal_public_fx": aliveinfo.normal_public_fx,
            "normal_public_fl": aliveinfo.normal_public_fl,
            "normal_public_fx_latency":
                round(aliveinfo.normal_public_fx_latency, 3),
            "normal_public_fl_latency":
                round(aliveinfo.normal_public_fl_latency, 3)
        }
    }]
    self.influxdb_client.write_points(json_body)

    # print influx_container
    # headers = {"Content-Type": "text/plain"}
    # headers = {}
    # postData(DB_URL, influx_container, headers)

    ret = self.tcrpc.reportAlive(self.data['uuid'], aliveinfo)
    return ret
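# The method above assumes self.influxdb_client already exists. As a minimal
# sketch only, this is how such a client is typically constructed with the
# influxdb Python package; the host, port, credentials, and database name
# below are placeholder assumptions, not values from the original project.
from influxdb import InfluxDBClient

influxdb_client = InfluxDBClient(host='localhost', port=8086,
                                 username='root', password='root',
                                 database='monitoring')

# write_points() accepts the same list-of-dicts structure that reportAlive()
# builds in json_body above.
influxdb_client.write_points([{
    "measurement": "normal_npath",
    "tags": {"host": "example-host"},
    "fields": {"normal_public_fx": True, "normal_public_fx_latency": 0.123},
}])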
from flask import Flask, render_template, json
from pinger import Pinger
from configparser import ConfigParser
import _thread
import time

app = Flask(__name__,
            static_url_path='',
            static_folder='../static',
            template_folder='../templates')
config = ConfigParser()
pinger = Pinger(config)


def update_values():
    while True:
        pinger.ping_all()
        time.sleep(config.get_check_interval())


@app.route('/')
def index():
    return render_template('index.html',
                           title=config.get_name(),
                           description=config.get_description(),
                           icon=config.get_icon())


@app.route("/json")
def get_data():
    response = app.response_class(response=json.dumps(pinger.get_values()),
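# The /json handler above is cut off mid-call. The standalone sketch below is
# an illustration only (not the original project's code) of the usual Flask
# idiom for finishing that response, and of starting the background
# update_values() loop that the otherwise-unused _thread import suggests; the
# placeholder payload stands in for pinger.get_values().
import json
import _thread
import time

from flask import Flask

app = Flask(__name__)
values = {"example.com": {"alive": True, "latency_ms": 12.3}}  # placeholder data


def update_values():
    while True:
        # Stand-in for pinger.ping_all(); refresh `values` here.
        time.sleep(30)


@app.route("/json")
def get_data():
    # Explicitly build a JSON response via Flask's response class.
    return app.response_class(response=json.dumps(values),
                              status=200,
                              mimetype='application/json')


if __name__ == '__main__':
    _thread.start_new_thread(update_values, ())  # background refresh loop
    app.run()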
def ping(timeout=500, packet_size=55, *args, **kwargs):
    hosts = ('192.168.5.5', '192.168.5.18', '8.8.8.8', 'ucla.edu')
    p = Pinger(hosts, timeout, packet_size, *args, **kwargs)
    p.run()
from pinger import Pinger
import db_module

pinger = Pinger(db=db_module)
pinger.start_ping()
# Define the number of discrete depths between the surface and seabed.
num_actions = 6

# Define the environment with the number of discrete depths for the detectable
# object.
env = Environment(num_actions)

# Define the LRI automaton with the same number of actions. This number does
# not correspond to the number of receivers on the array. It is merely the
# representation of the array's ability to detect the object at that depth.
lri = LRI(num_actions)  # The learning automaton.

# The most probable depth that the object exists at, as calculated by the
# learner.
bestdepth = np.zeros(num_actions)

# The penalty probabilities for the learner.
E = [0.1, 0.2, 0.4, 0.2, 0.01, 0.09]
det_obj = Pinger(E)  # Create the detectable object.

# Run 5 individual experiments.
for k in range(5):
    # Generate an ensemble of 100000 experiments.
    for j in range(100000):
        # Reset the action probabilities.
        lri.reset_actions()
        # Run a single experiment. Terminate if it reaches 10000 iterations.
        for i in range(10000):
            # Define m as the next action predicting the depth of the object.
            m = lri.next_action()
            # Define req as the next detectable object depth.
            req = det_obj.request()
            # Reward if m == req.
            resp = env.response(m, req)
class MainClass:
    def __init__(self):
        self.blah = "blah"
        self.ping = Pinger()
        self.network = [
            "10.35.0", "10.35.180", "10.35.155", "10.36.0", "10.36.1",
            "10.255.0", "10.255.1", "10.255.2", "10.255.3", "172.18.1",
            "172.18.3", "172.18.10"
        ]
        # self.network = ["10.35.180"]  # testing network
        self.today = datetime.datetime.now()
        self.dayNow = self.today.strftime('%Y-%m-%d')
        self.parentDir = os.path.dirname(os.path.realpath(__file__))
        self.logDir_main = os.path.join(self.parentDir, "logs")
        self.logDir_responses = os.path.join(self.parentDir, "logs",
                                             "ping_responses")
        self.htmlOutDir = os.path.join("/" + "var", "www", "html",
                                       "pings" + "/")

    def pingNetwork(self):
        self.setupEnv()
        # pause = raw_input("ctrl-C to stop, enter to continue")
        for network in self.network:
            networkFile = (os.path.join(
                self.logDir_responses,
                network + "-" + self.dayNow + "-" + "responses.txt"))
            with open(networkFile, "a+") as outfile:
                with open(os.path.join(self.logDir_main, network),
                          "a+") as netFileOut:
                    netSummary = []
                    count = 0
                    for _ in range(1, 254):  # testing
                        result = self.ping.pingIt(network, _)
                        if result == True:
                            count += 1
                            outfile.write(
                                str(network) + "." + str(_) + "," + "True" +
                                "\n")
                        else:
                            netSummary.append(result)
                            # for item in netSummary:
                            outfile.write(result + "," + "False" + "\n")
                    print "Pings from network %s.x: %s" % (network, count)
                    netFileOut.write(str(count) + "," + self.dayNow + "\n")
            self.makeWebPage(networkFile, network)

    def setupEnv(self):
        if not os.path.exists(self.logDir_main):
            print "Creating log directory"
            os.mkdir(self.logDir_main)
        if not os.path.exists(self.logDir_responses):
            print "Creating responses directory"
            os.mkdir(self.logDir_responses)

    def makeWebPage(self, networkFile, network):
        with open(networkFile, "r") as inFile:
            with open(os.path.join(self.htmlOutDir + network + ".html"),
                      "w") as outFile:
                outFile.write("<!DOCTYPE html>")
                outFile.write("<html>")
                outFile.write("<head>")
                outFile.write(" <link rel=stylesheet href=style.css>")
                outFile.write("</head>")
                outFile.write("<body>")
                outFile.write("<h2>" + "Pings from " + network + " on " +
                              self.dayNow + "</h2>")
                outFile.write("<table border=1>" + "\n")
                for line in inFile:
                    splitLine = line.split(",")
                    pingResponse = splitLine[1].strip()
                    if pingResponse == "True":
                        responseColor = "#00FF00"
                    else:
                        responseColor = "#FF0000"
                    outFile.write("<tr>" + "<td bgcolor=" + responseColor +
                                  ">" + "{:>15}".format(splitLine[0]) +
                                  "</td>" + "<td>" +
                                  "{:>25}".format(splitLine[1]) + "</td>" +
                                  " " + "<td>" +
                                  "{:>40}".format("placeholder2" +
                                                  "</td></tr>" + "\n"))
                outFile.write("</table>")
                outFile.write("</body>")
                outFile.write("</html>")
# object.
env = Environment(num_actions)

# Define the LRP automaton with the same number of actions. This number does
# not correspond to the number of receivers on the array. It is merely the
# representation of the array's ability to detect the object at that depth.
lrp = LRP(num_actions)  # The learning automaton.

# The most probable depth that the object exists at, as calculated by the
# learner.
bestdepth = np.zeros(num_actions)

# Define the Markovian Switching Environment that will feed probabilities to
# the Pinger object.
Es = [[0.1, 0.2, 0.4, 0.2, 0.01, 0.09],
      [0, 0, 0.8, 0.1, 0, 0.1],
      [0, 0, 0, 1, 0, 0],
      [0.1, 0.1, 0.6, 0.05, 0.01, 0.04]]
mse = MSE(Es)
det_obj = Pinger(mse.env_now())  # Create the detectable object.

# Run an individual experiment for each environment.
for k in range(len(Es)):
    # Generate an ensemble of n experiments.
    for j in range(n):
        # Reset the action probabilities.
        lrp.reset_actions()
        # Run a single experiment. Terminate if it reaches 10000 iterations.
        while (True):
            # Define m as the next action predicting the depth of the object.
            m = lrp.next_action()
            # Define req as the next detectable object depth.
            req = det_obj.request()
            # Reward if m == req.
            resp = env.response(m, req)
def service():
    app = Pinger()
    app.run()
# Define the Markovian Switching Environments that will feed probabilities to
# the Pinger objects.
Es = [[0.48796, 0.024438, 0.067891, 0.41971, 0.00],
      [0.021431, 0.071479, 0.40562, 0.50147, 0.00],
      [0.018288, 0.083153, 0.50582, 0.39274, 0.00],
      [0.48455, 0.015527, 0.18197, 0.31795, 0.00],
      [0.01675, 0.58845, 0.11313, 0.28167, 0.00]]
En = [[0.021431, 0.071479, 0.40562, 0.50147, 0.00],
      [0.48796, 0.024438, 0.067891, 0.41971, 0.00],
      [0.018288, 0.083153, 0.50582, 0.39274, 0.00],
      [0.01675, 0.58845, 0.11313, 0.28167, 0.00],
      [0.48455, 0.015527, 0.18197, 0.31795, 0.00]]
mse = MSE(Es)
mse1 = MSE(En)
det_obj = Pinger(mse.env_now())  # Create the detectable object.
first_uav = Pinger(mse1.env_now())

# Set up transmission vectors.
for i in range(num_actions):
    transmission.append(turtle.Turtle())
    transmission1.append(turtle.Turtle())

# Run 5 individual experiments.
for k in range(len(mse.envs)):
    # Generate an ensemble of n experiments.
    source.goto(-300, depths[k])
    receiver.clear()
    receiver1.clear()
    for i in range(num_actions):
        transmission[i].clear()
'''Elevator test.'''
from discretized_lri import DLRI
from environment import Environment
from pinger import Pinger
import numpy as np
import helpers as h
import math

num_actions = 6
env = Environment(num_actions)
dlri = DLRI(num_actions)
bestdepth = np.zeros(num_actions)
E = [0.1, 0.2, 0.4, 0.2, 0.01, 0.09]
det_obj = Pinger(E)

for k in range(5):
    for j in range(1000):
        # Caught me again...
        dlri.p = np.array(h.make_dp(num_actions))
        m = math.floor(num_actions / 2)
        while (True):
            req = det_obj.request()
            resp = env.response(m, req)
            if (not resp):
                dlri.do_reward(m)
            else:
                dlri.do_penalty()
            m = dlri.next_action()
            if (max(dlri.p) == (num_actions * num_actions)):
                # The best depth counting from 0 (sea surface).
                bestdepth[np.argmax(dlri.p)] += 1
if __name__ == '__main__':
    monitor_port = 5000
    app_port = 5001
    if len(sys.argv) == 2:
        app_port = sys.argv[1]
    interval = 15

    log.info('PORT {} ARGS {}'.format(app_port, len(sys.argv)))
    log.info(sys.argv)

    pinger = Pinger(
        app_name='reporting',
        app_host='127.0.0.1',
        app_port=app_port,
        monitor_host='127.0.0.1',
        monitor_port=monitor_port,
        interval=interval
    )
    rt = RepeatedTimer(interval=interval, function=pinger.ping)

    try:
        # app = Flask(__name__)
        # app.run(host='127.0.0.1', port=5001)
        app = connexion.App(__name__, specification_dir='./openapi')
        app.add_api('swagger.yml')
        # NOTE: debug=True causes the restart
        app.run(host='127.0.0.1', port=app_port, debug=False)
    finally:
        pinger.shutdown()
        rt.stop()
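# RepeatedTimer is referenced above but not shown. The class below is a
# minimal sketch of the common threading.Timer recipe such a helper usually
# follows; it is an assumption about the interface (interval=, function=,
# stop()), not the project's actual implementation.
import threading


class RepeatedTimer:
    """Call `function` every `interval` seconds until stop() is called."""

    def __init__(self, interval, function, *args, **kwargs):
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self._timer = None
        self._running = False
        self.start()  # arm immediately, matching the usage above

    def _run(self):
        # Re-arm first so the call cadence stays close to `interval`.
        self._running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        if not self._running:
            self._timer = threading.Timer(self.interval, self._run)
            self._timer.daemon = True
            self._timer.start()
            self._running = True

    def stop(self):
        if self._timer is not None:
            self._timer.cancel()
        self._running = False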
from lrp import Linear_Reward_Penalty as LRP
from environment import Environment
from pinger import Pinger
import tune_lrp as tune
import numpy as np

test_lrp = LRP(5)
penalty_probs = [0.3, 0.1, 0.1, 0.1, 0.4]
penalizer = Pinger(np.array(penalty_probs))
env = Environment(5)

a = tune.find_optimal_a(test_lrp, env, penalizer)
print("The value for a after tuning is " + str(test_lrp.a))
b = tune.find_optimal_b(test_lrp, env, penalizer)
print("The value for b after tuning is " + str(test_lrp.b))
test_lrp.a = a
test_lrp.b = b

n = 10000
bestdepth = np.zeros(5)
for j in range(n):
    # Reset the action probabilities.
    test_lrp.reset_actions()
    # Run a single experiment. Terminate if it reaches 10000 iterations.
    while (True):
        # Define m as the next action predicting the depth of the object.
        m = test_lrp.next_action()
        # Define req as the next detectable object depth.
        req = penalizer.request()
        # Reward if m == req.
        resp = env.response(m, req)
        if (not resp):
            test_lrp.do_reward(m)