Example #1
 def machine_ip(self):
     ip = util.get_ip_address('eno1')
     print("ip1", ip)
     if ip is None or ip == '':
         ip = util.get_ip_address('enp67s0')
         print("ip2", ip)
         if ip is None or ip == '':
             ip = util.get_ip_address('p1p1')
             print("ip3", ip)
     return ip
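Most of the examples in this list call a get_ip_address helper (either util.get_ip_address or an imported get_ip_address) whose definition is not shown. For orientation only, here is a minimal Linux-only sketch of the interface-based variant; this is an assumption about what such a helper commonly looks like, not the actual util module used by any of these projects.

import fcntl
import socket
import struct

def get_ip_address(ifname):
    """Return the IPv4 address bound to interface `ifname`, or '' on failure."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # SIOCGIFADDR (0x8915) asks the kernel for the interface's primary IPv4 address
        packed = fcntl.ioctl(s.fileno(), 0x8915,
                             struct.pack('256s', ifname[:15].encode('utf-8')))
        return socket.inet_ntoa(packed[20:24])
    except OSError:
        return ''
    finally:
        s.close()

The zero-argument calls seen in other examples (e.g. util.get_ip_address()) typically use a different strategy, such as connecting a UDP socket to a public address and reading the socket's own address; that variant is not sketched here.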
Example #2
def install_zookeeper():
	# /mnt/zookeeper/data chown
	config = json.load(open('zookeeper-config.json'))
	# use subprocess.call so a non-zero exit code reaches the check below
	data_dir_maked = subprocess.call(["sudo", "mkdir", "-p", "/mnt/zookeeper/data"])
	if 0 == data_dir_maked:
		subprocess.call(["sudo", "chown", "-R", "cloud-user", "/mnt/zookeeper"])
	else:
		print("Create dirctory /mnt/zookeeper/data failed")
		sys.exist(1)
	print("Create dirctory /mnt/zookeeper/data successfully")
	# myid
	myip = get_ip_address()
	mynode = [node for node in config['nodes'] if node['ip'] == myip][0]
	open("/mnt/zookeeper/data/myid", "w").write(str(mynode['id']))
	print("Set myid for zookeeper successfully")
	# cp zookeeper
	subprocess.call(['sudo', 'rm', '-rf', '/usr/local/zookeeper'])
	subprocess.call(['sudo', 'cp', '-r', './zookeeper', '/usr/local/zookeeper'])
	for node in config['nodes']:
		appendline('/usr/local/zookeeper/conf/zoo.cfg', 'server.'+str(node['id'])+'=zoo'+str(node['id'])+':2888:3888')
	
	subprocess.call(['sudo', 'chown', '-R', 'cloud-user', '/usr/local/zookeeper'])
	# hosts
	for node in config['nodes']:
		appendline('/etc/hosts', node['ip']+'\t'+'zoo'+str(node['id']))
Example #3
def install_zookeeper():
    # /mnt/zookeeper/data chown
    config = json.load(open('zookeeper-config.json'))
    # use subprocess.call so a non-zero exit code reaches the check below
    data_dir_maked = subprocess.call(
        ["sudo", "mkdir", "-p", "/mnt/zookeeper/data"])
    if 0 == data_dir_maked:
        subprocess.call(
            ["sudo", "chown", "-R", "cloud-user", "/mnt/zookeeper"])
    else:
        print("Create dirctory /mnt/zookeeper/data failed")
        sys.exist(1)
    print("Create dirctory /mnt/zookeeper/data successfully")
    # myid
    myip = get_ip_address()
    mynode = [node for node in config['nodes'] if node['ip'] == myip][0]
    open("/mnt/zookeeper/data/myid", "w").write(str(mynode['id']))
    print("Set myid for zookeeper successfully")
    # cp zookeeper
    subprocess.call(['sudo', 'rm', '-rf', '/usr/local/zookeeper'])
    subprocess.call(
        ['sudo', 'cp', '-r', './zookeeper', '/usr/local/zookeeper'])
    for node in config['nodes']:
        appendline(
            '/usr/local/zookeeper/conf/zoo.cfg', 'server.' + str(node['id']) +
            '=zoo' + str(node['id']) + ':2888:3888')

    subprocess.call(
        ['sudo', 'chown', '-R', 'cloud-user', '/usr/local/zookeeper'])
    # hosts
    for node in config['nodes']:
        appendline('/etc/hosts', node['ip'] + '\t' + 'zoo' + str(node['id']))
Example #4
    def show(self):
        img = Image.new('1', (epd2in7.EPD_WIDTH, epd2in7.EPD_HEIGHT),
                        255)  # 255: clear the frame
        draw = ImageDraw.Draw(img)
        font12 = ImageFont.truetype(
            '/usr/share/fonts/truetype/freefont/FreeSans.ttf', 12)

        Y0 = 60
        line_height = 12
        margin = 5
        for k in range(len(self.lines)):
            draw.text((margin, Y0 + k * line_height),
                      self.lines[k],
                      font=font12,
                      fill=0)

        bigY = epd2in7.EPD_HEIGHT - 46
        draw.line((0, bigY, epd2in7.EPD_WIDTH, bigY), fill=0)
        draw.text((10, bigY + 5), util.date(), font=font12, fill=0)
        draw.text((10, bigY + 17),
                  'connecté au réseau ' + util.get_essid(),
                  font=font12,
                  fill=0)
        draw.text((10, bigY + 29),
                  'adresse IP : ' + util.get_ip_address(),
                  font=font12,
                  fill=0)
        bmp = Image.open('/home/pi/Miniban/src/lib/display/explorer-logo.bmp')
        img.paste(bmp, (0, 0))
        self.epd.display(self.epd.getbuffer(img))
        self.clear()
Example #5
  def __init__(self, configuration_file, database_file='jonnyboards.db'):
      self.config = {}
      self.database = database.Database(database_file)

      (self.api_key, self.sensors) = util.loadConfig(configuration_file)
      self.ipaddress = util.get_ip_address()
      self._configureSensors()
Example #6
def check_db(level=0):
    ip = "debug"
    if config.DEBUG is False:
        ip = get_ip_address(config.NET_PORT)
    msg = time.ctime() + '\n' + ip + '\n'
    fail = False
    try:
        if level >= 0:
            if not check_last_block():
               msg = msg + ("check last blk fail\n")
               fail = True
            else:
               msg = msg + ("check last blk success\n")

            if not check_last_tx():
               msg = msg + ("check last tx fail\n")
               fail = True
            else:
               msg = msg + ("check last tx success\n")

        if level >= 1:
            if not check_blk_count():
               msg = msg + ("check blk count fail\n")
               fail = True
            else:
               msg = msg + ("check blk count success\n")


            if not check_addr_balance():
               msg = msg + ("check address fail\n")
               fail = True
            else:
               msg = msg + ("check address success\n")

            if not check_all_blk_count():
               msg = msg + ("check all blk fail\n")
               fail = True
            else:
               msg = msg + ("check all blk success\n")

        if level >= 2:
            if not check_tx_count():
               msg = msg + ("check tx count fail\n")
               fail = True
            else:
               msg = msg + ("check tx count success\n")

            if not check_all_tx_count():
               msg = msg + ("check all tx count fail\n")
               fail = True
            else:
               msg = msg + ("check all tx count success\n")


    except Exception as e:
        msg = msg + ("check db fail:\n %s" % e)
        fail = True
Example #7
def get_node():
    my_node = node.get_my_node()

    if my_node is None:
        new_node = Node(util.get_ip_address('en0'))
        node.add_node(new_node)
        return new_node

    else:
        return my_node
Example #8
def check_db(level=0):
    ip = "debug"
    if config.DEBUG is False:
        ip = get_ip_address(config.NET_PORT)
    msg = time.ctime() + '\n' + ip + '\n'
    fail = False
    try:
        if level >= 0:
            if not check_last_block():
                msg = msg + ("check last blk fail\n")
                fail = True
            else:
                msg = msg + ("check last blk success\n")

            if not check_last_tx():
                msg = msg + ("check last tx fail\n")
                fail = True
            else:
                msg = msg + ("check last tx success\n")

        if level >= 1:
            if not check_blk_count():
                msg = msg + ("check blk count fail\n")
                fail = True
            else:
                msg = msg + ("check blk count success\n")

            if not check_addr_balance():
                msg = msg + ("check address fail\n")
                fail = True
            else:
                msg = msg + ("check address success\n")

            if not check_all_blk_count():
                msg = msg + ("check all blk fail\n")
                fail = True
            else:
                msg = msg + ("check all blk success\n")

        if level >= 2:
            if not check_tx_count():
                msg = msg + ("check tx count fail\n")
                fail = True
            else:
                msg = msg + ("check tx count success\n")

            if not check_all_tx_count():
                msg = msg + ("check all tx count fail\n")
                fail = True
            else:
                msg = msg + ("check all tx count success\n")

    except Exception as e:
        msg = msg + ("check db fail:\n %s" % e)
        fail = True
Example #9
    def find_idefix(self, widget=None):

        results = []
        arp = ""  # keep this a string so the split below still works if arp is unavailable
        try:
            arp = subprocess.check_output(["arp", "-a"])
            arp = arp.decode("cp850")
        except FileNotFoundError:
            self.controller.arw["network_summary_status"].set_text(
                _("arp utility not found. Cannot detect Idefix"))

        valid_addresses = set()

        # filter duplicate ip addresses
        for line1 in arp.split("\n"):
            result = get_ip_address(line1)
            if result:
                valid_addresses.add(result.group(0))

        def try_connect(ip):
            print("Trying: " + ip)
            h1 = http.client.HTTPConnection(ip, timeout=10)
            try:
                h1.connect()
            except:
                h1.close()
                return
            h1.request("GET", "/network-info.php")
            res = h1.getresponse()
            if res.status == 200:
                content = res.read().decode("cp850")
                # some devices which are protected by a password will give a positive (200) answer to any file name.
                # We must check for a string which is specific to Idefix
                if "idefix network info" in content:
                    h1.close()
                    results.append([ip, content])
            h1.close()

        # Speeds up detection by trying multiple connections at the same time using threads
        with ThreadPoolExecutor(max_workers=3) as executor:
            for ip, result in zip(valid_addresses,
                                  executor.map(try_connect, valid_addresses)):
                while Gtk.events_pending():
                    Gtk.main_iteration()
                self.controller.arw["network_summary_status"].set_text(
                    _("Connected : " + ip))
                if result:
                    results.append(result)

        return results
Example #10
def logrotate_thread_task(writables, tgen_writable, torctl_writable, docroot, nickname, done_ev):
    next_midnight = None

    while not done_ev.wait(1):
        # get time
        utcnow = datetime.datetime.utcnow()

        # setup the next expiration time (midnight tonight)
        if next_midnight is None:
            next_midnight = datetime.datetime(utcnow.year, utcnow.month, utcnow.day, 23, 59, 59)
            # make sure we are not already past the above time today
            if (next_midnight - utcnow).total_seconds() < 0:
                next_midnight -= datetime.timedelta(1)  # subtract 1 day

        # if we are past midnight, launch the rotate task
        if (next_midnight - utcnow).total_seconds() < 0:
            # handle the general writables we are watching
            for w in writables:
                w.rotate_file(filename_datetime=next_midnight)

            # handle tgen and tor writables specially, and do analysis
            if tgen_writable is not None or torctl_writable is not None:
                try:

                    # get our public ip address, do this every night in case it changes
                    public_measurement_ip_guess = util.get_ip_address()

                    # set up the analysis object with our log files
                    anal = analysis.Analysis(nickname=nickname, ip_address=public_measurement_ip_guess)
                    if tgen_writable is not None:
                        anal.add_tgen_file(tgen_writable.rotate_file(filename_datetime=next_midnight))
                    if torctl_writable is not None:
                        anal.add_torctl_file(torctl_writable.rotate_file(filename_datetime=next_midnight))

                    # run the analysis, i.e. parse the files
                    anal.analyze(do_simple=False, date_filter=next_midnight.date())

                    # save the results in onionperf and torperf format in the www docroot
                    anal.save(output_prefix=docroot, do_compress=True)
                    anal.export_torperf_version_1_1(output_prefix=docroot, do_compress=False)

                    # update the xml index in docroot
                    generate_docroot_index(docroot)
                except Exception as e:
                    logging.warning("Caught and ignored exception in TorPerf log parser: {0}".format(repr(e)))
                    logging.warning("Formatted traceback: {0}".format(traceback.format_exc()))
            # reset our timer
            next_midnight = None
Example #11
def logrotate_thread_task(writables, tgen_writable, torctl_writable, docroot, nickname, done_ev):
    next_midnight = None

    while not done_ev.wait(1):
        # get time
        utcnow = datetime.datetime.utcnow()

        # setup the next expiration time (midnight tonight)
        if next_midnight is None:
            next_midnight = datetime.datetime(utcnow.year, utcnow.month, utcnow.day, 23, 59, 59)
            # make sure we are not already past the above time today
            if (next_midnight - utcnow).total_seconds() < 0:
                next_midnight -= datetime.timedelta(1)  # subtract 1 day

        # if we are past midnight, launch the rotate task
        if (next_midnight - utcnow).total_seconds() < 0:
            # handle the general writables we are watching
            for w in writables:
                w.rotate_file(filename_datetime=next_midnight)

            # handle tgen and tor writables specially, and do analysis
            if tgen_writable is not None or torctl_writable is not None:
                try:

                    # get our public ip address, do this every night in case it changes
                    public_measurement_ip_guess = util.get_ip_address()

                    # set up the analysis object with our log files
                    anal = analysis.Analysis(nickname=nickname, ip_address=public_measurement_ip_guess)
                    if tgen_writable is not None:
                        anal.add_tgen_file(tgen_writable.rotate_file(filename_datetime=next_midnight))
                    if torctl_writable is not None:
                        anal.add_torctl_file(torctl_writable.rotate_file(filename_datetime=next_midnight))

                    # run the analysis, i.e. parse the files
                    anal.analyze(do_simple=False, date_filter=next_midnight.date())

                    # save the results in onionperf and torperf format in the www docroot
                    anal.save(output_prefix=docroot, do_compress=True)
                    anal.export_torperf_version_1_1(output_prefix=docroot, do_compress=False)

                    # update the xml index in docroot
                    generate_docroot_index(docroot)
                except Exception as e:
                    logging.warning("Caught and ignored exception in TorPerf log parser: {0}".format(repr(e)))
                    logging.warning("Formatted traceback: {0}".format(traceback.format_exc()))
            # reset our timer
            next_midnight = None
Example #12
    def start(self):
        """Start listener in a background thread

    Returns:
      address of the Server as a tuple of (host, port)
    """
        server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_sock.bind(('', 0))
        server_sock.listen(10)

        # hostname may not be resolvable but IP address probably will be
        host = util.get_ip_address()
        port = server_sock.getsockname()[1]
        addr = (host, port)
        logging.info("listening for reservations at {0}".format(addr))

        def _listen(self, sock):
            CONNECTIONS = []
            CONNECTIONS.append(sock)

            while not self.done:
                read_socks, write_socks, err_socks = select.select(
                    CONNECTIONS, [], [], 60)
                for sock in read_socks:
                    if sock == server_sock:
                        client_sock, client_addr = sock.accept()
                        CONNECTIONS.append(client_sock)
                        logging.debug(
                            "client connected from {0}".format(client_addr))
                    else:
                        try:
                            msg = self.receive(sock)
                            self._handle_message(sock, msg)
                        except Exception as e:
                            logging.debug(e)
                            sock.close()
                            CONNECTIONS.remove(sock)

            server_sock.close()

        t = threading.Thread(target=_listen, args=(self, server_sock))
        t.daemon = True
        t.start()

        return addr
Example #13
 def __init__(self, name=None, ip=None, port=None):
     abstractserver.AbstractServer.__init__(self, name)
     #self.ip = ip
     self.ip = get_ip_address('eth0')
     self.port = port
Example #14
        pd_socket = Pd('localhost', user.port)
        # pd_socket.send_async(f'{user.audio_conf["reverb"]} {user.audio_conf["delay"]} {user.audio_conf["damp"]}', repeat_until_connect=True)
        pd_socket.send(user.audio_conf_as_pd_payload(),
                       repeat_until_connect=True)
        icecast_url = f'http://{util.get_ip_address()}:{config.STREAM_ENDPOINT_PORT}'
        #   Wait for pd to successfully connect to icecast. Bad dirty way but quick and simple
        socketio.sleep(1)
        socketio.emit(
            'stream', {
                'source': f'{icecast_url}/{stream_mountpoint}',
                'raw': f'{icecast_url}/{raw_mountpoint}'
            })


if __name__ == "__main__":
    ip = util.get_ip_address()
    print(f'To access externally, open this address from your device {ip}')
    #   Write/Update json config
    import json
    import shutil
    import os
    import ntpath
    import time
    with open('../frontend/src/config.json', mode='w') as file:
        json.dump(
            {
                "endpoint_port": str(config.API_ENDPOINT_PORT),
                "endpoint_ip": ip
            }, file)

    #   Copy file from ../sound to this directory.
Example #15
 def __init__(self, name = None, ip = None, port = None):
   abstractserver.AbstractServer.__init__(self, name)
   #self.ip = ip
   self.ip = get_ip_address('eth0')
   self.port = port
Example #16
    def run(self, do_onion=True, do_inet=True, client_tgen_listen_port=58888, client_tgen_connect_ip='0.0.0.0', client_tgen_connect_port=8080, client_tor_ctl_port=59050, client_tor_socks_port=59000,
             server_tgen_listen_port=8080, server_tor_ctl_port=59051, server_tor_socks_port=59001, twistd_port=50080):
        '''
        only `server_tgen_listen_port` and `twistd_port` are "public" and need to be opened on the firewall.
        if `client_tgen_connect_port` != `server_tgen_listen_port`, then you should have installed a forwarding rule in the firewall.
        all ports need to be unique though, and unique among multiple onionperf instances.

        here are some sane defaults:
        client_tgen_listen_port=58888, client_tgen_connect_port=8080, client_tor_ctl_port=59050, client_tor_socks_port=59000,
        server_tgen_listen_port=8080, server_tor_ctl_port=59051, server_tor_socks_port=59001, twistd_port=50080
        '''
        self.threads = []
        self.done_event = threading.Event()

        # if ctrl-c is pressed, shutdown child processes properly
        try:
            # make sure stem and Tor supports ephemeral HS (version >= 0.2.7.1-alpha)
            # and also the NEWNYM mode that clears descriptor cache (version >= 0.2.7.3-rc)
            if do_onion:
                try:
                    tor_version = get_system_tor_version(self.tor_bin_path)
                    if tor_version < Requirement.ADD_ONION or tor_version < Version('0.2.7.3-rc'):  # ADD_ONION is a stem 1.4.0 feature
                        logging.warning("OnionPerf in onion mode requires Tor version >= 0.2.7.3-rc, you have {0}, aborting".format(tor_version))
                        return
                except:
                    logging.warning("OnionPerf in onion mode requires stem version >= 1.4.0, you have {0}, aborting".format(stem_version))
                    return

            logging.info("Bootstrapping started...")
            logging.info("Log files for the client and server processes will be placed in {0}".format(self.datadir_path))

            general_writables = []
            tgen_client_writable, torctl_client_writable = None, None

            if do_onion or do_inet:
                general_writables.append(self.__start_tgen_server(server_tgen_listen_port))

            if do_onion:
                tor_writable, torctl_writable = self.__start_tor_server(server_tor_ctl_port, server_tor_socks_port, {server_tgen_listen_port:client_tgen_connect_port})
                general_writables.append(tor_writable)
                general_writables.append(torctl_writable)

            if do_onion or do_inet:
                tor_writable, torctl_client_writable = self.__start_tor_client(client_tor_ctl_port, client_tor_socks_port)
                general_writables.append(tor_writable)

            server_urls = []
            if do_onion and self.hs_service_id is not None: server_urls.append("{0}.onion:{1}".format(self.hs_service_id, server_tgen_listen_port))
            if do_inet:
                connect_ip = client_tgen_connect_ip if client_tgen_connect_ip != '0.0.0.0' else util.get_ip_address()
                server_urls.append("{0}:{1}".format(connect_ip, client_tgen_connect_port))

            if do_onion or do_inet:
                assert len(server_urls) > 0

                tgen_client_writable = self.__start_tgen_client(server_urls, client_tgen_listen_port, client_tor_socks_port)
                general_writables.append(self.__start_twistd(twistd_port))

                self.__start_log_processors(general_writables, tgen_client_writable, torctl_client_writable)

                logging.info("Bootstrapping finished, entering heartbeat loop")
                time.sleep(1)
                while True:
                    # TODO add status update of some kind? maybe the number of files in the twistd directory?
                    # logging.info("Heartbeat: {0} downloads have completed successfully".format(self.__get_download_count(tgen_client_writable.filename)))

                    if self.__is_alive():
                        logging.info("All helper processes seem to be alive :)")
                    else:
                        logging.warning("Some parallel components failed too many times or have died :(")
                        logging.info("We are in a broken state, giving up and exiting now")
                        break

                    logging.info("Next main process heartbeat is in 1 hour (helper processes run on their own schedule)")
                    logging.info("press CTRL-C for graceful shutdown...")
                    time.sleep(3600)
            else:
                logging.info("No measurement mode set, nothing to do")

        except KeyboardInterrupt:
            logging.info("Interrupt received, please wait for graceful shutdown")
            self.__is_alive()
        finally:
            logging.info("Cleaning up child processes now...")

            if self.hs_service_id is not None:
                try:
                    with Controller.from_port(port=self.hs_control_port) as torctl:
                        torctl.authenticate()
                        torctl.remove_ephemeral_hidden_service(self.hs_service_id)
                except: pass  # this fails to authenticate if tor proc is dead

#            logging.disable(logging.INFO)
            self.done_event.set()
            for t in self.threads:
                logging.info("Joining {0} thread...".format(t.getName()))
                t.join()
            time.sleep(1)
#            logging.disable(logging.NOTSET)

            logging.info("Child processes terminated")
            logging.info("Child process cleanup complete!")
            logging.info("Exiting")
Example #17
 def _request_is_loopback(self, req):
     try:
         return get_ip_address(req[0]).is_loopback
     except Exception:
         pass
Example #18
 def bridge_ip(self):
     return util.get_ip_address('lxcbr0')
Example #19
import time
from shutil import copyfile
from os import path

import alsaaudio as aa
import sounddevice as sd
import numpy as np

from config_secret import *
from util import get_ip_address

BASE_PATH = path.abspath(path.dirname(__file__))

DEVICE_ID = 'robin-prototype'
IP = get_ip_address()

CHANNELS = 2
FORMAT = aa.PCM_FORMAT_S16_LE
PERIOD_SIZE = 1000
RATE = 192000

# Format fun


def format_size(format):
    return {
        aa.PCM_FORMAT_S16_LE: 2,
        aa.PCM_FORMAT_S24_LE: 3,
        aa.PCM_FORMAT_FLOAT_LE: 4
    }.get(format)
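As a usage note (an illustration added here, not part of the original script): format_size maps an ALSA sample format to its size in bytes, so buffer sizes for the stream configured above can be derived from it.

bytes_per_frame = CHANNELS * format_size(FORMAT)    # 2 channels * 2 bytes = 4
bytes_per_period = PERIOD_SIZE * bytes_per_frame    # 1000 frames -> 4000 bytes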
Example #20
util.log("Ciao Vecio")
bme280_gt.setup()
bme280_gt.get_calib_param()

dati = bme280_gt.readData()

# save to the local file
f = open(nome_file, 'a')
for x in dati:
    item = item_prefix + x
    csv = util.ora() + ";" + util.origin + ";" + item + ";" + dati[x]
    print(csv)
    f.write(csv + '\n')
f.close()

# save to the database
ip_local = util.get_ip_address()
if (ip_local != ''):
    util.log(ip_local)
    for x in dati:
        item = item_prefix + x
        payload = {'origin': util.origin, 'item': item, 'value': dati[x]}
        try:
            r = requests.get(sito, params=payload)
            #log( "comunicazione con server: " + r.text )
        except requests.exceptions.RequestException as e:
            util.log(e)
else:
    util.log("Nessuna rete")
Example #21
 def _request_is_loopback(self, req):
     try:
         return get_ip_address(req[0]).is_loopback
     except Exception:
         pass
Example #22
    def run(self, do_onion=True, do_inet=True, client_tgen_listen_port=58888, client_tgen_connect_ip='0.0.0.0', client_tgen_connect_port=8080, client_tor_ctl_port=59050, client_tor_socks_port=59000,
             server_tgen_listen_port=8080, server_tor_ctl_port=59051, server_tor_socks_port=59001):
        '''
        only `server_tgen_listen_port` is "public" and needs to be opened on the firewall.
        if `client_tgen_connect_port` != `server_tgen_listen_port`, then you should have installed a forwarding rule in the firewall.
        all ports need to be unique though, and unique among multiple onionperf instances.

        here are some sane defaults:
        client_tgen_listen_port=58888, client_tgen_connect_port=8080, client_tor_ctl_port=59050, client_tor_socks_port=59000,
        server_tgen_listen_port=8080, server_tor_ctl_port=59051, server_tor_socks_port=59001
        '''
        self.threads = []
        self.done_event = threading.Event()

        # if ctrl-c is pressed, shutdown child processes properly
        try:
            # make sure stem and Tor supports ephemeral HS (version >= 0.2.7.1-alpha)
            # and also the NEWNYM mode that clears descriptor cache (version >= 0.2.7.3-rc)
            if do_onion:
                try:
                    tor_version = get_system_tor_version(self.tor_bin_path)
                    if tor_version < Requirement.ADD_ONION or tor_version < Version('0.2.7.3-rc'):  # ADD_ONION is a stem 1.4.0 feature
                        logging.warning("OnionPerf in onion mode requires Tor version >= 0.2.7.3-rc, you have {0}, aborting".format(tor_version))
                        return
                except:
                    logging.warning("OnionPerf in onion mode requires stem version >= 1.4.0, you have {0}, aborting".format(stem_version))
                    return

            logging.info("Bootstrapping started...")
            logging.info("Log files for the client and server processes will be placed in {0}".format(self.datadir_path))

            general_writables = []
            tgen_client_writable, torctl_client_writable = None, None

            if do_onion or do_inet:
                general_writables.append(self.__start_tgen_server(server_tgen_listen_port))

            if do_onion:
                logging.info("Onion Service private keys will be placed in {0}".format(self.privatedir_path))
                tor_writable, torctl_writable = self.__start_tor_server(server_tor_ctl_port, server_tor_socks_port, {client_tgen_connect_port:server_tgen_listen_port})
                general_writables.append(tor_writable)
                general_writables.append(torctl_writable)

            if do_onion or do_inet:
                tor_writable, torctl_client_writable = self.__start_tor_client(client_tor_ctl_port, client_tor_socks_port)
                general_writables.append(tor_writable)

            server_urls = []
            if do_onion and self.hs_service_id is not None and self.hs_v3_service_id is not None:
                server_urls.append("{0}.onion:{1}".format(self.hs_service_id, client_tgen_connect_port))
                server_urls.append("{0}.onion:{1}".format(self.hs_v3_service_id, client_tgen_connect_port))
            if do_inet:
                connect_ip = client_tgen_connect_ip if client_tgen_connect_ip != '0.0.0.0' else util.get_ip_address()
                server_urls.append("{0}:{1}".format(connect_ip, client_tgen_connect_port))

            if do_onion or do_inet:
                assert len(server_urls) > 0

                tgen_client_writable = self.__start_tgen_client(server_urls, client_tgen_listen_port, client_tor_socks_port)

                self.__start_log_processors(general_writables, tgen_client_writable, torctl_client_writable)

                logging.info("Bootstrapping finished, entering heartbeat loop")
                time.sleep(1)
                if self.oneshot:
                    logging.info("Onionperf is running in Oneshot mode. It will download a 5M file and shut down gracefully...")
                while True:
                    # TODO add status update of some kind? maybe the number of files in the www directory?
                    # logging.info("Heartbeat: {0} downloads have completed successfully".format(self.__get_download_count(tgen_client_writable.filename)))
                    if self.oneshot:
                        downloads = 0
                        while True:
                            downloads = self.__get_download_count(tgen_client_writable.filename)
                            if downloads >= 1:
                               logging.info("Onionperf has downloaded a 5M file in oneshot mode, and will now shut down.")
                               break
                        else:
                            continue
                        break

                    if self.__is_alive():
                        logging.info("All helper processes seem to be alive :)")
                    else:
                        logging.warning("Some parallel components failed too many times or have died :(")
                        logging.info("We are in a broken state, giving up and exiting now")
                        break

                    logging.info("Next main process heartbeat is in 1 hour (helper processes run on their own schedule)")
                    logging.info("press CTRL-C for graceful shutdown...")
                    time.sleep(3600)
            else:
                logging.info("No measurement mode set, nothing to do")

        except KeyboardInterrupt:
            logging.info("Interrupt received, please wait for graceful shutdown")
            self.__is_alive()
        finally:
            logging.info("Cleaning up child processes now...")

            if self.hs_service_id is not None:
                try:
                    with Controller.from_port(port=self.hs_control_port) as torctl:
                        torctl.authenticate()
                        torctl.remove_ephemeral_hidden_service(self.hs_service_id)
                except: pass  # this fails to authenticate if tor proc is dead

            if self.hs_v3_service_id is not None:
                try:
                    with Controller.from_port(port=self.hs_v3_control_port) as torctl:
                        torctl.authenticate()
                        torctl.remove_ephemeral_hidden_service(self.hs_v3_service_id)
                except: pass  # this fails to authenticate if tor proc is dead


#            logging.disable(logging.INFO)
            self.done_event.set()
            for t in self.threads:
                logging.info("Joining {0} thread...".format(t.getName()))
                t.join()
            time.sleep(1)
#            logging.disable(logging.NOTSET)

            logging.info("Child processes terminated")
            logging.info("Child process cleanup complete!")
            logging.info("Exiting")
    def __init__(self, options=None, conf=None, node_manager_obj=None):

        # ---------------- Default values ----------------
        # Parameters
        self.BIG_TIME_DIFF = 1000000
        self.RRD_HTTP_SERVER_PORT = 8181
        self.NODE_CREATION_TIMEOUT = 30
        self.MAX_BYTES_LOGFILE = 5000000
        self.MAX_CMT_CONF_WAIT = 600
        self.CMT_CONF_WAIT = 10

        # Runtime collections
        self.node_list = []
        self.active_node_list = []
        self.dead_node_set = set()
        self.new_dead_node_set = set()
        self.master_list = []

        # Runtime classes
        #self.udp_listener = None
        self.ntf_reader = None
        self.nodelist_reader = None
        self.ntf_manager = None
        self.node_manager = None
        self.conf = None
        self.this_node = None
        #self.timer_dead_node = None
        #self.timer_delayed_dead_node = None
        #self.timer_heartbeat = None
        self.logger = None
        self.resource_lock = None

        # State variables
        #role = "SLAVE"
        self.mode = "RUN"
        self.my_master = None

        # Configuration variables
        self.heartbeat_period = 1
        self.rrd_scan_period = 1
        self.dead_node_timeout = 1
        self.heartbeats_received = 0
        self.min_time_diff = self.BIG_TIME_DIFF
        self.log_level = ""
        self.log_file = ""
        self.node_list_file = ""
        self.active_node_list_file = ""
        # ---------------- End of default values ----------------

        # Extract properties from configuration file or command line
        self.this_node = node.Node()
        self.conf = conf

        if self.conf:
            self.node_list_file = os.path.join(conf.hm_root, 'nodechecker', "etc", "nodelist.conf")
            self.active_node_list_file = os.path.join(conf.hm_root, 'nodechecker', "etc", "active_nodelist.conf")
            self.nodelist_reader = filereader.FileReader(self.node_list_file)
            self.node_manager = node_manager_obj
            self.heartbeat_period = int(conf.node_heartbeat_period)
            self.rrd_scan_period = int(conf.node_rrd_scan_period)
            self.dead_node_timeout = int(conf.node_dead_node_timeout)
            self.node_master_timeout = int(conf.node_master_timeout)

            if options and options.log_level:
                self.log_level = options.log_level
            else:
                self.log_level = conf.node_log_level

            if options and options.log_file:
                self.log_file = options.log_file
            else:
                self.log_file = conf.node_log_file

            if options and options.port:
                self.this_node.port = int(options.port)
            else:
                self.this_node.port = int(conf.node_udp_port)

            if options and options.ip_address:
                self.this_node.ip_address = options.ip_address
            elif conf.node_ip_address == "auto":
                self.this_node.ip_address = util.get_ip_address()
            else:
                self.this_node.ip_address = conf.node_ip_address

            if options and options.ip_address_public:
                self.this_node.ip_address_public = options.ip_address_public

            # can't know which ip address to use automatically. this must be configured in config file
            #elif conf.node_ip_address_public == "auto":
            #    node.ip_address_public = os.environ["OI_PUBLIC_IP"]
            else:
                self.this_node.ip_address_public = conf.node_ip_address_public

            if options and options.instance_id:
                self.this_node.instance_id = options.instance_id
            #elif conf.node_instance_id == "auto":
            #    node.instance_id = os.environ["OI_INSTANCE_ID"]
            else:
                self.this_node.instance_id = conf.node_instance_id

            if options and options.cluster_id:
                self.this_node.cluster_id = options.cluster_id
            #elif conf.node_cluster_id == "auto":
            #    node.cluster_id = os.environ["OI_CLUSTER_ID"]
            else:
                self.this_node.cluster_id = conf.node_cluster_id

            if options and options.machine_id:
                self.this_node.machine_id = options.machine_id
            #elif conf.node_machine_id == "auto":
            #    node.machine_id = os.environ["OI_MACHINE_ID"]
            else:
                self.this_node.machine_id = conf.node_machine_id

            if options and options.cloud_zone:
                self.this_node.cloud_zone = options.cloud_zone
            #elif conf.node_cloud_zone == "auto":
            #    node.cloud_zone = os.environ["OI_CLOUD_ZONE"]
            else:
                self.this_node.cloud_zone = conf.node_cloud_zone

            if options and options.mode:
                self.mode = options.mode
            else:
                self.mode = conf.node_mode

            # Construct the rest
            if self.nodelist_reader:
                self.this_node.group_name = self.nodelist_reader.read_attribute(self.this_node.ip_address, 'GROUP_NAME')
                self.this_node.machine_type = self.nodelist_reader.read_attribute(self.this_node.ip_address, 'MACHINE_TYPE')
                self.this_node.hostname = self.nodelist_reader.read_attribute(self.this_node.ip_address, 'HOST_NAME')

            self.resource_lock = threading.RLock()

            self.ntf_reader = notification.parser.NotificationParser(self.this_node, conf)
            self.ntf_manager = notification.manager.NotificationManager(self.this_node, conf)

            self.construct_logger()
Example #24
 def bridge_ip(self):
     return util.get_ip_address('docker0')
Example #25
 def get_ip(self):
     if self.ip == '':
         return get_ip_address()
     else:
         return self.ip
Example #26
 def machine_ip(self):
     ip = util.get_ip_address('em1')
     if ip is None or ip == '':
         ip = util.get_ip_address('p1p1')
     return ip
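Examples #1 and #26 hard-code a chain of interface names to try in order. The same fallback can also be written as a loop; the sketch below is illustrative (the candidate interface names are assumptions) and relies on an interface-based get_ip_address helper such as the one sketched after Example #1.

from util import get_ip_address  # assumed helper module, imported as in Example #19

def first_interface_ip(candidates=('em1', 'eno1', 'enp67s0', 'p1p1')):
    """Return the first non-empty IP address among the candidate interfaces."""
    for ifname in candidates:
        ip = get_ip_address(ifname)
        if ip:
            return ip
    return None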