Example #1
0
    def run(self) -> None:
        """Daemon entry point: initialize (managed mode only), start the HTTP
        listener, then iterate the service loop until self._isAlive clears.
        """
        logger.debug('Running Daemon: {}'.format(self._isAlive))
        set_proctitle('UDSActorDaemon')

        # Linux daemon will continue running unless something is requested to
        # Unmanaged services does not initializes "on start", but rather when user logs in (because userservice does not exists "as such" before that)
        if self.isManaged():
            if not self.initialize():
                self.finish()
                return # Stop daemon if initializes told to do so

            # logger.debug('Initialized, setting ready')
            # Initialization is done, set machine to ready for UDS, communicate urls, etc...
            self.setReady()

        # Start listening for petitions
        self.startHttpServer()

        # *********************
        # * Main Service loop *
        # *********************
        # Counter used to check ip changes only once every 10 seconds, for
        # example
        # NOTE(review): with doWait(1000) per iteration, "counter % 5" runs
        # loop() roughly every 5 seconds, not 10 — confirm intended period.
        counter = 0
        while self._isAlive:
            counter += 1
            try:
                if counter % 5 == 0:
                    self.loop()
            except Exception as e:
                # Keep the daemon alive on unexpected loop() errors; just log.
                logger.error('Got exception on main loop: %s', e)
            # In milliseconds, will break
            self.doWait(1000)

        self.finish()
Example #2
0
    def run(self) -> None:
        """Daemon entry point: initialize, signal readiness to UDS, then
        iterate the service loop until the alive flag is cleared.
        """
        logger.debug('Running Daemon: {}'.format(self._isAlive))
        set_proctitle('UDSActorDaemon')

        # The daemon keeps running unless initialization asks us to stop.
        if not self.initialize():
            self.finish()
            return  # Initialization requested daemon shutdown

        # Initialization succeeded: mark the machine ready for UDS
        # (communicates urls, etc...).
        self.setReady()

        # ---------------------
        #  Main service loop
        # ---------------------
        # "tick" counts one-second waits; loop() only runs on every fifth
        # iteration.
        tick = 0
        while self._isAlive:
            tick += 1
            try:
                if tick % 5 == 0:
                    self.loop()
            except Exception as e:
                logger.error('Got exception on main loop: %s', e)
            # Wait is expressed in milliseconds and is interruptible.
            self.doWait(1000)

        self.finish()
Example #3
0
    def run(self):
        """OGAgent daemon entry point.

        Sets the process title, initializes the agent (module activation
        happens inside initialize()), then sleeps in one-second slices until
        self.isAlive turns false or an exit is requested. Always ends by
        terminating and notifying the stop.
        """
        logger.debug('** Running Daemon **')
        set_proctitle('OGAgent')

        self.initialize()

        # Modules are called in sequence, no threading is done at this point,
        # so ensure modules onActivate always returns.

        # *********************
        # * Main Service loop *
        # *********************
        try:
            while self.isAlive:
                # In milliseconds, will break early if requested
                self.doWait(1000)
        except (KeyboardInterrupt, SystemExit):
            logger.error('Requested exit of main loop')
        except Exception as e:
            # BUGFIX: logger.exception() requires a message argument; the
            # bare call raised TypeError inside this handler, which skipped
            # terminate()/notifyStop(). One call logs message + traceback.
            logger.exception('Caught exception on main loop: {}'.format(e))

        self.terminate()

        self.notifyStop()
Example #4
0
async def valami(nev):
    # Label the process with the given name (requires python-prctl).
    prctl.set_proctitle(nev)
    print(f"{nev} start")
    await asyncio.sleep(10)
    # Busy-loop retitling the process with an increasing counter.
    # NOTE(review): 2**32 iterations effectively never finishes — presumably
    # a demo/stress snippet; confirm intent before reuse.
    for i in range(2**32):
        prctl.set_proctitle(f"{nev}{i}")
    print(f"{nev} stop")
Example #5
0
	def worker(self):
		"""Run the celery 'defer' worker as a locked, pid-tracked subprocess.

		Acquires the pidfile lock (non-blocking), spawns celery with the
		gevent pool and waits for it to exit; pidfile and lock are always
		released on the way out.
		"""
		lock = lockfile.FileLock(self.pidfile_path())
		# acquire(0): fail immediately if another instance holds the lock.
		lock.acquire(0)
		try:
			self.pidfile_create()

			# Best-effort: label this control process if prctl is available.
			try:
				import prctl
				prctl.set_proctitle('CONTROL/defer/%s' % config.main.product_name)
			except ImportError:
				pass

			env = os.environ
			env['PYTHONPATH'] = config.main.basedir
			params = ['celery', 'worker', '--pool=gevent', '--loader=util.defer.Loader',
				'--logfile=logs/defer.log', '--loglevel=INFO',
			]
			stdin = open('/dev/null', 'r')
			stdout = open(os.path.join(config.main.basedir, config.main.log_dir, 'service_defer.out'), 'a+')
			stderr = open(os.path.join(config.main.basedir, config.main.log_dir, 'service_defer.err'), 'a+')
			self.defer_process = subprocess.Popen(params, stdin=stdin, stdout=stdout, stderr=stderr, cwd=config.main.basedir, env=env)
			# Parent copies of the descriptors are no longer needed once the
			# child has inherited them.
			stdin.close()
			stdout.close()
			stderr.close()
			# Block until the worker exits, then drop the handle.
			self.defer_process.wait()
			del self.defer_process
		finally:
			self.pidfile_destroy()
			lock.release()
Example #6
0
def set_proc_name(new_name): # {{{
	"""Best-effort: set both the process title and kernel task name.

	Uses python-prctl when available; on any failure (module missing or a
	prctl error) a warning with traceback is logged and the name is left
	unchanged.
	"""
	try:
		import prctl
		prctl.set_proctitle(new_name)
		prctl.set_name(new_name)
	except Exception:
		# Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
		# no longer swallowed here; everything else stays best-effort.
		logging.warning("Unable to set proc name %s:"%new_name, exc_info=True)
Example #7
0
	def worker(self):
		"""Run mongod as a locked, pid-tracked subprocess until it exits."""
		lock = lockfile.FileLock(self.pidfile_path())
		# Non-blocking acquire: a second concurrent instance fails fast.
		lock.acquire(0)
		try:
			self.pidfile_create()

			# Best-effort process title when python-prctl is present.
			try:
				import prctl
				prctl.set_proctitle('CONTROL/db_mongo/%s' % config.main.product_name)
			except ImportError:
				pass

			env = os.environ
			params = ['mongod', '--nohttpinterface', '--auth', '--logpath', os.path.join(config.main.basedir, config.main.log_dir, 'service_db_mongo.log'), '--logappend', '--dbpath', config.main.db_mongo['dir'], '--port', str(config.main.db_mongo['port'])]
			# Journaling only when configured and not in a non-live stage.
			if (not config.stage or config.stage == ('live', )) and config.main.db_mongo['journal']:
				params.append('--journal')
			stdin = open('/dev/null', 'r')
			stdout = open('/dev/null', 'a+')
			stderr = open('/dev/null', 'a+')
			self.db_mongo_process = subprocess.Popen(params, stdin=stdin, stdout=stdout, stderr=stderr, cwd=config.main.basedir, env=env)
			# Close parent copies; the child keeps its own descriptors.
			stdin.close()
			stdout.close()
			stderr.close()
			self.db_mongo_process.wait()
			del self.db_mongo_process
		finally:
			self.pidfile_destroy()
			lock.release()
Example #8
0
def set_process_cmdline(proc_title):
    """Try to retitle the current process via python-prctl.

    Returns True on success, False when python-prctl is missing or does not
    expose set_proctitle (see http://pythonhosted.org/python-prctl/).
    """
    try:
        import prctl
        prctl.set_proctitle(proc_title)
    except (ImportError, AttributeError):
        return False
    else:
        return True
Example #9
0
def set_process_title(proc_title):
    """Set both the kernel task name and the argv-visible process title.

    Requires python-prctl (http://pythonhosted.org/python-prctl/); returns
    True when both calls succeed and False otherwise.
    """
    try:
        import prctl
    except ImportError:
        return False
    try:
        prctl.set_name(proc_title)
        prctl.set_proctitle(proc_title)
    except AttributeError:
        return False
    return True
Example #10
0
 def test_proctitle(self):
     """Test setting the process title, including too long titles"""
     # Set a short title and confirm it shows up at the end of ps output.
     title = "This is a test!"
     prctl.set_proctitle(title)
     ps_output = subprocess.Popen(['ps', '-f', '-p', '%d' % os.getpid()],
                                  stdout=subprocess.PIPE).communicate()[0].decode('ascii')
     self.assertTrue(ps_output.strip().endswith(title))
     # This should not segfault but truncate
     # A longer title must be cut down to the space of the original argv.
     title2 = "And this is a test too! Don't segfault."
     prctl.set_proctitle(title2)
     ps_output = subprocess.Popen(['ps', '-f', '-p', '%d' % os.getpid()],
                                  stdout=subprocess.PIPE).communicate()[0].decode('ascii')
     self.assertTrue(ps_output.strip().endswith(title2[:len(title)]))
Example #11
0
 def test_proctitle(self):
     """Test setting the process title, including too long titles"""
     # Set a short title and verify it is visible at the end of ps output.
     title = "This is a test!"
     prctl.set_proctitle(title)
     ps_output = subprocess.Popen(['ps', '-f', '-p', '%d' % os.getpid()],
                                  stdout=subprocess.PIPE).communicate()[0].decode('ascii')
     self.assertTrue(ps_output.strip().endswith(title))
     # This should not segfault but truncate
     # A longer title must be truncated to the original title's length.
     title2 = "And this is a test too! Don't segfault."
     prctl.set_proctitle(title2)
     ps_output = subprocess.Popen(['ps', '-f', '-p', '%d' % os.getpid()],
                                  stdout=subprocess.PIPE).communicate()[0].decode('ascii')
     self.assertTrue(ps_output.strip().endswith(title2[:len(title)]))
Example #12
0
def main():
    """Start the networkd HTTP API on port 8888 and run the IO loop forever."""
    # noinspection PyUnresolvedReferences
    # set_name is the kernel task name (ps/top); set_proctitle is the
    # argv-visible title.
    prctl.set_name('networkd_main')
    prctl.set_proctitle('networkd')
    options.parse_command_line()
    dm = DeviceManager()
    # Collection, item and action endpoints all share one DeviceManager.
    application = Application([
        (r"/devices", DeviceInfoHandler, dict(ethmanager=dm)),
        (r"/devices/([0-9]+)", DeviceInfoHandler, dict(ethmanager=dm)),

        (r"/devices/actions/(.+)", DevicesActionHandler, dict(ethmanager=dm)),
        (r"/devices/([0-9]+)/actions/(.+)", DeviceActionHandler, dict(ethmanager=dm)),
    ])
    application.listen(8888)
    IOLoop.instance().start()
Example #13
0
 def test_proctitle(self):
     """Test setting the process title, including too long titles"""
     # Record the original cmdline length: titles can only occupy that space.
     with open('/proc/self/cmdline') as fd:
        orig = len(fd.read())
     title = "This is a test!"
     prctl.set_proctitle(title)
     with open('/proc/self/cmdline') as fd:
        cmdline = fd.read().rstrip('\n\0')
     self.assertEqual(cmdline, title)
     # This should not segfault
     # An over-long title is expected to be truncated to the original
     # cmdline size (minus the trailing NUL).
     title2 = "And this is a test too! Don't segfault." * 3
     prctl.set_proctitle(title2)
     with open('/proc/self/cmdline') as fd:
        cmdline = fd.read().rstrip('\n\0')
     self.assertEqual(cmdline, title2[:orig-1])
Example #14
0
def main():
    """GTK entry point: set the process title (best-effort) and run the app."""
    # Best-effort retitle via python-prctl, including any CLI args in the
    # title so they show up in ps output.
    try:
        import prctl
        if len(sys.argv) == 1:
            prctl.set_proctitle("mantid")
        else:
            prctl.set_proctitle("mantid " + " ".join(sys.argv[1:]))
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
        # not swallowed; title-setting stays best-effort otherwise.
        pass

    global app
    Gtk.init()
    app = App()
    app.setup()
    Gtk.main()
Example #15
0
def start_process(cycle, name_list, ip_list):
    """
    Start child processes, run the main-process loop, and clean up expired
    log files once per day.
    :param cycle: int, how many days of logs to keep.
    :param name_list: list of str, host names.
    :param ip_list: list of str, IP addresses.
    :return:
    """
    process_list = []

    logging.info("Check logs directory structure.")
    logging.debug("Turn to check_logs_path().")
    if check_logs_path(name_list):
        logging.info("Check logs directory structure OK.")
        logging.info("Begin to start subprocess.")
        ppid = str(os.getpid())
        # One daemon child per host, pinging it and writing its own log.
        for (i, n) in enumerate(name_list):
            process = multiprocessing.Process(name=n,
                                              target=ping_write_log,
                                              args=(n, ip_list[i], ppid))
            process.daemon = True
            process.start()
            logging.info("Start subprocess, PPID:%s, PID:%s, HOST:%s, IP:%s." %
                         (ppid, str(process.pid), n, ip_list[i]))
            process_list.append(process)

        logging.debug("Turn to create_pid_file().")
        create_pid_file(name_list, ppid, process_list)

        first_day = datetime.datetime.now().date()
        # Append the script path and "-d" to the visible process title.
        prctl.set_proctitle(prctl.get_name() + " " + sys.argv[0] + " -d")
        logging.info("Main process run loop task.")
        # Once per day: purge logs older than `cycle` days; otherwise sleep.
        while True:
            day_temp = datetime.datetime.now().date()
            if day_temp > first_day:
                first_day = day_temp
                logging.info("Clean expired logs.")
                clean_log(cycle, name_list)
                logging.info("Clean expired logs OK.")
            else:
                time.sleep(3600)
    else:
        logging.error("Check logs directory structure error. Program exit.")
Example #16
0
def setprocname(name):
    """Rename the current process when a helper module is available.

    Tries setproctitle first (https://github.com/dvarrazzo/py-setproctitle),
    then python-prctl (https://github.com/seveas/python-prctl); a missing
    module simply means the name is left unchanged.
    """
    try:
        import setproctitle
        setproctitle.setproctitle(name)
        return
    except ImportError:
        pass
    try:
        import prctl
        # 16-byte kernel task name (ps, top) ...
        prctl.set_name(name)
        # ... and the argv title (ps aux, top -c); silently truncated to the
        # original **argv size (see the prctl docs).
        prctl.set_proctitle(name)
    except ImportError:
        return
Example #17
0
    def __init__(self, ID, connections_filename):
        """Create a knot: retitle the process, stash connection config, and
        wire up a per-knot file logger.

        NOTE(review): relies on self._name already being set (presumably by a
        subclass or class attribute) before this runs — confirm.
        """
        super(AbstractKnot, self).__init__()
        prctl.set_proctitle(self._name + '-' + str(ID))
        self._ID = str(ID)
        self.__connections_filename = connections_filename
        self._ips_and_ports = None
        self.__ip = None
        self.__port = None
        self._listeningSocket = None
        self._neighbours = {}
        # OS-entropy-backed RNG (random.SystemRandom).
        self._system_random = SystemRandom()

        #init logging
        # One file handler per knot instance; any previously attached
        # handlers are removed first so logs don't duplicate.
        self.logger = Logger(__name__ + '-' + str(ID))
        formatter = logging.Formatter('%(name)s %(levelname)s %(asctime)s: %(message)s')
        filehandler = FileHandler('./Logging/' + self._name + '-' + str(ID) + '.log', 'w')
        filehandler.setFormatter(formatter)
        filehandler.setLevel(logging.NOTSET)
        for hdlr in self.logger.handlers:  # remove all old handlers
            self.logger.removeHandler(hdlr)
        self.logger.addHandler(filehandler)
Example #18
0
def setprocname(name):
    """Set the process name if a supporting module can be imported.

    setproctitle (https://github.com/dvarrazzo/py-setproctitle) is preferred;
    python-prctl (https://github.com/seveas/python-prctl) is the fallback.
    Without either module this is a no-op.
    """
    try:
        import setproctitle
    except ImportError:
        setproctitle = None
    if setproctitle is not None:
        setproctitle.setproctitle(name)
        return
    try:
        import prctl
    except ImportError:
        return
    prctl.set_name(name)       # up to 16 bytes, for ps and top
    prctl.set_proctitle(name)  # for ps aux / top -c (may truncate, see docs)
Example #19
0
	def worker(self):
		"""Run the gunicorn web frontend as a locked, pid-tracked subprocess.

		Worker count is cpu_count + 1 (falling back to 2 when the CPU count
		is unavailable); waits until gunicorn exits.
		"""
		lock = lockfile.FileLock(self.pidfile_path())
		# Non-blocking acquire: fail fast if already running.
		lock.acquire(0)
		try:
			self.pidfile_create()

			# Best-effort process title when python-prctl is present.
			try:
				import prctl
				prctl.set_proctitle('CONTROL/web/%s' % config.main.product_name)
			except ImportError:
				pass

			import multiprocessing
			try:
				proc_count = multiprocessing.cpu_count()
			except NotImplementedError:
				proc_count = 1

			env = os.environ
			env['PYTHONPATH'] = config.main.basedir
			params = ['gunicorn', '--workers=%d' % (proc_count + 1), '--max-requests=1000',
				'--bind=:%d' % config.main.port_web,
				'--worker-class=gevent', 'util.web.publisher:Request',
			]
			stdin = open('/dev/null', 'r')
			stdout = open(os.path.join(config.main.basedir, config.main.log_dir, 'service_web.out'), 'a+')
			stderr = open(os.path.join(config.main.basedir, config.main.log_dir, 'service_web.err'), 'a+')
			self.web_process = subprocess.Popen(params,
				stdin=stdin, stdout=stdout, stderr=stderr,
				cwd=config.main.basedir,
				env=env,
			)
			# Close parent copies; the child keeps its own descriptors.
			stdin.close()
			stdout.close()
			stderr.close()
			self.web_process.wait()
			del self.web_process
		finally:
			self.pidfile_destroy()
			lock.release()
Example #20
0
    def run(self):
        """UDSActor daemon main (legacy variant): register with the broker,
        start IPC and watch for IP changes until the service is stopped.
        """
        initCfg()

        logger.debug('Running Daemon')
        set_proctitle('UDSActorDaemon')

        # Linux daemon will continue running unless something is requested to
        # stop; a False from the broker interaction means "give up".
        if self.interactWithBroker() is False:
            logger.debug('Interact with broker returned false, stopping service after a while')
            return

        if self.isAlive is False:
            logger.debug('The service is not alive after broker interaction, stopping it')
            return

        if self.rebootRequested is True:
            logger.debug('Reboot has been requested, stopping service')
            return

        self.initIPC()

        # *********************
        # * Main Service loop *
        # *********************
        # Counter used to check ip changes only once every 10 seconds, for
        # example
        counter = 0
        while self.isAlive:
            counter += 1
            if counter % 10 == 0:
                self.checkIpsChanged()
            # In milliseconds, will break
            self.doWait(1000)

        self.endIPC()
        self.endAPI()

        self.notifyStop()
Example #21
0
    sys.excepthook = MailException

    reactor.run()

def daemon_reload(self):
    """Reload hook: re-read the RPC server's access lists."""
    rpc_server.load_lists()

def daemon_shutdown(self):
    """Shutdown hook: mark the daemon as quitting and SIGINT every child."""
    global quitting
    global subprocesses

    quitting = True

    # Interrupt all worker subprocesses, then forget them.
    for pid in subprocesses:
        os.kill(pid, signal.SIGINT)
    subprocesses = []

# Set the ps-visible title, wire the lifecycle hooks onto the daemon object
# and hand control to it.
prctl.set_proctitle("tor2web")

t2w_daemon = T2WDaemon()

t2w_daemon.daemon_init = daemon_init
t2w_daemon.daemon_main = daemon_main
t2w_daemon.daemon_reload = daemon_reload
t2w_daemon.daemon_shutdown = daemon_shutdown

t2w_daemon.run(config.datadir)

exit(0)
Example #22
0
    def run(self):
        """UDSActor daemon main: read config, register with the broker
        (retrying while it answers None), optionally run the POST script,
        then serve the IPC loop until stopped.
        """
        cfg = initCfg()  # Gets a local copy of config to get "reboot"

        logger.debug('CFG: {}'.format(cfg))

        if cfg is not None:
            self.rebootMachineAfterOp = cfg.get('reboot', True)
        else:
            self.rebootMachineAfterOp = False

        logger.info('Reboot after is {}'.format(self.rebootMachineAfterOp))

        logger.debug('Running Daemon')
        set_proctitle('UDSActorDaemon')

        # Linux daemon will continue running unless something is requested to
        # stop. interactWithBroker(): False -> stop, True -> proceed,
        # None -> retry after a long wait.
        while True:
            brokerConnected = self.interactWithBroker()
            if brokerConnected is False:
                logger.debug('Interact with broker returned false, stopping service after a while')
                return
            elif brokerConnected is True:
                break

            # If brokerConnected returns None, repeat the cycle
            self.doWait(16000)  # Wait for a looong while

        if self.isAlive is False:
            logger.debug('The service is not alive after broker interaction, stopping it')
            return

        if self.rebootRequested is True:
            logger.debug('Reboot has been requested, stopping service')
            return

        # Execute script in /etc/udsactor/post after interacting with broker, if no reboot is requested ofc
        # This will be executed only when machine gets "ready"
        try:

            if os.path.isfile(POST_CMD):
                if (os.stat(POST_CMD).st_mode & stat.S_IXUSR) != 0:
                    subprocess.call([POST_CMD, ])
                else:
                    logger.info('POST file exists but it it is not executable (needs execution permission by root)')
            else:
                logger.info('POST file not found & not executed')
        except Exception as e:
            # Ignore output of execution command
            # BUGFIX: the original message ('Executing post command give') was
            # truncated and dropped the exception detail entirely.
            logger.error('Error executing post command: %s', e)

        self.initIPC()

        # *********************
        # * Main Service loop *
        # *********************
        # Counter used to check ip changes only once every 10 seconds, for
        # example
        counter = 0
        while self.isAlive:
            counter += 1
            if counter % 10 == 0:
                self.checkIpsChanged()
            # In milliseconds, will break
            self.doWait(1000)

        self.endIPC()
        self.endAPI()

        self.notifyStop()
Example #23
0
def set_processname(name):
	"""Linux only: apply *name* as both the kernel task name and argv title."""
	for setter in (prctl.set_name, prctl.set_proctitle):
		setter(name)
Example #24
0
	def worker(self):
		"""Run redis-server as a locked, pid-tracked subprocess.

		The redis configuration is generated inline and fed to the child on
		stdin ('redis-server -' reads config from stdin).
		"""
		lock = lockfile.FileLock(self.pidfile_path())
		# Non-blocking acquire: fail fast if already running.
		lock.acquire(0)
		try:
			self.pidfile_create()

			# Best-effort process title when python-prctl is present.
			try:
				import prctl
				prctl.set_proctitle('CONTROL/db_cache/%s' % config.main.product_name)
			except ImportError:
				pass

			env = os.environ
			params = ['redis-server', '-']
			stdin = open('/dev/null', 'r')
			stdout = open(os.path.join(config.main.basedir, config.main.log_dir, 'service_cache.out'), 'a+')
			stderr = open(os.path.join(config.main.basedir, config.main.log_dir, 'service_cache.err'), 'a+')
			self.db_cache_process = subprocess.Popen(params, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, cwd=config.main.basedir, env=env)
			# Write the whole redis config to the child's stdin; port and data
			# dir are interpolated from the application config.
			self.db_cache_process.stdin.write('\
port %(port)d\n\
timeout 300\n\
\
loglevel notice\n\
logfile stdout\n\
\
databases 16\n\
\
save 900 1\n\
save 300 10\n\
save 60 10000\n\
\
rdbcompression yes\n\
\
dir %(dir)s\n\
dbfilename dump.rdb\n\
\
slave-serve-stale-data yes\n\
\
appendonly no\n\
appendfsync everysec\n\
no-appendfsync-on-rewrite no\n\
vm-enabled no\n\
\
vm-swap-file redis.swap\n\
\
vm-max-memory 0\n\
vm-page-size 32\n\
vm-pages 134217728\n\
vm-max-threads 4\n\
hash-max-zipmap-entries 512\n\
hash-max-zipmap-value 64\n\
list-max-ziplist-entries 512\n\
list-max-ziplist-value 64\n\
set-max-intset-entries 512\n\
activerehashing yes\n\
' % {'port': config.main.db_cache['port'], 'dir': os.path.join(config.main.basedir, config.main.db_cache['dir'])})
			# Closing stdin signals end-of-config to redis-server.
			self.db_cache_process.stdin.close()
			stdout.close()
			stderr.close()
			self.db_cache_process.wait()
			del self.db_cache_process
		finally:
			self.pidfile_destroy()
			lock.release()
   log("Opening DB")
   initdb()

   server = dbus.Interface(
               bus.get_object( avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER ),
               avahi.DBUS_INTERFACE_SERVER )
   log("Advertisement setup done")
   print serviceType

   server.connect_to_signal( "StateChanged", server_state_changed )
   server_state_changed( server.GetState() )

   sbrowser = dbus.Interface( bus.get_object(avahi.DBUS_NAME,
               server.ServiceBrowserNew( avahi.IF_UNSPEC,
                    avahi.PROTO_UNSPEC, serviceType, 'local', dbus.UInt32(0))),
               avahi.DBUS_INTERFACE_SERVICE_BROWSER)

   sbrowser.connect_to_signal("ItemNew", new_service)
   sbrowser.connect_to_signal("ItemRemove", remove_service)


   log("Discovery setup done")
   prctl.set_proctitle(prosessinimi)

   #Kun saadaan sigTERM, lopetetaan
   signal.signal(signal.SIGTERM, stop)

   gobject.MainLoop().run()
   group.Free()

Example #26
0
            log.startLogging(sys.stdout)
    else:
        log.startLogging(log.NullFile)

    sys.excepthook = MailException

    reactor.run()


def daemon_reload(self):
    """Reload hook: re-read the RPC server's access lists."""
    rpc_server.load_lists()


def daemon_shutdown(self):
    """Shutdown hook: flag the main loop to quit."""
    global quitting
    quitting = True


# Set the ps-visible title, attach the lifecycle hooks to the daemon object
# and hand control to it.
prctl.set_proctitle("tor2web")

t2w_daemon = T2WDaemon()

t2w_daemon.daemon_init = daemon_init
t2w_daemon.daemon_main = daemon_main
t2w_daemon.daemon_reload = daemon_reload
t2w_daemon.daemon_shutdown = daemon_shutdown

t2w_daemon.run(config.datadir)

exit(0)
Example #27
0
    # we do not want all workers to die in the same moment
    requests_countdown = config['requests_per_process'] / random.randint(3, 5)

    sys.excepthook = MailException

def SigQUIT(SIG, FRM):
    """Signal handler: stop the twisted reactor (wired to USR1/TERM/INT)."""
    reactor.stop()

args = sys.argv[1:]
# Exactly two CLI arguments are required.
if len(sys.argv[1:]) != 2:
    exit(1)

ports = []
# Randomized so not all workers recycle at the same moment.
requests_countdown = 10000 / random.randint(3, 5)

rpc_factory = pb.PBClientFactory()
reactor.connectTCP("127.0.0.1",  8789, rpc_factory)

signal.signal(signal.SIGUSR1, SigQUIT)
signal.signal(signal.SIGTERM, SigQUIT)
signal.signal(signal.SIGINT, SigQUIT)

# Receive SIGINT when the parent dies, and set a ps-friendly title.
prctl.set_pdeathsig(signal.SIGINT)
prctl.set_proctitle("tor2web-worker")

start()

reactor.run()

exit(0)
Example #28
0
def start(args):
    """Boot the whole instrument stack: settings, UI, synth, controllers,
    core, database, calibration, presets and finally the server + mainloop.

    Order matters here: the UI is up first so progress messages can be shown
    while the heavier subsystems start.
    """
    prctl.set_proctitle('mg-main')
    prctl.set_name('mg-main')

    if args.config:
        settings.load(args.config)

    if args.debug:
        settings.log_level = 'debug'

    configure_logging(settings)

    settings.create_dirs()

    from mg.fluidsynth.api import FluidSynth
    from mg.state import State
    from mg.controller import SynthController, SystemController, MIDIController

    state = State(settings)

    menu, input_manager, event_handler = start_ui(state, settings, args.debug)

    fluid = FluidSynth(settings.sound_dir)

    # Controllers: each starts its own listener against the shared state.
    synth_ctrl = SynthController(fluid, state)
    synth_ctrl.start_listening()

    system_ctrl = SystemController(state, settings)
    system_ctrl.start_listening()
    system_ctrl.set_string_led(0, False)
    system_ctrl.set_string_led(1, False)
    system_ctrl.set_string_led(2, False)
    system_ctrl.update_udc_configuration()

    midi_ctrl = MIDIController(input_manager)
    midi_ctrl.start_listening()

    menu.message('Starting synthesizer')
    start_fluidsynth(
        fluid,
        dump_midi=args.dump_midi,
        debug=args.debug_fs)

    menu.message('Starting core')
    from mg.mglib import mgcore
    mgcore.start()
    mgcore.add_fluid_output(fluid.synth)
    mgcore.enable_fluid_output()

    menu.message('Opening database')
    from mg import db
    import logging
    log = logging.getLogger()
    db_path = os.path.join(settings.data_dir, 'mg.db')
    # Initialization failures are logged but not fatal; migration still runs.
    try:
        db.initialize(db_path)
    except Exception:
        log.exception('Unable to initialize database!')
    db.migrate(db_path)

    # restore key calibration
    from mg.input import calibration
    key_calib = calibration.load_keys()
    calibration.commit_keys(key_calib)

    # restore mapping ranges
    for name in mgcore.get_mapping_configs().keys():
        ranges = db.load_mapping_ranges(name)
        if ranges:
            mgcore.set_mapping_ranges(name, ranges)

    # set default global settings
    state.main_volume = 120
    state.reverb_volume = 25
    state.preset.keynoise[0].volume = 25
    state.coarse_tune = 0
    state.fine_tune = 0
    state.ui.brightness = 80
    state.synth.gain = 50
    state.pitchbend_range = 100

    # restore misc config
    misc = db.load_misc_config()
    if misc:
        state.from_misc_dict(misc)

    # Load the saved preset, if any; a missing preset is not an error.
    try:
        preset = db.Preset.get()  # noqa
        menu.message(f'Loading preset {preset.number}...')
        with state.lock():
            state.load_preset(preset.id)
    except db.Preset.DoesNotExist:
        pass

    menu.message('Starting server')
    start_server(state, menu)

    menu.goto('home')
    input_manager.start()

    event_handler.mainloop()
def main():
    """Parse CLI options, set the process name, and either run as a daemon
    (listening for BlueZ property changes) or do a one-shot connect pass.
    """
    global daemon
    global verbose

    # Parse command line arguments
    try:
        opts, cmds = getopt.getopt(sys.argv[1:], 'dhv',
                                   ['daemon', 'help', 'verbose'])
    except getopt.GetoptError as e:
        print(f'{SCRIPT_NAME}:', e, file=sys.stderr, flush=True)
        print(f"Try '{SCRIPT_NAME} --help' for more information",
              file=sys.stderr,
              flush=True)
        sys.exit(1)

    # Process options (e.g. -h, --verbose)
    for o, v in opts:
        if o in ('-d', '--daemon'):
            daemon = True
        elif o in ('-h', '--help'):
            usage()
        elif o in ('-v', '--verbose'):
            verbose = True
        else:
            # This shouldn't ever happen unless we forget to handle an
            # option we've added
            print(f'{SCRIPT_NAME}: internal error: unhandled option {o}',
                  file=sys.stderr,
                  flush=True)
            sys.exit(1)

    # Process commands
    # This script does not use any commands so we will exit if one is
    # incorrectly provided
    if len(cmds) > 0:
        # BUGFIX: the message referenced an undefined name 'c', raising
        # NameError; report the first unrecognized command instead.
        print(f"{SCRIPT_NAME}: command '{cmds[0]}' not recognized",
              file=sys.stderr,
              flush=True)
        print(f"Try '{SCRIPT_NAME} --help' for more information",
              file=sys.stderr,
              flush=True)
        sys.exit(1)

    # Set process name and title
    # This allows commands like `killall SCRIPT_NAME` to function
    # correctly
    try:
        import prctl
        if verbose:
            print(f'setting process name to \'{SCRIPT_NAME}\'', flush=True)
        prctl.set_name(SCRIPT_NAME)
        prctl.set_proctitle(' '.join(sys.argv))
    except ImportError:
        if verbose:
            print('failed to load module \'prctl\'', flush=True)
            print('process name not set', flush=True)

    if daemon:
        # Listen for changes on the BlueZ dbus interface
        # This is a catch all listener (no path specified) because we
        # want to get notified for all adapters without keeping a list
        # of them and managing signal handlers independantly
        bus.add_signal_receiver(
            properties_changed_handler,
            signal_name='PropertiesChanged',
            dbus_interface='org.freedesktop.DBus.Properties',
            bus_name='org.bluez',
            path=None,
            path_keyword='path')

        # Attempt to connect to devices on all existing adapters
        connect_devices_for_all_adapters()

        # Start the mainloop
        loop.run()
    else:
        # Attempt to connect to devices on all existing adapters
        connect_devices_for_all_adapters()

        # If we're waiting for connection attemps to finish, start the
        # mainloop. We will automatically exit the loop once everything
        # is finished
        if len(pending_connections) > 0:
            loop.run()
Example #30
0
def main() -> None:
    """odrd daemon entry point.

    Parses command-line options and the INI configuration file, sets up
    logging and (optionally) a Prometheus exporter, drops privileges,
    wires the DHCP requestors, OpenVPN server supervisors, client manager
    and command sockets together, then runs the socket loop until a
    terminating signal or fatal error.
    """
    # Name the process 'odrd' so tools like `ps` and `killall odrd` work.
    prctl.set_name('odrd')
    prctl.set_proctitle(' '.join(sys.argv))

    parser = OptionParser()
    parser.add_option(
        "-c",
        "--config",
        dest="config_file",
        help="Configuration file",
        default=CONFIG_FILE,
    )
    parser.add_option(
        "--debug",
        dest="debug",
        action="store_true",
        help="Activate debug logging",
        default=False,
    )
    parser.add_option(
        "--keep-user",
        dest="keep_user",
        action="store_true",
        help="Do not switch to a different UID / GID; ignore capabilities",
        default=False,
    )
    (options, args) = parser.parse_args()
    # The daemon takes options only; any positional argument is a usage error.
    if len(args) != 0:
        parser.error("incorrect number of arguments")

    cfg = ConfigParser()
    cfg.read(options.config_file)

    loglevel = logging.INFO
    if options.debug:
        loglevel = logging.DEBUG
    setup_logging(loglevel, cfg.getboolean('daemon', 'syslog', fallback=False))

    # Optional Prometheus metrics endpoint; port 0 (the fallback) disables it.
    prom_port = cfg.getint("daemon", "prometheus_port", fallback=0)
    if prom_port:
        logging.debug("starting prometheus exporter on port %s", prom_port)
        start_http_server(prom_port)

    if not options.keep_user:
        # Capability net_raw is needed for binding to network devices.
        # Capability net_bind_service is needed for binding to the DHCP port.
        drop_caps(
            user=cfg.get('daemon', 'user', fallback=None),
            group=cfg.get('daemon', 'group', fallback=None),
            caps=['net_raw', 'net_bind_service'],
        )

    default_dhcp_device = cfg.get('daemon',
                                  'default_dhcp_listening_device',
                                  fallback=None)

    # Realm configuration is mandatory; bail out if it cannot be read.
    realms_data = read_realms(cfg, default_dhcp_device)
    if realms_data is None:
        sys.exit(1)

    sloop = SocketLoop()

    def exit_daemon(*args) -> None:
        """Signal handler performing a soft shutdown of the loop.
        """
        logging.info('exiting on signal')
        sloop.quit()

    # SIGTERM shuts down cleanly; SIGHUP is ignored (no reload support here).
    signal.signal(signal.SIGTERM, exit_daemon)
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    # Timeouts are polled via the loop's idle handler.
    timeout_mgr = TimeoutManager()
    sloop.add_idle_handler(timeout_mgr.check_timeouts)

    requestor_mgr = odr.dhcprequestor.DhcpAddressRequestorManager()

    servers = read_servers(cfg, sloop)

    # Supervise every configured OpenVPN server.  Weak proxies are used so
    # the supervisors do not keep the supervised objects alive on their own.
    for server in servers.values():
        ovpn.OvpnServerSupervisor(
            timeout_mgr=weakref.proxy(timeout_mgr),
            server=weakref.proxy(server),
            timeout=30,
        )

    def start_dhcp_address_request(device, local_ip, **kwargs) -> None:
        """Queue an initial DHCP address request on the matching requestor.

        Silently drops the request when no requestor exists for the
        (device, local_ip) pair.
        """
        requestor = requestor_mgr.get_requestor(device, local_ip)
        if requestor is None:
            return
        request = odr.dhcprequestor.DhcpAddressInitialRequest(
            timeout_mgr=weakref.proxy(timeout_mgr),
            requestor=weakref.proxy(requestor),
            local_ip=local_ip,
            **kwargs)
        requestor.add_request(request)

    def start_dhcp_refresh_request(device, local_ip, **kwargs) -> None:
        """Queue a DHCP lease-refresh request on the matching requestor."""
        requestor = requestor_mgr.get_requestor(device, local_ip)
        if requestor is None:
            return
        request = odr.dhcprequestor.DhcpAddressRefreshRequest(
            timeout_mgr=weakref.proxy(timeout_mgr),
            requestor=weakref.proxy(requestor),
            local_ip=local_ip,
            **kwargs)
        requestor.add_request(request)

    parse_username = ParseUsername(
        default_realm=cfg.get('daemon', 'default_realm'))

    client_mgr = OvpnClientManager(
        timeout_mgr=timeout_mgr,
        servers=servers,
        refresh_lease_clb=start_dhcp_refresh_request,
        realms_data=realms_data,
        parse_username_clb=parse_username.parse_username,
    )

    def create_vpn_cmd_conn(sloop, sock) -> OvpnCmdConn:
        """Factory for per-connection command handlers on the UNIX sockets."""
        return OvpnCmdConn(
            sloop,
            sock,
            realms_data=realms_data,
            servers=servers,
            secret=cfg.get('daemon', 'secret', fallback=None),
            create_client_clb=client_mgr.create_client,
            remove_client_clb=client_mgr.client_disconnected,
            add_request_clb=start_dhcp_address_request,
            parse_username_clb=parse_username.parse_username,
        )

    # Resolve the configured user/group names once; connections are later
    # authorized against these numeric IDs.
    cmd_socket_uids = [
        user_to_uid(user) for user in split_cfg_list(
            cfg.get('daemon', 'cmd_socket_uids', fallback=''))
    ]
    cmd_socket_gids = [
        group_to_gid(group) for group in split_cfg_list(
            cfg.get('daemon', 'cmd_socket_gids', fallback=''))
    ]

    def cmd_conn_auth_check(sock, pid, uid, gid) -> bool:
        """Accept a command connection when its peer UID or GID is allowed."""
        if uid in cmd_socket_uids:
            return True
        if gid in cmd_socket_gids:
            return True
        return False

    # Socket permissions are given in octal in the config file.
    cmd_socket_perms = int(
        cfg.get('daemon', 'cmd_socket_perms', fallback='0666'), 8)
    for unix_socket_fn in split_cfg_list(
            cfg.get('daemon', 'cmd_sockets', fallback='')):
        cmd_listener = CommandConnectionListener(
            sloop=weakref.proxy(sloop),
            socket_path=unix_socket_fn,
            cmd_conn_factory=create_vpn_cmd_conn,
            socket_perm_mode=cmd_socket_perms,
            auth_check=cmd_conn_auth_check,
        )
        sloop.add_socket_handler(cmd_listener)

    if not load_requestors(sloop, requestor_mgr, realms_data):
        sys.exit(1)

    if not options.keep_user:
        # Special capabilities no longer necessary.
        drop_caps()

    # Main loop: blocks until exit_daemon() calls sloop.quit() or an
    # unhandled exception escapes.
    try:
        sloop.run()
    except Exception:
        logging.exception('Caught exception in main loop, exiting.')
        sys.exit(1)
Example #31
0
		assemble(job1, debug=True)
		quit()
	
	
	#update options
	for (assembler_name, assembler_module) in assembler_dict.items():
		if not query("SELECT option_value FROM assembly_options WHERE option_value = '{}';".format(assembler_name)):
			query("INSERT INTO assembly_options (option_type, option_value, option_description) VALUES ('assembler', '{}', '{} (Supports {})');".format(assembler_name, assembler_module.description.encode('string_escape'), ', '.join(assembler_module.supports)))
	for (trimmer, trim_func) in trimmer_dict.items():
		if not query("SELECT option_value FROM assembly_options WHERE option_value = '{}';".format(trimmer)):
			query("INSERT INTO assembly_options (option_type, option_value, option_description, option_supports) VALUES ('trimmer', '{}', '{}', 'All');".format(trimmer, str(trim_func.__doc__).encode('string_escape')))

	#run assemblies
	try:
		
		import prctl
		prctl.set_proctitle('Assem_dispatch')
		if '-nodaemon' in sys.argv:
			#raise ValueError
			main()
			quit()
		raise ValueError()
		import daemon
		print "Daemon module successfully imported; running in daemon mode."
		with daemon.DaemonContext(working_directory='/', detach_process=True, stdout=open('/data/Pipeline_jobs/assembly.log', 'a')):
			main(True)
	except (ImportError, ValueError) as e:
		print "Daemon module not found; running in normal mode."
		print e
		main(False)
	
Example #32
0
  def daemonize(self):
    """Detach from the controlling terminal and run in the background.

    Performs the classic UNIX double-fork (see Stevens' "Advanced
    Programming in the UNIX Environment", ISBN 0201563177,
    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16), redirects
    the standard file descriptors to the configured files, best-effort
    renames the process, then acquires the daemon lock file.

    No-op when running interactively.  Raises DaemonError when either
    fork fails.
    """
    if self.interactive:
      return

    try:
      pid = os.fork()
      if pid > 0:
        # exit first parent; the child continues as the daemon-to-be
        sys.exit(0)
    except OSError as e:
      sys.stderr.write("fork #1 failed: {0:d} ({1})\n".format(e.errno, e.strerror))
      raise DaemonError("Fork #1 failed")

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # do second fork so the daemon can never re-acquire a controlling tty
    try:
      pid = os.fork()
      if pid > 0:
        # exit from second parent
        sys.exit(0)
    except OSError as e:
      sys.stderr.write("fork #2 failed: {0:d} ({1})\n".format(e.errno, e.strerror))
      raise DaemonError("Fork #2 failed")

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    # Bug fix: the previous code unconditionally forced stderr_mode to
    # 'w+' here, clobbering any caller-configured mode and making the
    # binary-mode branch below unreachable.  Only default when unset.
    if getattr(self, 'stderr_mode', None) is None:
        self.stderr_mode = 'w+'
    si = open(self.stdin, 'r')
    so = open(self.stdout, self.stdout_mode)
    if 'b' in self.stderr_mode.lower():
        # buffering=0 (unbuffered) is only valid for binary-mode files
        se = open(self.stderr, self.stderr_mode, 0)
    else:
        se = open(self.stderr, self.stderr_mode)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # set name (cosmetic; never let a failure here abort daemonization)
    if self.name is not None:
        try:
            if HAS_PRCTL:
                prctl.set_name(self.name)
                prctl.set_proctitle(self.name)
            else:
                from ctypes import cdll
                libc = cdll.LoadLibrary("libc.so.6")
                # PR_SET_NAME (15) expects a char*; on Python 3 a str
                # argument is rejected by ctypes, so encode it first.
                raw_name = self.name if isinstance(self.name, bytes) else self.name.encode()
                libc.prctl(15, raw_name, 0, 0, 0)
        except Exception:
            pass

    # write lock; register release first so it also runs on later sys.exit
    atexit.register(self._release_lock)
    self.lock_file.acquire(blocking=self.lock_blocking, timeout=self.lock_timeout)
    self._write_lock_data()
Example #33
0
def ping_write_log(name, dst_ip, ppid):
    """Child-process worker: ping a target IP forever and append results to a log.

    Writes one log file per host per day, named ``<name>_<YYYYMMDD>.log``
    (e.g. ``test1_20160815.log``) under ``LOGS_PATH/<name>/``.  The worker
    exits when the parent process disappears.

    :param name: name of the IP-address group from the configuration file.
    :param dst_ip: target IP address to ping.
    :param ppid: PID of the parent process, as a string.
    :return: None
    """
    # Date of the currently open log file; a date change rolls to a new file.
    first_day = datetime.datetime.now().date()
    prctl.set_proctitle("ping " + name)
    while True:
        if check_pid(ppid):
            day_temp = datetime.datetime.now().date()
            log_path = LOGS_PATH + os.sep + name + os.sep + name + "_" + datetime.datetime.now(
            ).strftime("%Y%m%d") + ".log"
            with open(log_path, mode="a") as log_file:
                before = datetime.datetime.now()
                # Inner loop runs while we are still on the same calendar day.
                while first_day == day_temp:
                    if check_pid(ppid):
                        after = datetime.datetime.now()
                        # At most one ping per second: only act once the
                        # second-of-day counter has advanced past `before`.
                        if (after.hour * 3600 + after.minute * 60 +
                                after.second) > (before.hour * 3600 +
                                                 before.minute * 60 +
                                                 before.second):
                            time_str = after.strftime("%Y-%m-%d %X")
                            # Round-trip delay in seconds; None or > 4 s is
                            # logged as a timeout (assumes ping.do_one returns
                            # seconds or None -- TODO confirm).
                            delay = ping.do_one(dst_ip, timeout=0.7, psize=64)
                            if delay is None or delay > 4.0:
                                if os.path.exists(log_path):
                                    log_file.write(time_str + " " + dst_ip +
                                                   " timeout\n")
                                    log_file.flush()
                                else:
                                    # Log file removed externally: break out
                                    # to reopen it via the outer loop.
                                    break
                            else:
                                delay_ms = delay * 1000
                                if os.path.exists(log_path):
                                    log_file.write(
                                        time_str + " " + dst_ip + " " +
                                        str(format(delay_ms, ".1f")) + "\n")
                                    log_file.flush()
                                else:
                                    break
                            before = datetime.datetime.now()
                        else:
                            # Same second as the last ping: back off briefly.
                            before = datetime.datetime.now()
                            time.sleep(0.1)
                        day_temp = datetime.datetime.now().date()
                    else:
                        logging.info("Main process exit.")
                        logging.info(
                            "Subprocess exit. PPID:%s, PID:%s, HOST:%s, IP:%s"
                            % (ppid, os.getpid(), name, dst_ip))
                        return
            if day_temp >= first_day:
                # Normal day rollover: adopt the new date and reopen the log.
                first_day = day_temp
            else:
                # Clock moved backwards across a day boundary: give up.
                logging.error(
                    "Wrong date. Subprocess exit. PPID:%s, PID:%s, HOST:%s, IP:%s"
                    % (ppid, os.getpid(), name, dst_ip))
                return
        else:
            logging.info("Main process exit.")
            logging.info("Subprocess exit. PPID:%s, PID:%s, HOST:%s, IP:%s" %
                         (ppid, os.getpid(), name, dst_ip))
            return
 def __init__(self, environment: str, cmdline_args: NamedTuple = None):
     """Initialize global settings for *environment*, then retitle the process.

     NOTE(review): init_module must run before SETTINGS.instance_name is
     read -- presumably init_module populates the SETTINGS object; verify
     against the settings module.
     """
     settings.init_module(environment, cmdline_args)
     prctl.set_proctitle(SETTINGS.instance_name)
Example #35
0

def SigQUIT(SIG, FRM):
    # Signal handler: stop the Twisted reactor, which ends reactor.run()
    # below and lets the script fall through to exit(0).
    reactor.stop()


# Expect exactly two command-line arguments; anything else is a usage error.
args = sys.argv[1:]
if len(sys.argv[1:]) != 2:
    exit(1)

# Shared mutable state, presumably populated later via the RPC connection
# below -- TODO confirm against the rest of the module.
access_list = []
blocked_ua_list = []
tor_exits_list = []
ports = []

# Connect to the tor2web control daemon over a UNIX-socket Perspective
# Broker (twisted.spread) RPC channel.
rpc_factory = pb.PBClientFactory()
reactor.connectUNIX(os.path.join("/var/run/tor2web/rpc.socket"), rpc_factory)

# Any of these signals triggers a clean reactor shutdown via SigQUIT.
signal.signal(signal.SIGUSR1, SigQUIT)
signal.signal(signal.SIGTERM, SigQUIT)
signal.signal(signal.SIGINT, SigQUIT)

# Ask the kernel to deliver SIGINT to this worker if its parent dies, and
# retitle the process for ps/top.
prctl.set_pdeathsig(signal.SIGINT)
prctl.set_proctitle("tor2web-worker")

# Project-defined startup routine (defined elsewhere in this module).
start()

# Blocks until one of the signals above stops the reactor.
reactor.run()

exit(0)