Example #1
def main():
    print "Start!"
    import daemon
    daemon.daemonize(noClose=True)
    # Initialize the worker processes
    num_of_workers = 2
    workers = []
    workQueue = Queue()
    writeCount = Value("i", 0)
    manager = Manager()
    workDict = manager.dict()
    for i in range(num_of_workers):
        worker = Process(target=writeWorker,
                         args=(workQueue, writeCount, workDict))
        workers.append(worker)
    for worker in workers:
        worker.start()

    # A process that periodically prints progress
    sp = Process(target=showStatus, args=(workQueue, writeCount, workDict))
    sp.start()

    # Periodically read the data to be saved from ttserver
    while True:
        btime = time.time()
        try:
            writeToDB(workQueue, workDict)
        except:
            log.error(traceback.format_exc())
        etime = time.time()
        log.info("Use time: %f", etime - btime)
        stime = 1800 - (etime - btime)
        if stime > 0:
            time.sleep(stime)
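
The loop at the bottom runs writeToDB roughly every 1800 seconds by subtracting the time the work itself took before sleeping. A generic restatement of that fixed-period pattern (only the standard library is used; run_every and job are names introduced for this sketch):

import time

def run_every(period_seconds, job):
    # Call job() once per period, sleeping only for whatever time is left
    # after the job itself has finished.
    while True:
        started = time.time()
        job()
        elapsed = time.time() - started
        remaining = period_seconds - elapsed
        if remaining > 0:
            time.sleep(remaining)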
Example #2
def main_optparse():
	"Entrypoint for the tproxy handler, that uses optparse to parse commandline arguments."
	parser = OptionParser(usage="%prog [-D] -l 'https://tollgate.example.com'")
	parser.add_option('-D', '--daemon', action='store_true', dest='daemon', help='start as a daemon')
	parser.add_option('-l', '--tollgate-uri', dest='tollgate_uri', metavar='URI', help='root URI of tollgate frontend HTTPS server')
	parser.add_option('-P', '--pid', dest='pid_file', default='/var/run/tollgate-captivity.pid', help='Location to write the PID file.  Only has effect in daemon mode.  [default: %default]')
	parser.add_option('-p', '--port', dest='port', type='int', metavar='PORT', help='port of the tproxy service [default: %default]', default=50080)
	parser.add_option('-m', '--mark', dest='mark', type='int', metavar='MARK', help='TPROXY mark tag for this service [default: %default]', default=1)
	options, args = parser.parse_args()
	
	if not options.tollgate_uri:
		parser.error('A URI to the tollgate site is required.')
	
	if not options.port:
		parser.error('A port to listen on is required.')
	
	if options.port < 0 or options.port > 65535:
		parser.error('Port specified is invalid.')
	
	if not options.mark:
		parser.error('Mark tag is required.')
		
	if options.mark <= 0 or options.mark > 255:
		parser.error('Mark value is invalid.')
	
	if not options.pid_file and options.daemon:
		parser.error('No PID file specified and running in daemon mode!')
	
	server = TProxyServer(options.tollgate_uri, options.port, options.mark)
	
	if options.daemon:
		from daemon import daemonize
		daemonize(options.pid_file)
		
	server.run()
Example #3
def main():
  global _global_rawmanifestdata
  global _global_rawmirrorlist 

  
  # read in the manifest file
  rawmanifestdata = open(_commandlineoptions.manifestfilename).read()

  # an ugly hack, but Python's request handlers don't have an easy way to
  # pass arguments
  _global_rawmanifestdata = rawmanifestdata
  _global_rawmirrorlist = json.dumps([])

  # I do this just for the sanity / corruption check
  manifestdict = uppirlib.parse_manifest(rawmanifestdata)

  vendorip = manifestdict['vendorhostname']
  vendorport = manifestdict['vendorport']
  
  # We should detach here.   I don't do it earlier so that error
  # messages are written to the terminal...   I don't do it later so that any
  # threads don't exist already.   If I do put it much later, the code hangs...
  if _commandlineoptions.daemonize:
    daemon.daemonize()

  # we're now ready to handle clients!
  _log('ready to start servers!')

 
  # first, let's fire up the upPIR server
  start_vendor_service(manifestdict, vendorip, vendorport)


  _log('servers started!')
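
The comment about detaching captures the usual ordering constraint: read configuration and fail early while messages still reach the terminal, call daemonize() before any threads exist (threads do not survive the fork), and only then start services. A minimal sketch of that ordering, assuming the same daemon module used throughout these examples; load_config is a placeholder invented for the sketch:

import threading
import time

def load_config():
    # Placeholder for whatever parsing/validation the real program does.
    return {"interval": 60}

def serve(detach=False):
    # 1. Fail early, while error messages still reach the terminal.
    config = load_config()
    # 2. Detach before any thread or listening socket exists;
    #    threads created before the fork would not survive it.
    if detach:
        import daemon                 # assumed: the module used in these examples
        daemon.daemonize()
    # 3. Only now start the long-running work.
    worker = threading.Thread(target=time.sleep, args=(config["interval"],))
    worker.start()
    worker.join()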
Example #4
def main():
	try:
		opts,args = getopt.getopt(sys.argv[1:], 'd')

	except getopt.GetoptError:
		print 'usage: %s [-d]' % sys.argv[0]
		sys.exit(1)

	for o,a in opts:
		if o == '-d': daemonize()

	s = mysocket()
	r = socket(AF_INET, SOCK_DGRAM)

	while 1:
		rv = ''  # ensure rv is defined for the sendto below
		data,addr = s.recvfrom(65536)

		if data == 'rusers':
			users = set()
			for l in commands.getoutput('who').split('\n'):
				try:
					users.add(l.split()[0])
				except IndexError:
					pass

			rv = '%s:   ' % gethostname()
			for u in users:
				rv += '%s ' % u

			#print rv

		s.sendto(rv, addr)
Example #5
File: main.py Project: sshyran/coldice
def start_server(directory, host, port, ext, daemon):
    if daemon:
        # daemonize
        daemonize(do_start_server, (directory, host, port, ext, daemon))
    else:
        # interactive
        do_start_server((directory, host, port, ext, daemon))
Example #6
    def start(self):
        """
      This method should be "extended" in a subclass if you want 
      to assign signals
      """
        if self.comingFromRestart:
            self.comingFromRestart = False
        """
      # Verify user is not root
      if os.getuid() == 0:
         print "FATAL: Do not start as root. It will be a mess!"
         sys.exit(2)
      """

        # If it is locked ...
        if self.isLocked():
            # ... and running, exit!!
            if not commands.getstatusoutput('ps -p ' + str(self.lockpid))[0]:
                self.printComment('Already start')
                sys.exit(2)
            # ... and not running, delete the lock.
            else:
                self.printComment('Locked but not running')
                os.unlink(self.lock)

        # Not locked or locked but not running
        # we create the lock
        daemon.daemonize()
        self.makeLock()
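
The lock handling above decides whether the PID in the lock file is still alive by shelling out to ps through the long-deprecated commands module. A sketch of the same stale-lock test using os.kill(pid, 0) instead; this is a substitute technique, not what the start() above does, and lock_path is assumed to be a file containing only the PID:

import os, errno

def lock_is_stale(lock_path):
    """Return True if the lock file exists but its PID is no longer running."""
    try:
        pid = int(open(lock_path).read().strip())
    except (IOError, ValueError):
        return False          # no (readable) lock file -> nothing stale to clean
    try:
        os.kill(pid, 0)       # signal 0: existence check only, nothing is delivered
    except OSError as e:
        return e.errno == errno.ESRCH   # ESRCH: no such process -> stale lock
    return False              # process exists -> lock is live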
Example #7
def main():
    global _global_rawmanifestdata
    global _global_rawmirrorlist

    # read in the manifest file
    rawmanifestdata = open(_commandlineoptions.manifestfilename).read()

    # an ugly hack, but Python's request handlers don't have an easy way to
    # pass arguments
    _global_rawmanifestdata = rawmanifestdata
    _global_rawmirrorlist = json.dumps([])

    # I do this just for the sanity / corruption check
    manifestdict = uppirlib.parse_manifest(rawmanifestdata)

    vendorip = manifestdict['vendorhostname']
    vendorport = manifestdict['vendorport']

    # We should detach here.   I don't do it earlier so that error
    # messages are written to the terminal...   I don't do it later so that any
    # threads don't exist already.   If I do put it much later, the code hangs...
    if _commandlineoptions.daemonize:
        daemon.daemonize()

    # we're now ready to handle clients!
    _log('ready to start servers!')

    # first, let's fire up the upPIR server
    start_vendor_service(manifestdict, vendorip, vendorport)

    _log('servers started!')
Example #8
File: notify.py Project: micahr/herald
def startup(options):
    if options.url is not None:
        client_options = [options.url,options.notifouser,
                        options.notifokey,options.port,
                        options.clientuser,options.password]
    else:
        print 'Please Specify a Url'
        sys.exit(1)
        
    try:
        if options.client == 'transmission':
            client = Transmission(*client_options)
        else:
            client = uTorrent(*client_options)
    except ClientError as e:
        print e.value
        sys.exit(1)

    if not options.debug:
        daemon.daemonize(PID_FILE_LOCATION)
    else:
        print 'Starting up Trans-Notify'

    while True:
        client.run()
        time.sleep(client.CHECK_INTERVAL)
Example #9
File: Igniter.py Project: hawkeye438/metpx
   def start(self):
      """
      This method should be "extended" in a subclass if you want 
      to assign signals
      """
      if self.comingFromRestart:
         self.comingFromRestart = False

      """
      # Verify user is not root
      if os.getuid() == 0:
         print "FATAL: Do not start as root. It will be a mess!"
         sys.exit(2)
      """
      
      # If it is locked ... 
      if self.isLocked(): 
         # ... and running, exit!!
         if not commands.getstatusoutput('ps -p ' + str(self.lockpid))[0]:
            self.printComment('Already start')
            sys.exit(2)
         # ... and not running, delete the lock.
         else:
            self.printComment('Locked but not running')
            os.unlink(self.lock)
      
      # Not locked or locked but not running
      # we create the lock 
      daemon.daemonize()
      self.makeLock()
Example #10
def main():
    print "Start!"
    daemon.daemonize(noClose=True)
    # Initialize the worker threads
    num_of_workers = 4
    workers = []
    workQueue = Queue()
    writeCount = Value("i", 0)
    for i in range( num_of_workers ):  
        worker = Process( target=writeWorker, args=(workQueue, writeCount))  
        workers.append(worker)
    for worker in workers:
        worker.start()
    
    sp = Process(target=showStatus, args=(workQueue, writeCount))
    sp.start()
    
    # Periodically read the data to be saved from ttserver
    while True:
        btime = time.time()
        try:
            writeToDB(workQueue)
        except:
            pass
        etime = time.time()
        log.info("Use time: %f", etime - btime)
        stime = 600 - (etime - btime)
        if stime >0:
            time.sleep(stime)
Example #11
File: sock5.py Project: codefever/yxsock5
def main():
    args = parse_args()
    pidfile = args.pid_file
    logfile = args.log_file
    addr = args.addr
    port = args.port
    dae = args.daemon
    if args.config_file:
        with open(args.config_file, 'r') as f:
            import json
            config = f.read()
            tmp = json.loads(config)
            pidfile = tmp['pid_file'] if 'pid_file' in tmp else pidfile
            logfile = tmp['log_file'] if 'log_file' in tmp else logfile
            addr = tmp['listen_addr'] if 'listen_addr' in tmp else addr
            port = tmp['listen_port'] if 'listen_port' in tmp else port
            dae = tmp['daemon'] if 'daemon' in tmp else dae
    #set up logging
    logging.basicConfig(filename=logfile, level=logging.WARNING)
    #create daemon
    import daemon
    if dae:
        daemon.daemonize()
    daemon.create_pid_file(pidfile)
    #create server
    server = SocketServer.ThreadingTCPServer((addr, port), LocalServer)
    server.allow_reuse_address = True
    t = threading.Thread(target=server.serve_forever)
    t.setDaemon(True)
    t.start()
    t.join()
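
Every option override above is spelled `tmp['key'] if 'key' in tmp else current`. The same overlay can be written with dict.get(), which returns the fallback when the key is absent; a small equivalent sketch using the key names from this example (the args object is whatever parse_args returned):

import json

def apply_config_overrides(args, config_path):
    """Overlay values from a JSON config file onto already-parsed CLI arguments."""
    with open(config_path, 'r') as f:
        tmp = json.load(f)
    # dict.get(key, default) returns the default when the key is missing,
    # which is what the chained conditionals above spell out by hand.
    args.pid_file = tmp.get('pid_file', args.pid_file)
    args.log_file = tmp.get('log_file', args.log_file)
    args.addr = tmp.get('listen_addr', args.addr)
    args.port = tmp.get('listen_port', args.port)
    args.daemon = tmp.get('daemon', args.daemon)
    return args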
Example #12
def main():
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--tail", dest="tail", action="store_false", default=True, 
                      help="if send file content from the end of it")
    (options, args) = parser.parse_args()
    
    try:
        pid_file = '/var/run/scribe_log.pid'
        daemon.daemonize(pid_file)
        
        threads = []
        for task in setting.scribe_config:
            threads.append(Handler(task['file'],
                                   task['category'],
                                   task['host'],
                                   task['port'],
                                   task['prefix'],
                                   task['postfix'],
                                   options.tail))

        for t in threads:
            t.setDaemon(True)
            t.start()
    
        for t in threads:
            t.join()
    except Exception,e:
        G_LOGGER.exception(e)
Example #13
File: bot.py Project: ikeikeikeike/babbler
 def run(self, as_daemon=False):
     """
     Main event loop that gets the entries from the feed and posts them
     to Twitter.
     """
     # Set up logging.
     logger_args = {"format": "%(asctime)-15s %(levelname)-5s %(message)s"}
     if as_daemon:
         self.kill()
         daemonize(self.pid_path)
         logger_args.update({"filename": self.log_path, "filemode": "wb"})
     logging.basicConfig(**logger_args)
     log_level = getattr(logging, self.data["options"]["log_level"])
     logging.getLogger().setLevel(log_level)
     logging.debug("\n\nUsing options:\n\n%s\n" % self.data["options"])
     # Set up the feed.
     self.data.setdefault("feed", RespondingFeed())
     feed_options = dict(twitter=self.twitter, max_len=TWEET_MAX_LEN,
                         eliza_path=self.eliza_path, **self.data["options"])
     self.data["feed"].setup(feed_options)
     self.data.save()
     # Set up hashtagging.
     tagger = Tagger(scorer=self.hashtag_score,
                     data_path=self.package_data_path,
                     min_length=self.data["options"]["hashtag_min_length"])
     # Main loop.
     try:
         for entry in self.data["feed"]:
             try:
                 # Twitter reply.
                 tweet = "%s %s" % (entry["to"], entry["title"])
                 reply_to = entry["id"]
             except KeyError:
                 # Feed entry.
                 tweet = entry["title"]
                 tweet += " %s" % entry["id"]
                 reply_to = None
             for tag in tagger.tags(entry["title"]):
                 tag = " #" + tag
                 # Extra check to ensure tag isn't already in the tweet.
                 if (len(tweet + tag) <= TWEET_MAX_LEN and
                     tag.strip().lower() not in tweet.lower()):
                     tweet += tag
             # Post to Twitter.
             done = True
             try:
                 if not self.data["options"]["dry_run"]:
                     self.twitter.PostUpdate(tweet, reply_to)
             except Exception, e:
                 logging.error("Error tweeting '%s': %s" % (tweet, e))
                 # Mark the entry as done if it's a duplicate.
                 done = str(e) == "Status is a duplicate."
             if done or True:  # XXX: Force process
                 logging.info("Tweeted: %s" % tweet)
                 # Move the entry from "todo" to "done" and save.
                 self.data["feed"].process()
                 if not self.data["options"]["dry_run"]:
                     self.data.save()
     except Exception, e:
         logging.critical("Shutting down on unhandled error: %s" % e)
Example #14
def boot_dbus(daemonise, name, pid_file=None):
	PortalBackendAPI(name)
	loop = glib.MainLoop()
	if daemonise:
		assert pid_file, 'Running in daemon mode means pid_file must be specified.'
		from daemon import daemonize
		daemonize(pid_file)
	loop.run()
Example #15
def main():
    logging.basicConfig(level=logging.DEBUG)

    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    notifyDaemon = NotificationDaemon("/org/freedesktop/Notifications")
    daemon.daemonize("/tmp/notication-daemon-mac-py.pid")
    while True:
        time.sleep(1000)
Example #16
File: nmmain.py Project: Ashmita89/attic
def main():

  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()

  # ensure that only one instance is running at a time...
  gotlock = runonce.getprocesslock("seattlenodemanager")
  if gotlock == True:
    # I got the lock.   All is well...
    pass
  else:
    if gotlock:
      servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
          ") is running")
    else:
      servicelogger.log("[ERROR]:Another node manager process is running")
    return

  
  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  
  # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
  #            seattle crontab entry has been installed in the crontab.
  #            Do this here because the "nodeman.cfg" needs to have been read
  #            into configuration via the persist module.
  if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
    if 'crontab_updated_for_2009_installer' not in configuration or \
          configuration['crontab_updated_for_2009_installer'] == False:
      try:
        import update_crontab_entry
        modified_crontab_entry = \
            update_crontab_entry.modify_seattle_crontab_entry()
        # If updating the seattle crontab entry succeeded, then update the
        # 'crontab_updated_for_2009_installer' so the nodemanager no longer
        # tries to update the crontab entry when it starts up.
        if modified_crontab_entry:
          configuration['crontab_updated_for_2009_installer'] = True
          persist.commit_object(configuration,"nodeman.cfg")

      except Exception,e:
        exception_traceback_string = traceback.format_exc()
        servicelogger.log("[ERROR]: The following error occured when " \
                            + "modifying the crontab for the new 2009 " \
                            + "seattle crontab entry: " \
                            + exception_traceback_string)
Example #17
def main():
    DBusGMainLoop(set_as_default=True)

    session_bus = dbus.SessionBus()
    for source, event, callback in load_callbacks():
        session_bus.add_signal_receiver(callback, event, source)

    loop = gobject.MainLoop()
    daemonize()
    loop.run()
Example #18
 def start():
     try : 
         daemon.daemonize(PIDFILE,stdout=LOGFILE,stderr=LOGFILE)
     except RuntimeError as e : 
         # daemon already runnning, as defined in the 
         # daemon.daemonize method
         raise SystemExit(1)
     else :  
         # daemon started
         main()
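
This wrapper (and the similar one in Example #20) relies on daemon.daemonize() raising RuntimeError when the service is already running, and on the stdout/stderr keyword arguments redirecting the daemon's output to a log file. That behaviour matches the common double-fork recipe with a PID-file guard; a minimal sketch of such a daemonize, offered as an assumption about what the daemon module here does rather than its actual source:

import os, sys, atexit

def daemonize(pidfile, stdout='/dev/null', stderr='/dev/null'):
    if os.path.exists(pidfile):
        raise RuntimeError('Already running')     # what the except branch above catches
    if os.fork() > 0:
        raise SystemExit(0)                       # first fork: the original parent exits
    os.chdir('/')
    os.setsid()                                   # become session leader, drop the controlling tty
    if os.fork() > 0:
        raise SystemExit(0)                       # second fork: can never reacquire a tty
    sys.stdout.flush()
    sys.stderr.flush()
    # Point the standard streams at the requested files.
    with open('/dev/null', 'rb', 0) as f:
        os.dup2(f.fileno(), sys.stdin.fileno())
    with open(stdout, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stdout.fileno())
    with open(stderr, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stderr.fileno())
    with open(pidfile, 'w') as f:
        f.write(str(os.getpid()))                 # record the daemon's PID
    atexit.register(lambda: os.remove(pidfile))   # remove the PID file on clean exit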
Example #19
def main():
    # Set umask.
    os.umask(0077)

    # Get listen address/port.
    listen_addr = cfg.get('general', 'listen_addr', '127.0.0.1')
    listen_port = int(cfg.get('general', 'listen_port', '7777'))

    run_as_daemon = cfg.get('general', 'run_as_daemon', 'yes')

    # Get log level.
    log_level = getattr(logging,
                        cfg.get('general', 'log_level', 'info').upper())

    # Initialize file based logger.
    if cfg.get('general', 'log_type', 'file') == 'file':
        if run_as_daemon == 'yes':
            logging.basicConfig(
                level=log_level,
                format='%(asctime)s %(levelname)s %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S',
                filename=cfg.get('general', 'log_file',
                                 '/var/log/iredapd.log'),
            )
        else:
            logging.basicConfig(
                level=log_level,
                format='%(asctime)s %(levelname)s %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S',
            )

    # Initialize policy daemon.
    socketDaemon = apdSocket((listen_addr, listen_port))

    # Run this program as daemon.
    if run_as_daemon == 'yes':
        daemon.daemonize()

    # Run as a low privileged user.
    run_as_user = cfg.get('general', 'run_as_user', 'nobody')
    uid = pwd.getpwnam(run_as_user)[2]

    try:
        # Write pid number into pid file.
        f = open(cfg.get('general', 'pid_file', '/var/run/iredapd.pid'), 'w')
        f.write(str(os.getpid()))
        f.close()

        # Set uid.
        os.setuid(uid)

        # Starting loop.
        asyncore.loop()
    except KeyboardInterrupt:
        pass
Example #20
 def start():
     try : 
         daemon.daemonize(PIDFILE,stdout=LOGFILE,stderr=LOGFILE,initialize_pipes = False)
     except RuntimeError as e : 
         # daemon already runnning, as defined in the 
         # daemon.daemonize method
         print(e)
         sys.stderr.write(str(e))
         raise SystemExit(1)
     else : 
         # daemon started
         main()
Example #21
    def daemonize(self):
        
        try:
            import daemon

            daemon.daemonize(PIDFILE)
            self.log_event("Starting Secure XMLRPC Server [background]: https://{0}:{1}/".format(self.server_address[0], self.server_address[1]))
            SocketServer.TCPServer.serve_forever(self)

        except ImportError:
            sys.stderr.write("Unable to daemonize failing back to foreground.\n")
            self.serve_forever(daemon=False)
Example #22
def main():
    # Set umask.
    os.umask(0077)

    # Get listen address/port.
    listen_addr = cfg.get('general', 'listen_addr', '127.0.0.1')
    listen_port = int(cfg.get('general', 'listen_port', '7777'))

    run_as_daemon = cfg.get('general', 'run_as_daemon', 'yes')

    # Get log level.
    log_level = getattr(logging, cfg.get('general', 'log_level', 'info').upper())

    # Initialize file based logger.
    if cfg.get('general', 'log_type', 'file') == 'file':
        if run_as_daemon == 'yes':
            logging.basicConfig(
                    level=log_level,
                    format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename=cfg.get('general', 'log_file', '/var/log/iredapd.log'),
                    )
        else:
            logging.basicConfig(
                    level=log_level,
                    format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    )

    # Initialize policy daemon.
    socketDaemon = apdSocket((listen_addr, listen_port))

    # Run this program as daemon.
    if run_as_daemon == 'yes':
        daemon.daemonize()

    # Run as a low privileged user.
    run_as_user = cfg.get('general', 'run_as_user', 'nobody')
    uid = pwd.getpwnam(run_as_user)[2]

    try:
        # Write pid number into pid file.
        f = open(cfg.get('general', 'pid_file', '/var/run/iredapd.pid'), 'w')
        f.write(str(os.getpid()))
        f.close()

        # Set uid.
        os.setuid(uid)

        # Starting loop.
        asyncore.loop()
    except KeyboardInterrupt:
        pass
Example #23
File: websocket.py Project: raylu/ykill
def main():
	if len(sys.argv) == 2 and sys.argv[1] == '-d':
		daemon.daemonize()

	def exit():
		conn.close()
		log.close()
	atexit.register(exit)

	while True:
		run()
		time.sleep(15)
Example #24
File: jsonic.py Project: cousnecs/jsonic
def run(port=8888, processes=4, debug=False, static=False, pid=None):
    '''
    Runs an instance of the JSonic server.
    
    :param port: Server port
    :type port: int
    :param processes: Number of worker processes for synthesis and caching
        operations. Defaults to 4.
    :type processes: int
    :param debug: True to enable automatic server reloading for debugging.
        Defaults to False.
    :type debug: bool
    :param static: True to serve ../ as static files to allow running of the
        example code and downloading of the JS directly from this server. 
        False to disable static file sharing when this server should handle the
        JSonic REST API only.
    :type static: bool
    :param pid: Name of a pid file to write if launching as a daemon or None
        to run in the foreground
    :type pid: string
    '''
    if pid is not None:
        # log to file
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(message)s',
                            filename='jsonic.log',
                            filemode='w')
        # launch as a daemon and write the pid file
        import daemon
        daemon.daemonize(pid)
    else:
        # log to console
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(message)s')
    synthesizer.init()
    kwargs = {}
    kwargs['pool'] = pool = multiprocessing.Pool(processes=processes)
    if static:
        # serve static files for debugging purposes
        kwargs['static_path'] = os.path.join(os.path.dirname(__file__), "../")
    application = tornado.web.Application(
        [(r'/engine', EngineHandler),
         (r'/engine/([a-zA-Z0-9]+)', EngineHandler), (r'/synth', SynthHandler),
         (r'/files/([a-f0-9]+-[a-f0-9]+\..*)', FilesHandler, {
             'path': './files'
         }), (r'/version', VersionHandler)],
        debug=debug,
        **kwargs)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(port)
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.start()
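
Per the docstring, the pid argument alone decides between foreground and daemon operation. A short usage sketch, assuming this run() is importable as jsonic.run (the module name is taken from the file name shown above):

from jsonic import run   # assumed import path

# Foreground, console logging (pid defaults to None):
run(port=8888, debug=True, static=True)

# Daemonized, logging to jsonic.log and writing the given PID file:
# run(port=8888, processes=8, pid='/var/run/jsonic.pid')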
Example #25
File: jsonic.py Project: query/jsonic
def run(port=8888, processes=4, debug=False, static=False, pid=None):
    '''
    Runs an instance of the JSonic server.
    
    :param port: Server port
    :type port: int
    :param processes: Number of worker processes for synthesis and caching
        operations. Defaults to 4.
    :type processes: int
    :param debug: True to enable automatic server reloading for debugging.
        Defaults to False.
    :type debug: bool
    :param static: True to serve ../ as static files to allow running of the
        example code and downloading of the JS directly from this server. 
        False to disable static file sharing when this server should handle the
        JSonic REST API only.
    :type static: bool
    :param pid: Name of a pid file to write if launching as a daemon or None
        to run in the foreground
    :type pid: string
    '''
    if pid is not None:
        # log to file
        logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename='jsonic.log',
                    filemode='w')
        # launch as a daemon and write the pid file
        import daemon
        daemon.daemonize(pid)
    else:
        # log to console
        logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
    synthesizer.init()
    kwargs = {}
    kwargs['pool'] = pool = multiprocessing.Pool(processes=processes)
    if static:
        # serve static files for debugging purposes
        kwargs['static_path'] = os.path.join(os.path.dirname(__file__), "../")
    application = tornado.web.Application([
        (r'/engine', EngineHandler),
        (r'/engine/([a-zA-Z0-9]+)', EngineHandler),
        (r'/synth', SynthHandler),
        (r'/files/([a-f0-9]+-[a-f0-9]+\..*)', FilesHandler, {'path' : './files'}),
        (r'/version', VersionHandler)
    ], debug=debug, **kwargs)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(port)
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.start()
Example #26
def main(options, args, ports):
    username = args[0]
    access_key = args[1]
    local_host = args[2]
    domains = ",".join(args[4:]).split(",")

    if options.daemonize:
        daemon.daemonize(options.pidfile)

    if options.diagnostic:
        run_diagnostic(domains, ports, local_host)

    sauce_client = saucerest.SauceClient(name=username, access_key=access_key,
                                         base_url=options.base_url)

    if sauce_client.get_tunnel("test-authorized")['error'] == 'Unauthorized':
        logger.error("Exiting: Incorrect username or access key")
        sys.exit(1)

    def disconnected_callback(tunnel_id):
        logger.warning("tunnel %s disconnected, marking unhealthy", tunnel_id)
        sauce_client.unhealthy_tunnels.add(tunnel_id)

    def tunnel_change_callback(new_tunnel, connected_callback=None):
        global tunnel_id
        tunnel_id = new_tunnel['id']
        sshtunnel.connect_tunnel(
            tunnel_id, sauce_client.base_url, username, access_key, local_host,
            new_tunnel['Host'], ports, connected_callback,
            lambda t=tunnel_id: disconnected_callback(t),
            lambda t=tunnel_id: sauce_client.delete_tunnel(t),
            options.diagnostic)

    try:
        max_tries = 1000
        if not options.shutdown:
            max_tries = 1
        tunnel = get_new_tunnel(sauce_client, domains,
                                replace=options.shutdown,
                                max_tries=max_tries)
        connect_tunnel(options, tunnel, tunnel_change_callback)
        h = Heartbeat(sauce_client, tunnel_id, tunnel_change_callback)
        h.start()
        reactor.run()
        logger.warning("Reactor stopped")
        h.done = True
        h.join()
    finally:
        logger.warning("Exiting")
        sauce_client.delete_tunnel(tunnel_id)
Example #27
    def start(self):
        '''
      After initializing server, this is called to begin listening. This method
      blocks until the server completes execution.
      '''
        if self.__reuseAddr:
            self.__passiveSocket.setsockopt(socket.SOL_SOCKET,
                                            socket.SO_REUSEADDR, 1)
        self.__passiveSocket.bind((self.__interface, self.__serverPort))
        self.__passiveSocket.listen(self.__backlog)
        self.__sockets[self.__passiveSocket.fileno()] = self.__passiveSocket
        poll = select.poll()
        poll.register(self.__passiveSocket, select.POLLIN)
        if not os.path.exists(self.getLogDirectory()):
            os.makedirs(self.getLogDirectory())
        if self.__daemonize:
            daemon.daemonize([self.__passiveSocket.fileno()])
        while True:
            for fd, event in poll.poll():
                self.__clientID = fd  # Client being served
                sock = self.__sockets[fd]
                # Removed closed sockets from our list.
                if event & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
                    poll.unregister(fd)
                    del self.__sockets[fd]
                    self.__responses.pop(sock, None)
                    self.disconnected()
                    del self.__clientAddr[fd]

                # Accept connections from new sockets.
                elif sock is self.__passiveSocket:
                    newsock, sockname = sock.accept()
                    newsock.setblocking(False)
                    fd = newsock.fileno()
                    self.__clientID = fd  # Client being served
                    self.__clientAddr[fd] = newsock.getpeername()
                    self.__sockets[fd] = newsock
                    poll.register(fd, select.POLLIN)
                    self.newClient()

                # Collect incoming data until newline character found.
                elif event & select.POLLIN:
                    self.received()

                # Don't know how to handle it.
                else:
                    logging.warning("Don't know how to handle event:")
                    logging.warning("   " + str(fd) + ": " + str(event))

                self.__clientID = -1  # -1 indicates no client being served
Example #28
 def start(self):
    '''
    After initializing server, this is called to begin listening. This method
    blocks until the server completes execution.
    '''
    if self.__reuseAddr:
       self.__passiveSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.__passiveSocket.bind((self.__interface,self.__serverPort))
    self.__passiveSocket.listen(self.__backlog)
    self.__sockets[self.__passiveSocket.fileno()]  = self.__passiveSocket
    poll = select.poll()
    poll.register(self.__passiveSocket, select.POLLIN)
    if not os.path.exists(self.getLogDirectory()):
       os.makedirs(self.getLogDirectory())
    if self.__daemonize:
       daemon.daemonize([self.__passiveSocket.fileno()])
    while True:
       for fd, event in poll.poll():
          self.__clientID = fd # Client being served
          sock = self.__sockets[fd]
          # Removed closed sockets from our list.
          if event & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
             poll.unregister(fd)
             del self.__sockets[fd]
             self.__responses.pop(sock, None)
             self.disconnected()
             del self.__clientAddr[fd]
          
          # Accept connections from new sockets.
          elif sock is self.__passiveSocket:
             newsock, sockname = sock.accept()
             newsock.setblocking(False)
             fd = newsock.fileno()
             self.__clientID = fd # Client being served
             self.__clientAddr[fd] = newsock.getpeername()
             self.__sockets[fd] = newsock
             poll.register(fd, select.POLLIN)
             self.newClient()
          
          # Collect incoming data until newline character found.
          elif event & select.POLLIN:
             self.received()
          
          # Don't know how to handle it.
          else:
             logging.warning("Don't know how to handle event:")
             logging.warning("   " + str(fd) + ": " + str(event))
          
          self.__clientID = -1 # -1 indicates no client being served
Example #29
File: server.py Project: al-layth/denigma
def run():
    """CLI entry point. Parses args and starts the gevent-socketio server."""
    settings.parse_args()
    pid_name = "chat-%s-%s.pid" % (settings.HTTP_HOST, settings.HTTP_PORT)
    pid_file = settings.PID_FILE or os.path.join(gettempdir(), pid_name)
    if settings.KILL:
        if kill(pid_file):
            print "Daemon killed"
        else:
            print "Could not kill any daemons"
        return
    elif kill(pid_file):
        print "Running daemon killed"
    if settings.DAEMON:
        daemonize(pid_file)
    serve_forever()
Example #30
    def run(self):
        self.initialize()

        self.forever = 1

        if self.cnf.daemonize:
            daemon.daemonize(self.cnf.pidfile)

        self.logger.info("Node {} started.".format(self.cnf.node))
        while (self.forever):
            self.loop()

        self.logger.debug("Aborting loop.")
        e = self.on_exit(None)

        return e
Example #31
File: server.py Project: McDoku/denigma
def run():
    """CLI entry point. Parses args and starts the gevent-socketio server."""
    settings.parse_args()
    pid_name = "chat-%s-%s.pid" % (settings.HTTP_HOST, settings.HTTP_PORT)
    pid_file = settings.PID_FILE or os.path.join(gettempdir(), pid_name)
    if settings.KILL:
        if kill(pid_file):
            print "Daemon killed"
        else:
            print "Could not kill any daemons"
        return
    elif kill(pid_file):
        print "Running daemon killed"
    if settings.DAEMON:
        daemonize(pid_file)
    serve_forever()
Example #32
def run():
    argument_parser = create_optparser()
    args = argument_parser.parse_args()

    if args.daemonize:
        daemonize(args.pid_file, args.error_log)

    configure_logging()
    install_signal_handlers()
    config = load_config(args.config)

    LOG.info('Starting')

    schedule(lambda: perform_useless_task(config['server']['host']),
             period=config['period'], run_now=False)


    wait_for_shutdown()
Example #33
def run(port=8888, threads=4, debug=False, static=False, pid=None,
        mongo_host='127.0.0.1', mongo_port=27017, seed=0):
    if pid is not None:
        # launch as a daemon and write the pid file
        import daemon
        daemon.daemonize(pid)
    # retry making the mongo connection with exponential backoff
    for i in range(8):
        try:
            conn = pymongo.Connection(mongo_host, mongo_port)
            break
        except pymongo.errors.AutoReconnect:
            t = 2 ** i
            logging.warning('backoff on python connection %d' % t)
            time.sleep(t)
    else:
        raise pymongo.errors.AutoReconnect

    google_secrets = {
        "key": os.environ['GOOGLE_OAUTH_KEY'],
        "secret": os.environ['GOOGLE_OAUTH_SECRET'],
        "redirect": os.environ['GOOGLE_OAUTH_REDIRECT']
    }

    kwargs = {
        'cookie_secret': generate_secret(seed),
        'debug': debug,
        'thread_count': threads,
        'mongo_conn': conn,
        'google_oauth': google_secrets
    }
    if static:
        kwargs['static_path'] = os.path.join(os.path.dirname(__file__), "../")
    application = mongo_util.MongoApplication([
        (r"/data/([a-zA-Z]*)-([a-zA-Z][a-zA-Z0-9]*)/([a-zA-Z][a-zA-Z0-9]*)?$", DatabaseHandler),
        (r"/data/([a-zA-Z]*)-([a-zA-Z][a-zA-Z0-9]*)/([a-zA-Z][a-zA-Z0-9]*)/$", CollectionHandler),
        (r"/data/([a-zA-Z]*)-([a-zA-Z][a-zA-Z0-9]*)/([a-zA-Z][a-zA-Z0-9]*)/([a-f0-9]+)", ItemHandler),
        (r"/data/_auth(.*)$", access.AuthHandler),
        (r"/data/_test_(reset|\d+)$", TestHandler),
        (r"/data/_warning$", WarningHandler),
    ], **kwargs)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(port)
    tornado.ioloop.IOLoop.instance().start()
Example #34
    def start():
        log("starting...")
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.bind(('',0)) # pick a free port
        port = s.getsockname()[1]
        del s
        log("port=%s"%port)
        open(portfile,'w').write(str(port))
        open(logfile, 'w')  # for now we clear it on restart...
        log("setting logfile to %s"%logfile)

        import sage_server
        run_server = lambda: sage_server.run_server(port=port, host='127.0.0.1', pidfile=pidfile, logfile=logfile)
        if daemon:
            log("daemonizing")
            from daemon import daemonize
            daemonize(pidfile)
            run_server()
        else:
            log("starting in foreground")
            run_server()
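
Binding to port 0 is what lets the snippet above pick a free port before daemonizing. That trick on its own, with nothing else from the snippet assumed:

import socket

def pick_free_port():
    # Port 0 asks the kernel to choose any currently unused TCP port.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('', 0))
    port = s.getsockname()[1]
    s.close()   # note: another process could grab the port between this and our later bind
    return port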
Example #35
def parseArgs():
    """Parse any command line options."""

    parser = OptionParser(usage=usagestr)
    parser.add_option("-d", "--daemon",  action="store_true", dest="daemon",  help="Run as a daemon")
    parser.add_option("-v", "--version", action="store_true", dest="version", help="Give version number then exit")
    parser.add_option("-x", "--exit",    action="store_true", dest="exit"   , help="Exit on I/O error (rather than restart)")
    (options, args) = parser.parse_args()
    
    if options.version:
        print weewx.__version__
        sys.exit()
        
    if len(args) < 1:
        sys.stderr.write("Missing argument(s).\n")
        sys.stderr.write(parser.parse_args(["--help"]))
        sys.exit(weewx.CMD_ERROR)
    
    if options.daemon:
        daemon.daemonize(pidfile='/var/run/weewx.pid')

    return (options, args)
Example #36
File: update.py Project: mcg/couchdb-solr2
def main():
    opts, args = parse_opts()
    config = configure(opts.config_file)
    if config is None:
        return 1

    if opts.no_daemonize is False:
        daemonize(opts.pid_file)

    # File handles will be closed during daemonization
    log_format = '[%(asctime)s|%(levelname)s|%(name)s|%(threadName)s|%(message)s]'
    logging.basicConfig(filename=config['log']['file'],
                        level=string2log_level(config['log']['level']),
                        format=log_format)

    updater = SolrUpdater(config['amqp'], config['solr']['uri'])
    if updater.start_amqp() is False:
        print >> sys.stderr, "Problem connecting to AMQP broker"
        return 2
    signal.signal(signal.SIGTERM, lambda s, f: updater.shutdown())
    updater.process_updates()
    return 0
Example #37
def main():
    global configuration

    if not FOREGROUND:
        # Background ourselves.
        daemon.daemonize()

    # Check if we are running in testmode.
    if TEST_NM:
        nodemanager_pid = os.getpid()
        servicelogger.log(
            "[INFO]: Running nodemanager in test mode on port 1224, " +
            "pid %s." % str(nodemanager_pid))
        nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'),
                                'w')

        # Write out the pid of the nodemanager process that we started to a file.
        # This is only done if the nodemanager was started in test mode.
        try:
            nodeman_pid_file.write(str(nodemanager_pid))
        finally:
            nodeman_pid_file.close()

    else:
        # ensure that only one instance is running at a time...
        gotlock = runonce.getprocesslock("seattlenodemanager")

        if gotlock == True:
            # I got the lock.   All is well...
            pass
        else:
            if gotlock:
                servicelogger.log(
                    "[ERROR]:Another node manager process (pid: " +
                    str(gotlock) + ") is running")
            else:
                servicelogger.log(
                    "[ERROR]:Another node manager process is running")
            return

    servicelogger.log('[INFO]: This is Seattle release "' + version + "'")

    # Feature add for #1031: Log information about the system in the nm log...
    servicelogger.log('[INFO]:platform.python_version(): "' +
                      str(platform.python_version()) + '"')
    servicelogger.log('[INFO]:platform.platform(): "' +
                      str(platform.platform()) + '"')

    # uname on Android only yields 'Linux', let's be more specific.
    try:
        import android
        servicelogger.log('[INFO]:platform.uname(): Android / "' +
                          str(platform.uname()) + '"')
    except ImportError:
        servicelogger.log('[INFO]:platform.uname(): "' +
                          str(platform.uname()) + '"')

    # I'll grab the necessary information first...
    servicelogger.log("[INFO]:Loading config")
    # BUG: Do this better?   Is this the right way to engineer this?
    configuration = persist.restore_object("nodeman.cfg")

    # Armon: initialize the network restrictions
    initialize_ip_interface_restrictions(configuration)

    # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
    #            seattle crontab entry has been installed in the crontab.
    #            Do this here because the "nodeman.cfg" needs to have been read
    #            into configuration via the persist module.
    if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
        if 'crontab_updated_for_2009_installer' not in configuration or \
              configuration['crontab_updated_for_2009_installer'] == False:
            try:
                # crontab may not exist on Android, therefore let's not check
                # if we are running on Android. See #1302 and #1254.
                try:
                    import android
                except ImportError:
                    import update_crontab_entry
                    modified_crontab_entry = \
                        update_crontab_entry.modify_seattle_crontab_entry()
                    # If updating the seattle crontab entry succeeded, then update the
                    # 'crontab_updated_for_2009_installer' so the nodemanager no longer
                    # tries to update the crontab entry when it starts up.
                    if modified_crontab_entry:
                        configuration[
                            'crontab_updated_for_2009_installer'] = True
                        persist.commit_object(configuration, "nodeman.cfg")

            except Exception, e:
                exception_traceback_string = traceback.format_exc()
                servicelogger.log("[ERROR]: The following error occured when " \
                                    + "modifying the crontab for the new 2009 " \
                                    + "seattle crontab entry: " \
                                    + exception_traceback_string)
Example #38
class MyDaemon(daemonize()):
    def run(self):
        # Or simply merge your code with MyDaemon.
        your_code = YourCode()
        your_code.run()
Example #39
	self.output_filenames = output_filenames
	self.OpenOutputFiles()

    def ReloadOutputFiles(self):
	self.CloseOutputFiles()
	self.OpenOutputFiles()

    """ Let's stare abstractedly at the User Streams ! """
    def on_status(self, status):
	dump_text = json.dumps(status._json) + "\n"
	if self.output_files is None:
		print dump_text
		sys.stdout.flush()
		return
	for file in self.output_files:
		file.write(dump_text)
		file.flush()

listener = Listener()

def SigHupHandler(signum, frame):
	listener.ReloadOutputFiles()

if __name__ == '__main__':
    signal.signal(signal.SIGHUP, SigHupHandler)
    auth = get_oauth()
    listener.SetOutputFilenames(sys.argv[1:])
    stream = Stream(auth, listener, secure=True)
    daemon.daemonize(PID_FILE_NAME)
    stream.userstream()
Example #40
# -*- coding: utf-8 -*-
import platform
import os
from daemon import daemonize
from proxypool import ProxyPool

if __name__ == '__main__':
    # Run as a daemon process
    if "Linux" in platform.system():
        daemonize(os.getcwd(), '/dev/null', '/tmp/daemon_stdout.log', '/tmp/daemon_error.log')
    redis_key_https = "merge_https_proxy"
    redis_key_http = "merge_http_proxy"
    redis_distinct_set_http = "merge_set_http"
    redis_distinct_set_https = "merge_set_https"
    ProxyPool(redis_key_http=redis_key_http,
              redis_key_https=redis_key_https,
              redis_distinct_set_http=redis_distinct_set_http,
              redis_distinct_set_https=redis_distinct_set_https).run()

Example #41
    def _replicate_watcher(args):
        replicate_active_watcher(min_time_s=args.min_time_s)

    parser_replicate_watcher = subparsers.add_parser(
        'replicate_watcher',
        help=
        'watch the active directory (created by ./storage.py --activity), and when projects change snapshot them and replicate them.'
    )
    parser_replicate_watcher.add_argument(
        "--min_time_s",
        help="min interval between snapshots (default: 120)",
        type=int,
        default=120)
    parser_replicate_watcher.set_defaults(func=_replicate_watcher)

    args = parser.parse_args()

    setup_log(loglevel=args.loglevel, logfile=args.logfile)

    if args.daemon:
        if not args.pidfile:
            raise RuntimeError("in --daemon mode you *must* specify --pidfile")
        import daemon
        daemon.daemonize(args.pidfile)

    args.func(args)

else:
    setup_log()
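
The final dispatch (args.func(args)) works because each subparser registered its handler with set_defaults(func=...). A self-contained sketch of that argparse pattern, independent of the storage/replication commands above:

import argparse

def cmd_hello(args):
    print("hello, %s" % args.name)

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
hello = subparsers.add_parser('hello', help='print a greeting')
hello.add_argument('--name', default='world')
hello.set_defaults(func=cmd_hello)   # attach the handler to this subcommand

args = parser.parse_args(['hello', '--name', 'daemon'])
args.func(args)                      # dispatches to cmd_hello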
Example #42
File: tts.py Project: rainfly123/TTS
            f.write(data)
        val = {
            "code": 0,
            "data": "http://120.76.190.105/" + mylogo,
            "message": "Ok"
        }
        self.write(json.dumps(val))


def main():
    tornado.options.parse_command_line()
    application = tornado.web.Application(
        [
            (r"/tts", ttsHandler),
            (r"/tts_url", ttsurlHandler),
        ],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        debug=True,
        cookie_secret="*****@*****.**",
        login_url="/login")

    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    daemon.daemonize("/tmp/tts.pid")
    main()
Example #43
                if cgids[self.gid] % 4 == 0:
                    send.cSend(self.gid)
                return
        except:
            print "Connect ERROR"
            cgids[self.gid] += 1
            if cgids[self.gid] % 4 == 0:
                send.cSend(self.gid)
            return
        if gids[self.gid] == md5:
            print "Md5 ERROR"
            cgids[self.gid] += 1
            if cgids[self.gid] % 4 == 0:
                send.mSend(self.gid)
        else:
            gids[self.gid] = md5
            if cgids[self.gid] > 3:
                cgids[self.gid] = 0
                send.okSend(self.gid)

if __name__ == "__main__":
    daemon.daemonize("/tmp/cdn.pid")
    os.chdir("/data")

    while True:
        for gid in gids.keys():
            t = cdn(gid)
            t.start()
        time.sleep(25)

Example #44
def main_optparse():
    "Entrypoint for the tproxy handler, that uses optparse to parse commandline arguments."
    parser = OptionParser(usage="%prog [-D] -l 'https://tollgate.example.com'")
    parser.add_option('-D',
                      '--daemon',
                      action='store_true',
                      dest='daemon',
                      help='start as a daemon')
    parser.add_option('-l',
                      '--tollgate-uri',
                      dest='tollgate_uri',
                      metavar='URI',
                      help='root URI of tollgate frontend HTTPS server')
    parser.add_option(
        '-P',
        '--pid',
        dest='pid_file',
        default='/var/run/tollgate-captivity.pid',
        help=
        'Location to write the PID file.  Only has effect in daemon mode.  [default: %default]'
    )
    parser.add_option('-p',
                      '--port',
                      dest='port',
                      type='int',
                      metavar='PORT',
                      help='port of the tproxy service [default: %default]',
                      default=50080)
    parser.add_option(
        '-m',
        '--mark',
        dest='mark',
        type='int',
        metavar='MARK',
        help='TPROXY mark tag for this service [default: %default]',
        default=1)
    options, args = parser.parse_args()

    if not options.tollgate_uri:
        parser.error('A URI to the tollgate site is required.')

    if not options.port:
        parser.error('A port to listen on is required.')

    if options.port < 0 or options.port > 65535:
        parser.error('Port specified is invalid.')

    if not options.mark:
        parser.error('Mark tag is required.')

    if options.mark <= 0 or options.mark > 255:
        parser.error('Mark value is invalid.')

    if not options.pid_file and options.daemon:
        parser.error('No PID file specified and running in daemon mode!')

    server = TProxyServer(options.tollgate_uri, options.port, options.mark)

    if options.daemon:
        from daemon import daemonize
        daemonize(options.pid_file)

    server.run()
Example #45
        return ""

class palette_png:
    def GET(self):
        web.header("Content-Type", "images/png")
        params = web.input()
        data = StringIO()
        colors_as_image(params.c.split(",")).save(data, 'png')
        data.seek(0)
        return data.read()
"""

if __name__ == "__main__":
    gflags.DEFINE_boolean('daemon', False, "is start in daemon mode?")
    gflags.DEFINE_boolean('webdebug', False, "is web.py debug")
    gflags.DEFINE_boolean('reload', False, "is web.py reload app")
    gflags.DEFINE_string('solr_host', 'sdl-guang-solr4', 'solr host')
    backup_args = []
    backup_args.extend(sys.argv)
    sys.argv = [sys.argv[0],] + sys.argv[2:]
    log_init('GuangLogger', "sqlalchemy.*")
    sys.argv = backup_args[:2]
    web.config.debug = FLAGS.webdebug
    if len(sys.argv) == 1:
        web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
    if FLAGS.daemon:
        daemon.daemonize(os.path.join(file_path, 'solrweb.pid'))
    #render = web.template.render('templates/', base='layout')
    app = web.application(urls, globals(), autoreload=FLAGS.reload)
    app.run()
Example #46
class ProxyServer():
    '''Class used to invoke a simple test HTTP Proxy server'''
    def __init__(self):
        self.proxy_port = 1337
        self.proxy_host = 'localhost'

        self.proxyd = None

    def run_proxy(self):
        '''Starts Instance of Proxy in a TCPServer'''
        #Setup Proxy in thread
        self.proxyd = SocketServer.TCPServer(
            (self.proxy_host, self.proxy_port), Proxy).serve_forever()
        # Start Proxy Process
        print("serving at port %s on PID %s " %
              (self.proxy_port, self.proxyd.pid))

    def get_proxy_url(self):
        return "http://%s:%s" % (self.proxy_host, self.proxy_port)


if __name__ == '__main__':
    import daemon

    daemon.daemonize()
    daemon.createPid()

    proxy = ProxyServer()
    proxy.run_proxy()
Example #47
            sys.argv[0]),
              file=sys.stderr)
        raise SystemExit(1)

    app = sys.argv[2]
    PIDFILE = "/tmp/scan" + app + ".pid"

    if sys.argv[1] == 'start':
        logger.info("Receive start command for app: %s" % app)
        if len(sys.argv) != 4:
            print('Usage: {} [start] [appname] [msg]'.format(sys.argv[0]),
                  file=sys.stderr)
            raise SystemExit(1)
        try:
            daemonize(PIDFILE,
                      stdout='/tmp/scanfiles.log',
                      stderr='/tmp/scanfiles.log')
        except RuntimeError as e:
            print(e, file=sys.stderr)
            raise SystemExit(1)
        main(app, sys.argv[3])

    elif sys.argv[1] == 'stop':
        logger.info("Receive stop command for app: %s" % app)
        if os.path.exists(PIDFILE):
            #with open("%sthreadnum.txt"%outputdir,"wt") as f:
            #    print(0,file=f)
            with open(PIDFILE) as f:
                os.kill(int(f.read()), signal.SIGTERM)
        else:
            print('Not running', file=sys.stderr)
Example #48
        # Have one global model for db query
        self.user_model = self.loader.use("user.model")
        self.topic_model = self.loader.use("topic.model")
        self.reply_model = self.loader.use("reply.model")
        self.plane_model = self.loader.use("plane.model")
        self.node_model = self.loader.use("node.model")
        self.notification_model = self.loader.use("notification.model")
        self.vote_model = self.loader.use("vote.model")
        self.favorite_model = self.loader.use("favorite.model")

        # Have one global session controller
        self.session_manager = SessionManager(settings["cookie_secret"],
                                              ["127.0.0.1:11211"], 0)

        # Have one global memcache controller
        self.mc = memcache.Client(["127.0.0.1:11211"])


def main():
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    daemon.daemonize(stdin='/dev/null',
                     stdout='output.log',
                     stderr='error.log')
    main()
Example #49
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    server.ehlo()
    server.login(email_user, email_password)
    server.send_message(message)
    server.close()


def find_by_id(object_id, file_name):
    with open(file_name, 'r') as db:
        lines = db.readlines()

    for line in lines:
        if line.strip('\n') == str(object_id):
            return True

    return False


daemon.daemonize('/home/ubuntu/python/twitter/log/log.in',
                 '/home/ubuntu/python/twitter/log/log.out',
                 '/home/ubuntu/python/twitter/log/log.err')

while True:
    try:
        check_new()
    except Exception as e:
        sys.stderr.write("{0}".format(e))

    time.sleep(60)
Example #50
        self.write("http://120.76.190.105/" + mylogo)


def main():
    tornado.options.parse_command_line()
    application = tornado.web.Application(
        [
            (r"/", MainHandler),
            (r"/mdata", MdataHandler),
            (r"/ndata", NdataHandler),
            (r"/login", LoginHandler),
            (r"/minput", minputHandler),
            (r"/ninput", ninputHandler),
            (r"/tts", ttsHandler),
            (r"/tts_url", ttsurlHandler),
        ],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        debug=True,
        cookie_secret="*****@*****.**",
        login_url="/login")

    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    daemon.daemonize("/tmp/cms.pid")
    main()
Example #51
    #try to login
    while True:
        try:
            sync_printers(cups_connection, cpp)
            break
        except rest.REST.RESTException, e:
            #not a auth error
            if e.code != 403:
                raise
            #don't have a stored auth key
            if not cpp.get_saved_auth():
                raise
            #reset the stored auth
            cpp.set_auth('')

    printers = cpp.get_printers()

    if daemon:
        try:
            import daemon
        except ImportError:
            print 'daemon module required for -d'
            sys.exit(1)
        daemon.daemonize(pidfile)

    process_jobs(cups_connection, cpp, printers)


if __name__ == '__main__':
    main()
Example #52
def main(options, args, EngineClass=StdEngine):
    """Prepare the main loop and run it. 

    Mostly consists of a bunch of high-level preparatory calls, protected
    by try blocks in the case of an exception."""

    # Set the logging facility.
    syslog.openlog('weewx', syslog.LOG_PID | syslog.LOG_CONS)

    # Set up the signal handlers.
    signal.signal(signal.SIGHUP, sigHUPhandler)
    signal.signal(signal.SIGTERM, sigTERMhandler)

    syslog.syslog(syslog.LOG_INFO,
                  "engine: Initializing weewx version %s" % weewx.__version__)
    syslog.syslog(syslog.LOG_INFO, "engine: Using Python %s" % sys.version)
    syslog.syslog(syslog.LOG_INFO, "engine: Platform %s" % platform.platform())

    # Save the current working directory. A service might
    # change it. In case of a restart, we need to change it back.
    cwd = os.getcwd()

    if options.daemon:
        syslog.syslog(syslog.LOG_INFO,
                      "engine: pid file is %s" % options.pidfile)
        daemon.daemonize(pidfile=options.pidfile)

    while True:

        os.chdir(cwd)

        config_path = os.path.abspath(args[0])
        config_dict = getConfiguration(config_path)

        # Look for the debug flag. If set, ask for extra logging
        weewx.debug = int(config_dict.get('debug', 0))
        if weewx.debug:
            syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
        else:
            syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))

        try:
            syslog.syslog(syslog.LOG_DEBUG, "engine: Initializing engine")

            # Create and initialize the engine
            engine = EngineClass(config_dict)

            syslog.syslog(
                syslog.LOG_INFO,
                "engine: Starting up weewx version %s" % weewx.__version__)

            # Start the engine
            engine.run()

        # Catch any console initialization error:
        except InitializationError, e:
            # Log it:
            syslog.syslog(syslog.LOG_CRIT,
                          "engine: Unable to load driver: %s" % e)
            # See if we should loop, waiting for the console to be ready, or exit:
            if options.loop_on_init:
                syslog.syslog(syslog.LOG_CRIT,
                              "    ****  Waiting 60 seconds then retrying...")
                time.sleep(60)
                syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")
            else:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.IO_ERROR)

        # Catch any recoverable weewx I/O errors:
        except weewx.WeeWxIOError, e:
            # Caught an I/O error. Log it, wait 60 seconds, then try again
            syslog.syslog(syslog.LOG_CRIT,
                          "engine: Caught WeeWxIOError: %s" % e)
            if options.exit:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.IO_ERROR)
            syslog.syslog(syslog.LOG_CRIT,
                          "    ****  Waiting 60 seconds then retrying...")
            time.sleep(60)
            syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")
Example #53
0
def main():
	global _global_myxordatastore
	global _global_manifestdict
	global _batchlock
	global _batchevent
	global _xorstrings
	global _batchrequests
	global _request_restart

	manifestdict = retrieve_manifest_dict()

	# We should detach here.   I don't do it earlier so that error
	# messages are written to the terminal...   I don't do it later so that any
	# threads don't exist already.   If I do put it much later, the code hangs...
	if _commandlineoptions.daemonize:
		daemon.daemonize()

	if _commandlineoptions.database != None:
		print("Using mmap datastore")
		dstype = "mmap"
		source = _commandlineoptions.database
	else:
		print("Using RAM datastore")
		dstype = "RAM"
		source = _commandlineoptions.files

	myxordatastore = fastsimplexordatastore.XORDatastore(manifestdict['blocksize'], manifestdict['blockcount'], dstype, source, _commandlineoptions.use_precomputed_data)

	if dstype == "RAM":
		# now let's put the content in the datastore in preparation to serve it
		print("Loading data into RAM datastore...")
		start = _timer()
		lib.populate_xordatastore(manifestdict, myxordatastore, source, dstype, _commandlineoptions.use_precomputed_data)
		elapsed = (_timer() - start)
		print("Datastore initialized. Took %f seconds." % elapsed)

	# we're now ready to handle clients!
	#_log('ready to start servers!')

	# an ugly hack, but Python's request handlers don't have an easy way to pass arguments
	_global_myxordatastore = myxordatastore
	_global_manifestdict = manifestdict
	_batchlock = threading.Lock()
	_batchevent = threading.Event()
	_batchrequests = 0
	_xorstrings = b''

	# first, let's fire up the RAID-PIR server
	xorserver = service_raidpir_clients(myxordatastore, _commandlineoptions.ip, _commandlineoptions.port)

	# If I should serve legacy clients via HTTP, let's start that up...
	if _commandlineoptions.http:
		service_http_clients(myxordatastore, manifestdict, _commandlineoptions.ip, _commandlineoptions.httpport)

	#_log('servers started!')
	print("Mirror Server started at", _commandlineoptions.ip, ":", _commandlineoptions.port)

	# let's send the mirror information periodically...
	# we should log any errors...
	_send_mirrorinfo()
	counter = 0

	while True:
		if counter > _commandlineoptions.mirrorlistadvertisedelay:
			counter = 0
			try:
				_send_mirrorinfo()
			except Exception as e:
				_log(str(e) + "\n" + str(traceback.format_tb(sys.exc_info()[2])))

		if _request_restart:
			print("Shutting down")
			xorserver.shutdown()
			sys.exit(0)

		counter = counter + 1
		time.sleep(1)
Example #54
0
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
import sys
import time as ltime
import datetime
import daemon
import mysql
import commands
import json

if __name__ == "__main__":
    daemon.daemonize("/tmp/delive.pid")
    mydata = []
    basedir = os.path.abspath(os.path.dirname(__file__))
    filename = os.path.join(basedir, 'config')
    data = json.load(open(filename))
    for channel in data["all"]:
        gid = channel['Gid']
        times = channel['Times']
        one = {"gid": gid, "time": []}
        for time in times:
            start = datetime.datetime.strptime(time['StartTime'],
                                               "%Y-%m-%d %H:%M")
            end = datetime.datetime.strptime(time['EndTime'], "%Y-%m-%d %H:%M")
            one["time"].append({"startime": start, "endtime": end})
        mydata.append(one)

    while True:
        now = datetime.datetime.now()
        print now
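
For reference, a hypothetical config file matching the keys this loop reads ("all", "Gid", "Times", and "StartTime"/"EndTime" in "%Y-%m-%d %H:%M" format); the values below are illustrative only, not taken from the project:

sample_config = {
    "all": [
        {
            "Gid": 1001,   # hypothetical channel id
            "Times": [
                {"StartTime": "2016-01-01 08:00",
                 "EndTime": "2016-01-01 10:00"}
            ]
        }
    ]
}
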
Example #55
0
def main(options, args, engine_class=StdEngine):
    """Prepare the main loop and run it. 

    Mostly consists of a bunch of high-level preparatory calls, protected
    by try blocks in the case of an exception."""

    # Set the logging facility.
    syslog.openlog(options.log_label, syslog.LOG_PID | syslog.LOG_CONS)

    # Set up the signal handlers.
    signal.signal(signal.SIGHUP, sigHUPhandler)
    signal.signal(signal.SIGTERM, sigTERMhandler)

    syslog.syslog(syslog.LOG_INFO,
                  "engine: Initializing weewx version %s" % weewx.__version__)
    syslog.syslog(syslog.LOG_INFO, "engine: Using Python %s" % sys.version)
    syslog.syslog(syslog.LOG_INFO, "engine: Platform %s" % platform.platform())
    syslog.syslog(syslog.LOG_INFO,
                  "engine: Locale is '%s'" % locale.setlocale(locale.LC_ALL))

    # Save the current working directory. A service might
    # change it. In case of a restart, we need to change it back.
    cwd = os.getcwd()

    # Get the path to the configuration file
    config_path = os.path.abspath(args[0])

    if options.daemon:
        syslog.syslog(syslog.LOG_INFO,
                      "engine: pid file is %s" % options.pidfile)
        daemon.daemonize(pidfile=options.pidfile)

    # For backward compatibility, recognize loop_on_init from command-line
    loop_on_init = options.loop_on_init

    # Make sure the system time is not out of date (a common problem with the Raspberry Pi).
    # Do this by making sure the system time is later than the creation time of the config file
    sane = os.stat(config_path).st_ctime

    n = 0
    while weewx.launchtime_ts < sane:
        # Log any problems every minute.
        if n % 120 == 0:
            syslog.syslog(
                syslog.LOG_INFO,
                "engine: Waiting for sane time. Current time is %s" %
                weeutil.weeutil.timestamp_to_string(weewx.launchtime_ts))
        n += 1
        time.sleep(0.5)
        weewx.launchtime_ts = time.time()

    while True:

        os.chdir(cwd)

        config_dict = getConfiguration(config_path)

        # Look for the debug flag. If set, ask for extra logging
        weewx.debug = int(config_dict.get('debug', 0))
        if weewx.debug:
            syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
        else:
            syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))
        syslog.syslog(syslog.LOG_DEBUG, "engine: Debug is %s" % weewx.debug)

        # See if there is a loop_on_init directive in the configuration, but
        # use it only if nothing was specified via command-line.
        if loop_on_init is None:
            loop_on_init = to_bool(config_dict.get('loop_on_init', False))

        try:
            syslog.syslog(syslog.LOG_DEBUG, "engine: Initializing engine")

            # Create and initialize the engine
            engine = engine_class(config_dict)

            syslog.syslog(
                syslog.LOG_INFO,
                "engine: Starting up weewx version %s" % weewx.__version__)

            # Start the engine. It should run forever unless an exception
            # occurs. Log it if the function returns.
            engine.run()
            syslog.syslog(
                syslog.LOG_CRIT,
                "engine: Unexpected exit from main loop. Program exiting.")

        # Catch any console initialization error:
        except InitializationError as e:
            # Log it:
            syslog.syslog(syslog.LOG_CRIT,
                          "engine: Unable to load driver: %s" % e)
            # See if we should loop, waiting for the console to be ready.
            # Otherwise, just exit.
            if loop_on_init:
                syslog.syslog(syslog.LOG_CRIT,
                              "    ****  Waiting 60 seconds then retrying...")
                time.sleep(60)
                syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")
            else:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.IO_ERROR)

        # Catch any recoverable weewx I/O errors:
        except weewx.WeeWxIOError as e:
            # Caught an I/O error. Log it, wait 60 seconds, then try again
            syslog.syslog(syslog.LOG_CRIT,
                          "engine: Caught WeeWxIOError: %s" % e)
            if options.exit:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.IO_ERROR)
            syslog.syslog(syslog.LOG_CRIT,
                          "    ****  Waiting 60 seconds then retrying...")
            time.sleep(60)
            syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")

        except (weedb.CannotConnect, weedb.DisconnectError) as e:
            # No connection to the database server. Log it, wait 120 seconds, then try again
            syslog.syslog(syslog.LOG_CRIT,
                          "engine: Database connection exception: %s" % e)
            if options.exit:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.DB_ERROR)
            syslog.syslog(syslog.LOG_CRIT,
                          "    ****  Waiting 2 minutes then retrying...")
            time.sleep(120)
            syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")

        except weedb.OperationalError as e:
            # Caught a database error. Log it, wait 120 seconds, then try again
            syslog.syslog(
                syslog.LOG_CRIT,
                "engine: Database OperationalError exception: %s" % e)
            if options.exit:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.DB_ERROR)
            syslog.syslog(syslog.LOG_CRIT,
                          "    ****  Waiting 2 minutes then retrying...")
            time.sleep(120)
            syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")

        except OSError as e:
            # Caught an OS error. Log it, wait 10 seconds, then try again
            syslog.syslog(syslog.LOG_CRIT, "engine: Caught OSError: %s" % e)
            weeutil.weeutil.log_traceback("    ****  ", syslog.LOG_DEBUG)
            syslog.syslog(syslog.LOG_CRIT,
                          "    ****  Waiting 10 seconds then retrying...")
            time.sleep(10)
            syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")

        except Restart:
            syslog.syslog(syslog.LOG_NOTICE,
                          "engine: Received signal HUP. Restarting.")

        except Terminate:
            syslog.syslog(
                syslog.LOG_INFO,
                "engine: Terminating weewx version %s" % weewx.__version__)
            weeutil.weeutil.log_traceback("    ****  ", syslog.LOG_DEBUG)
            # Reraise the exception (this should cause the program to exit)
            raise

        # Catch any keyboard interrupts and log them
        except KeyboardInterrupt:
            syslog.syslog(syslog.LOG_CRIT, "engine: Keyboard interrupt.")
            # Reraise the exception (this should cause the program to exit)
            raise

        # Catch any non-recoverable errors. Log them, exit
        except Exception as ex:
            # Caught unrecoverable error. Log it, exit
            syslog.syslog(syslog.LOG_CRIT,
                          "engine: Caught unrecoverable exception in engine:")
            syslog.syslog(syslog.LOG_CRIT, "    ****  %s" % ex)
            # Include a stack traceback in the log:
            weeutil.weeutil.log_traceback("    ****  ", syslog.LOG_CRIT)
            syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting.")
            # Reraise the exception (this should cause the program to exit)
            raise
Example #56
0
def main():
    global _global_myxordatastore
    global _global_manifestdict
    global _batchlock
    global _batchevent
    global _xorstrings
    global _batchrequests
    global _request_restart

    manifestdict = retrieve_manifest_dict()

    # We should detach here.   I don't do it earlier so that error
    # messages are written to the terminal...   I don't do it later so that any
    # threads don't exist already.   If I do put it much later, the code hangs...
    if _commandlineoptions.daemonize:
        daemon.daemonize()

    if _commandlineoptions.database != None:
        print("Using mmap datastore")
        dstype = "mmap"
        source = _commandlineoptions.database
    else:
        print("Using RAM datastore")
        dstype = "RAM"
        source = _commandlineoptions.files

    myxordatastore = fastsimplexordatastore.XORDatastore(
        manifestdict['blocksize'], manifestdict['blockcount'], dstype, source,
        _commandlineoptions.use_precomputed_data)

    if dstype == "RAM":
        # now let's put the content in the datastore in preparation to serve it
        print("Loading data into RAM datastore...")
        start = _timer()
        lib.populate_xordatastore(manifestdict, myxordatastore, source, dstype,
                                  _commandlineoptions.use_precomputed_data)
        elapsed = (_timer() - start)
        print("Datastore initialized. Took %f seconds." % elapsed)

    # we're now ready to handle clients!
    #_log('ready to start servers!')

    # an ugly hack, but Python's request handlers don't have an easy way to pass arguments
    _global_myxordatastore = myxordatastore
    _global_manifestdict = manifestdict
    _batchlock = threading.Lock()
    _batchevent = threading.Event()
    _batchrequests = 0
    _xorstrings = b''

    # first, let's fire up the RAID-PIR server
    xorserver = service_raidpir_clients(myxordatastore, _commandlineoptions.ip,
                                        _commandlineoptions.port)

    # If I should serve legacy clients via HTTP, let's start that up...
    if _commandlineoptions.http:
        service_http_clients(myxordatastore, manifestdict,
                             _commandlineoptions.ip,
                             _commandlineoptions.httpport)

    #_log('servers started!')
    print("Mirror Server started at", _commandlineoptions.ip, ":",
          _commandlineoptions.port)

    # let's send the mirror information periodically...
    # we should log any errors...
    _send_mirrorinfo()
    counter = 0

    while True:
        if counter > _commandlineoptions.mirrorlistadvertisedelay:
            counter = 0
            try:
                _send_mirrorinfo()
            except Exception as e:
                _log(
                    str(e) + "\n" +
                    str(traceback.format_tb(sys.exc_info()[2])))

        if _request_restart:
            print("Shutting down")
            xorserver.shutdown()
            sys.exit(0)

        counter = counter + 1
        time.sleep(1)
Example #57
0
def main():
  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()


  # Check if we are running in testmode.
  if TEST_NM:
    nodemanager_pid = os.getpid()
    servicelogger.log("[INFO]: Running nodemanager in test mode on port <nodemanager_port>, "+
                      "pid %s." % str(nodemanager_pid))
    nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'), 'w')
    
    # Write out the pid of the nodemanager process that we started to a file.
    # This is only done if the nodemanager was started in test mode.
    try:
      nodeman_pid_file.write(str(nodemanager_pid))
    finally:
      nodeman_pid_file.close()

  else:
    # ensure that only one instance is running at a time...
    gotlock = runonce.getprocesslock("seattlenodemanager")

    if gotlock == True:
      # I got the lock.   All is well...
      pass
    else:
      if gotlock:
        servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
                        ") is running")
      else:
        servicelogger.log("[ERROR]:Another node manager process is running")
      return



  # Feature add for #1031: Log information about the system in the nm log...
  servicelogger.log('[INFO]:platform.python_version(): "' + 
    str(platform.python_version())+'"')
  servicelogger.log('[INFO]:platform.platform(): "' + 
    str(platform.platform())+'"')

  # uname on Android only yields 'Linux', let's be more specific.
  try:
    import android
    servicelogger.log('[INFO]:platform.uname(): Android / "' + 
      str(platform.uname())+'"')
  except ImportError:
    servicelogger.log('[INFO]:platform.uname(): "'+str(platform.uname())+'"')

  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")
  
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  
  # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
  #            seattle crontab entry has been installed in the crontab.
  #            Do this here because the "nodeman.cfg" needs to have been read
  #            into configuration via the persist module.
  if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
    if 'crontab_updated_for_2009_installer' not in configuration or \
          configuration['crontab_updated_for_2009_installer'] == False:
      try:
        # crontab may not exist on Android, therefore let's not check
        # if we are running on Android. See #1302 and #1254.
        try:
          import android
        except ImportError:
          import update_crontab_entry
          modified_crontab_entry = \
              update_crontab_entry.modify_seattle_crontab_entry()
          # If updating the seattle crontab entry succeeded, then update the
          # 'crontab_updated_for_2009_installer' so the nodemanager no longer
          # tries to update the crontab entry when it starts up.
          if modified_crontab_entry:
            configuration['crontab_updated_for_2009_installer'] = True
            persist.commit_object(configuration,"nodeman.cfg")

      except Exception, e:
        exception_traceback_string = traceback.format_exc()
        servicelogger.log("[ERROR]: The following error occurred when " \
                            + "modifying the crontab for the new 2009 " \
                            + "seattle crontab entry: " \
                            + exception_traceback_string)
Example #58
0
File: engine.py  Project: ngulden/weewx
def main(options, args, engine_class=StdEngine):
    """Prepare the main loop and run it. 

    Mostly consists of a bunch of high-level preparatory calls, protected
    by try blocks in the case of an exception."""

    # Set the logging facility.
    syslog.openlog(options.log_label, syslog.LOG_PID | syslog.LOG_CONS)

    # Set up the signal handlers.
    signal.signal(signal.SIGHUP, sigHUPhandler)
    signal.signal(signal.SIGTERM, sigTERMhandler)

    syslog.syslog(syslog.LOG_INFO, "engine: Initializing weewx version %s" % weewx.__version__)
    syslog.syslog(syslog.LOG_INFO, "engine: Using Python %s" % sys.version)
    syslog.syslog(syslog.LOG_INFO, "engine: Platform %s" % platform.platform())

    # Save the current working directory. A service might
    # change it. In case of a restart, we need to change it back.
    cwd = os.getcwd()

    if options.daemon:
        syslog.syslog(syslog.LOG_INFO, "engine: pid file is %s" % options.pidfile)
        daemon.daemonize(pidfile=options.pidfile)

    # for backward compatibility, recognize loop_on_init from command-line
    loop_on_init = options.loop_on_init

    # be sure that the system has a reasonable time (at least 1 jan 2000).
    # log any problems every minute.
    n = 0
    while weewx.launchtime_ts < 946684800:
        if n % 120 == 0:
            syslog.syslog(syslog.LOG_INFO,
                          "engine: waiting for sane time.  current time is %s"
                          % weeutil.weeutil.timestamp_to_string(weewx.launchtime_ts))
        n += 1
        time.sleep(0.5)
        weewx.launchtime_ts = time.time()

    while True:

        os.chdir(cwd)

        config_path = os.path.abspath(args[0])
        config_dict = getConfiguration(config_path)

        # Look for the debug flag. If set, ask for extra logging
        weewx.debug = int(config_dict.get('debug', 0))
        if weewx.debug:
            syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
        else:
            syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))

        # See if there is a loop_on_init directive in the configuration, but
        # use it only if nothing was specified via command-line.
        if loop_on_init is None:
            loop_on_init = to_bool(config_dict.get('loop_on_init', False))

        try:
            syslog.syslog(syslog.LOG_DEBUG, "engine: Initializing engine")

            # Create and initialize the engine
            engine = engine_class(config_dict)
    
            syslog.syslog(syslog.LOG_INFO, "engine: Starting up weewx version %s" % weewx.__version__)

            # Start the engine. It should run forever unless an exception
            # occurs. Log it if the function returns.
            engine.run()
            syslog.syslog(syslog.LOG_CRIT, "engine: Unexpected exit from main loop. Program exiting.")
    
        # Catch any console initialization error:
        except InitializationError, e:
            # Log it:
            syslog.syslog(syslog.LOG_CRIT, "engine: Unable to load driver: %s" % e)
            # See if we should loop, waiting for the console to be ready.
            # Otherwise, just exit.
            if loop_on_init:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Waiting 60 seconds then retrying...")
                time.sleep(60)
                syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")
            else:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.IO_ERROR)

        # Catch any recoverable weewx I/O errors:
        except weewx.WeeWxIOError, e:
            # Caught an I/O error. Log it, wait 60 seconds, then try again
            syslog.syslog(syslog.LOG_CRIT, "engine: Caught WeeWxIOError: %s" % e)
            if options.exit:
                syslog.syslog(syslog.LOG_CRIT, "    ****  Exiting...")
                sys.exit(weewx.IO_ERROR)
            syslog.syslog(syslog.LOG_CRIT, "    ****  Waiting 60 seconds then retrying...")
            time.sleep(60)
            syslog.syslog(syslog.LOG_NOTICE, "engine: retrying...")