def starter(fpid):
    if fpid.find('.pid') == -1:
        fpid = fpid + '.pid'
    openlog(fpid.replace('.pid', ''))
    fpid = os.getcwd() + '/' + fpid
    pidfile = fpid
    param = 'start'
    if len(sys.argv) > 1:
        param = sys.argv[1]
    if param == 'kill':
        os.unlink(fpid)
        os.system("killall -9 %s" % fpid.replace('.pid', ''))
        print("killall -9 %s" % fpid.replace('.pid', ''))
        exit(0)
    if param == "stop" or param == "restart":
        if os.path.isfile(fpid):
            f = open(fpid, 'r')
            pid = f.readline()
            os.system("kill -9 %s" % pid)
            print("stopped %s" % pid)
            f.close()
            os.unlink(fpid)
        else:
            print("is already stopped")
    if param == "restart":
        param = "start"
    if param == "stop":
        exit(0)
    if param == 'start':
        if os.path.isfile(fpid):
            syslog(LOG_INFO, "second start")
            print("already started")
            exit(0)
        else:
            print("starting")
            daemon.createDaemon()
            signal.signal(signal.SIGTERM, cleanup)
            f = open(fpid, 'w+')
            f.write(str(os.getpid()))
            f.close()
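
# --- A minimal sketch of the zero-argument createDaemon() most snippets in
# this collection call. This is an assumption: the real daemon modules used
# above may differ in details (umask, working directory, return value).
# It is the classic UNIX double-fork.
import os

def createDaemon():
    if os.fork() > 0:          # first fork: let the parent exit
        os._exit(0)
    os.setsid()                # new session, detach from controlling tty
    if os.fork() > 0:          # second fork: never reacquire a tty
        os._exit(0)
    os.chdir('/')              # avoid pinning a mounted filesystem
    os.umask(0)
    devnull = os.open(os.devnull, os.O_RDWR)
    for fd in (0, 1, 2):       # redirect stdin/stdout/stderr to /dev/null
        os.dup2(devnull, fd)
    if devnull > 2:
        os.close(devnull)
    return 0                   # several callers here treat 0 as success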
def main(argv=None):
    global options
    if argv is None:
        argv = sys.argv
    (options, args) = parser.parse_args()
    if options.daemon:
        daemon.createDaemon()
    loggerInit(options)
    while True:
        try:
            logging.debug("Creating RandomPlayList")
            r = RandomPlayList(doupdate=options.update,
                               doclear=options.clear,
                               nb_keeped=options.keep,
                               nb_queued=options.enqueue,
                               host=options.host,
                               passwd=options.password,
                               port=options.port,
                               mpdconf=options.mpdconf,
                               musicdir=options.musicdir,
                               exclude=options.exclude,
                               equiproba=options.equiproba)
            r.feed_mpd()
        except Exception, e:
            logging.debug(e)
            raise e
def main(options):
    if options.daemon:
        createDaemon(options.pidfile, options.logfile)

    logger = None
    if options.logfile:
        logger = logging.getLogger('bzcache')
        logger.setLevel(logging.DEBUG)
        handler = logging.FileHandler(options.logfile)
        logger.addHandler(handler)

    if options.pidfile is not None:
        fp = open(options.pidfile, "w")
        fp.write("%d\n" % os.getpid())
        fp.close()

    handler = MessageHandler(options.es_server, logger)
    pulse = consumers.BugzillaConsumer(
        applabel='[email protected]|bz_monitor_' + socket.gethostname())
    pulse.configure(topic="#", callback=handler.got_message,
                    durable=options.durable)

    while True:
        try:
            handler.log('starting pulse listener')
            pulse.listen()
        except Exception, inst:
            handler.log('exception while listening for pulse messages: %s'
                        % inst)
            time.sleep(600)
def main():
    parser = optparse.OptionParser()
    parser.add_option('--pidfile', dest='pidfile', default='translator.pid',
                      help='path to file for logging pid')
    parser.add_option('--logfile', dest='logfile', default='stdout.log',
                      help='path to file for stdout logging')
    parser.add_option('--logdir', dest='logdir', default='logs',
                      help='directory to store other log files')
    parser.add_option('--daemon', dest='daemon', action='store_true',
                      help='run as daemon (posix only)')
    parser.add_option('--durable', dest='durable', action='store_true',
                      default=False, help='register a durable queue')
    parser.add_option('--display-only', dest='display_only',
                      action='store_true', default=False,
                      help='only display build properties and don\'t add '
                           'jobs to the queue')
    parser.add_option('--pulse-cfg', dest='pulse_cfg', default='',
                      help='optional config file containing optional sections '
                           '[consumer] and [publisher] for nondefault Pulse '
                           'configs')
    options, args = parser.parse_args()

    pulse_cfgs = {'consumer': None, 'publisher': None}
    if options.pulse_cfg:
        if not os.path.exists(options.pulse_cfg):
            print 'Config file does not exist!'
            return
        pulse_cfgfile = ConfigParser.ConfigParser()
        pulse_cfgfile.read(options.pulse_cfg)
        for section in pulse_cfgs.keys():
            pulse_cfgs[section] = PulseConfiguration.read_from_config(
                pulse_cfgfile, section)

    if options.daemon:
        if os.access(options.logfile, os.F_OK):
            os.remove(options.logfile)
        createDaemon(options.pidfile, options.logfile)

    f = open(options.pidfile, 'w')
    f.write("%d\n" % os.getpid())
    f.close()

    service = PulseBuildbotTranslator(durable=options.durable,
                                      logdir=options.logdir,
                                      display_only=options.display_only,
                                      consumer_cfg=pulse_cfgs['consumer'],
                                      publisher_cfg=pulse_cfgs['publisher'])
    service.start()
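
# --- Hypothetical sketch of the two-argument createDaemon(pidfile, logfile)
# the translator mains above call: same double-fork idea, but stdout/stderr
# are pointed at the named log file. The real helper may differ; note the
# callers above write the pidfile themselves after this returns.
import os
import sys

def createDaemon(pidfile, logfile):
    if os.fork() > 0:
        os._exit(0)
    os.setsid()
    if os.fork() > 0:
        os._exit(0)
    os.umask(0)
    sys.stdout.flush()
    sys.stderr.flush()
    log_fd = os.open(logfile, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o644)
    devnull = os.open(os.devnull, os.O_RDONLY)
    os.dup2(devnull, 0)        # stdin from /dev/null
    os.dup2(log_fd, 1)         # stdout to the log file
    os.dup2(log_fd, 2)         # stderr to the log file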
def main():
    usage = "usage: pstream3d [options]"
    parser = OptionParser(usage)
    parser.add_option("--daemon", help="Run as daemon",
                      action="store_true", dest="daemon")
    parser.add_option("--ip", help="IP address to listen on", dest="ip")
    parser.add_option("--port", help="Port to bind to", dest="port")
    parser.add_option("--pid", help="Process pid file", dest="pid")
    (options, args) = parser.parse_args()

    if options.daemon:
        ret = createDaemon()
    if options.pid:
        pid = os.getpid()
        f = open(options.pid, "w")
        f.write(str(pid))
        f.close()

    if options.ip and options.port:
        srv = CreateServer(options.ip, options.port)
    else:
        srv = CreateServer()

    while True:
        try:
            srv.GetRequestHandler().UpdateStatus()
            sleep(.01)
        except KeyboardInterrupt:
            srv.__del__()
            break
def main():
    parser = optparse.OptionParser()
    parser.add_option('--pidfile', dest='pidfile', default='translator.pid',
                      help='path to file for logging pid')
    parser.add_option('--logfile', dest='logfile', default='stdout.log',
                      help='path to file for stdout logging')
    parser.add_option('--logdir', dest='logdir', default='logs',
                      help='directory to store other log files')
    parser.add_option('--daemon', dest='daemon', action='store_true',
                      help='run as daemon (posix only)')
    parser.add_option('--durable', dest='durable', action='store_true',
                      default=False, help='register a durable queue')
    parser.add_option('--push-message', dest='message',
                      help='path to file of a Pulse message to process')
    parser.add_option('--display-only', dest='display_only',
                      action='store_true', default=False,
                      help='only display build properties and don\'t add '
                           'jobs to the queue')
    options, args = parser.parse_args()

    if options.daemon:
        if os.access(options.logfile, os.F_OK):
            os.remove(options.logfile)
        createDaemon(options.pidfile, options.logfile)

    f = open(options.pidfile, 'w')
    f.write("%d\n" % os.getpid())
    f.close()

    service = PulseBuildbotTranslator(durable=options.durable,
                                      logdir=options.logdir,
                                      message=options.message,
                                      display_only=options.display_only)
    service.start()
def main():
    try:
        if configuration.c.getCfgValueAsBool("createDaemon"):
            # detach from controlling tty and go to background
            daemon.createDaemon()
        # start the server
        port = configuration.c.getCfgValueAsInt("port")
        server = ThreadingHTTPServer(('', port), HttpServerHandler)
        # load motes
        motes.addAll()
        # start listening thread
        listenThread = threading.Thread(target=listenSerial)
        listenThread.start()
        # report ok and enter the main loop
        print("<remoteaccess>: started, listening to TCP port {}, serial baudrate {}".format(
            port, configuration.c.getCfgValueAsInt("baudrate")))
        server.serve_forever()
    except Exception as e:
        print("<remoteaccess>: exception occurred:")
        print(e)
        print(traceback.format_exc())
        return 1
def main():
    try:
        if configuration.c.getCfgValueAsBool("createDaemon"):
            # detach from controlling tty and go to background
            daemon.createDaemon()
        # load users
        initalizeUsers()
        # start the server
        port = configuration.c.getCfgValueAsInt("port")
        server = ThreadingHTTPServer(('', port), HttpServerHandler)
        # load motes
        motes.addAll()
        # report ok and enter the main loop
        print("<http-server>: started, listening to TCP port {}, serial baudrate {}".format(
            port, configuration.c.getCfgValueAsInt("baudrate")))
        server.serve_forever()
    except SystemExit:
        raise  # XXX
    except Exception as e:
        print("<http-server>: exception occurred:")
        print(e)
        print(traceback.format_exc())
        sys.exit(1)
def main():
    try:
        configuration.setupPaths()
        if configuration.c.getCfgValueAsBool("createDaemon"):
            # detach from controlling tty and go to background
            daemon.createDaemon()
        # load users
        initalizeUsers()
        # start the server
        port = configuration.c.getCfgValueAsInt("port")
        server = ThreadingHTTPServer(('', port), HttpServerHandler)
        # load motes
        motes.addAll()
        # report ok and enter the main loop
        print("<http-server>: started, listening to TCP port {}, serial baudrate {}".format(
            port, configuration.c.getCfgValueAsInt("baudrate")))
        server.serve_forever()
    except SystemExit:
        raise  # XXX
    except Exception as e:
        print("<http-server>: exception occurred:")
        print(e)
        print(traceback.format_exc())
        sys.exit(1)
def run(path, repository):
    # imports
    import daemon

    # daemonize
    pid = daemon.createDaemon()

    # spit pid
    try:
        file_pid = open(os.path.join(Config.PATH_TETHERBALL_PROC, repository), 'w')
        file_pid.write(str(pid))
        file_pid.close()
    except Exception, e:
        # l = Logger(Config)
        # l.debug( "Failed to write pid into file: %s" )
        n = Notifier(title=NOTIFIER_TITLE)
        n.message(message=("Failed to write pid into file: %s" % e))
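
# --- Several snippets here write pidfiles with a plain open(..., 'w'), which
# silently clobbers the pidfile of an already-running instance. A hedged
# alternative sketch: create the pidfile atomically and fail if one already
# exists. The function name below is illustrative, not from the original code.
import os

def write_pidfile_exclusive(path):
    # O_EXCL makes creation fail with EEXIST if the file is already there
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
    try:
        os.write(fd, ("%d\n" % os.getpid()).encode())
    finally:
        os.close(fd)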
                      preload='off')
if args.gpulang == 'CUDA':
    source = ex.petsc.source(args.library, args.num, '.cu')
else:
    # Using the convention of OpenCL code residing in source files ending
    # in 'OpenCL.c' (at least for snes/ex52)
    source = ex.petsc.source(args.library, args.num, 'OpenCL.c')
sizes = {}
times = {}
events = {}
log = not args.daemon
if args.daemon:
    import daemon
    print('Starting daemon')
    daemon.createDaemon('.')
for run in args.runs:
    name, stropts = run.split('=', 1)
    opts = dict([t if len(t) == 2 else (t[0], None)
                 for t in [arg.split('=', 1) for arg in stropts.split(' ')]])
    if args.dmType == 'DMDA':
        sizes[name] = []
        times[name] = []
        events[name] = {}
        run_DMDA(ex, name, opts, args, sizes, times, events, log=log)
    elif args.dmType == 'DMComplex':
        sizes[name] = {}
        times[name] = {}
        index += 1
        master = sys.argv[index]
    elif arg == "-d":
        daemon_mode = True
    index += 1

if port is None:
    print "Usage: %s -d -i <id> -p <port> -m <host>:<port>" % sys.argv[0]
    sys.exit(1)

if daemon_mode:
    # TODO: How to log this? Do we need to log this?
    # print "Daemonizing..."
    # sys.stdout.flush()
    retCode = daemon.createDaemon()
    # print "Starting Supervisor..."
    # sys.stdout.flush()
    # NOTE: Cannot do sys.stdout.flush()
    # Will have funny behavior...

os.chdir(workdir)
pid = os.getpid()
fname = "supervisor-py-%d.log" % pid

### Daemon Stuff
fname1 = "supervisor-sh-%d.log" % port
fd = os.open(fname1, os.O_APPEND | os.O_WRONLY)  # standard input (0)
def runCouchPotato(options, base_path, args):
    # Load settings
    from couchpotato.environment import Env
    settings = Env.get('settings')
    settings.setFile(options.config_file)

    # Create data dir if needed
    data_dir = os.path.expanduser(Env.setting('data_dir'))
    if data_dir == '':
        data_dir = os.path.join(base_path, '_data')
    if not os.path.isdir(data_dir):
        os.makedirs(data_dir)

    # Create logging dir
    log_dir = os.path.join(data_dir, 'logs')
    if not os.path.isdir(log_dir):
        os.mkdir(log_dir)

    # Daemonize app
    if options.daemonize:
        createDaemon()

    # Register environment settings
    Env.set('uses_git', not options.git)
    Env.set('app_dir', base_path)
    Env.set('data_dir', data_dir)
    Env.set('log_path', os.path.join(log_dir, 'CouchPotato.log'))
    Env.set('db_path', 'sqlite:///' + os.path.join(data_dir, 'couchpotato.db'))
    Env.set('cache_dir', os.path.join(data_dir, 'cache'))
    Env.set('cache', FileSystemCache(os.path.join(Env.get('cache_dir'), 'python')))
    Env.set('quiet', options.quiet)
    Env.set('daemonize', options.daemonize)
    Env.set('args', args)

    # Determine debug
    debug = options.debug or Env.setting('debug', default=False)
    Env.set('debug', debug)

    # Only run once when debugging
    if os.environ.get('WERKZEUG_RUN_MAIN') or not debug:

        # Logger
        logger = logging.getLogger()
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
        level = logging.DEBUG if debug else logging.INFO
        logger.setLevel(level)

        # To screen
        if debug and not options.quiet and not options.daemonize:
            hdlr = logging.StreamHandler(sys.stderr)
            hdlr.setFormatter(formatter)
            logger.addHandler(hdlr)

        # To file
        hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10)
        hdlr2.setFormatter(formatter)
        logger.addHandler(hdlr2)

        # Disable server access log
        server_log = logging.getLogger('werkzeug')
        server_log.disabled = True

        # Start logging
        from couchpotato.core.logger import CPLog
        log = CPLog(__name__)
        log.debug('Started with options %s' % options)

        # Load configs & plugins
        loader = Env.get('loader')
        loader.preload(root=base_path)
        loader.run()

        # Load migrations
        from migrate.versioning.api import version_control, db_version, version, upgrade
        db = Env.get('db_path')
        repo = os.path.join(base_path, 'couchpotato', 'core', 'migration')
        logging.getLogger('migrate').setLevel(logging.WARNING)  # Disable logging for migration

        latest_db_version = version(repo)

        initialize = True
        try:
            current_db_version = db_version(db, repo)
            initialize = False
        except:
            version_control(db, repo, version=latest_db_version)
            current_db_version = db_version(db, repo)

        if current_db_version < latest_db_version and not debug:
            log.info('Doing database upgrade. From %d to %d'
                     % (current_db_version, latest_db_version))
            upgrade(db, repo)

        # Configure Database
        from couchpotato.core.settings.model import setup
        setup()

        fireEventAsync('app.load')
        if initialize:
            fireEventAsync('app.initialize')

    # Create app
    from couchpotato import app
    api_key = Env.setting('api_key')
    url_base = '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else ''
    reloader = debug and not options.daemonize

    # Basic config
    app.secret_key = api_key
    config = {
        'use_reloader': reloader,
        'host': Env.setting('host', default='0.0.0.0'),
        'port': Env.setting('port', default=5000),
    }

    # Static path
    web.add_url_rule(url_base + '/static/<path:filename>',
                     endpoint='static', view_func=app.send_static_file)

    # Register modules
    app.register_blueprint(web, url_prefix='%s/' % url_base)
    app.register_blueprint(api, url_prefix='%s/%s/' % (url_base, api_key))

    # Go go go!
    app.run(**config)
def process_socket(options):
    """
    Process socket connections.

    .. note::
        This is not a multithreaded process. So only one connection can be
        handled at any given time. But given the nature of munin, this is
        Good Enough.
    """
    retcode = 0
    if options.no_daemon:
        # set up on-screen-logging
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger().addHandler(console_handler)
    else:
        # fork fork
        retcode = createDaemon()

        # set up a rotating file log
        rfhandler = RotatingFileHandler(
            join(options.log_dir, 'daemon.log'),
            maxBytes=100 * 1024,
            backupCount=5)
        rfhandler.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger().addHandler(rfhandler)

    # write down some house-keeping information
    LOG.info('New process PID: %d' % getpid())
    pidfile = open(join(options.log_dir, 'pypmmn.pid'), 'w')
    pidfile.write(str(getpid()))
    pidfile.close()
    LOG.info('PID file created in %s' % join(options.log_dir, 'pypmmn.pid'))
    LOG.info('Socket handler started.')

    host = ''  # listens on all addresses. TODO: make this configurable
    port = int(options.port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((host, port))
    s.listen(1)
    LOG.info('Listening on host %r, port %r' % (host, port))
    conn, addr = s.accept()
    handler = CmdHandler(conn.recv, conn.send, options)
    handler.do_version(None)
    handler.reset_time()
    LOG.info("Accepting incoming connection from %s" % (addr, ))
    while True:
        data = conn.recv(1024)
        if not data.strip():
            sleep(1)
        if handler.is_timed_out():
            LOG.info('Session timeout.')
            conn.shutdown(socket.SHUT_RDWR)
            conn.close()
            LOG.info('Listening on host %r, port %r' % (host, port))
            conn, addr = s.accept()
            handler.reset_time()
            handler.get_fun = conn.recv
            handler.put_fun = conn.send
            handler.do_version(None)
            LOG.info("Accepting incoming connection from %s" % (addr, ))
            try:
                data = conn.recv(1024)
            except socket.error, exc:
                LOG.warning("Socket error. Reinitialising.: %s" % exc)
                conn, addr = s.accept()
                handler.reset_time()
                handler.get_fun = conn.recv
                handler.put_fun = conn.send
                handler.do_version(None)
                LOG.info("Accepting incoming connection from %s" % (addr, ))
        if data.strip() == 'quit':
            LOG.info('Client requested session end. Closing connection.')
            conn.shutdown(socket.SHUT_RDWR)
            conn.close()
            LOG.info('Listening on host %r, port %r' % (host, port))
            conn, addr = s.accept()
            handler.reset_time()
            handler.get_fun = conn.recv
            handler.put_fun = conn.send
            handler.do_version(None)
            LOG.info("Accepting incoming connection from %s" % (addr, ))
            continue
        handler.handle_input(data)
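
# --- The close/re-accept/rebind sequence above is repeated three times. A
# hedged refactoring sketch; the helper name is illustrative and not part of
# pypmmn, and it reuses LOG and the handler from the snippet above.
def _reaccept(s, handler, host, port):
    LOG.info('Listening on host %r, port %r' % (host, port))
    conn, addr = s.accept()
    handler.reset_time()
    handler.get_fun = conn.recv
    handler.put_fun = conn.send
    handler.do_version(None)
    LOG.info("Accepting incoming connection from %s" % (addr, ))
    return conn, addr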
def main():
    """Initialization, configuration reading, start of server over sockets
    on given port"""
    global conf, log

    # TODO: Register signals
    # SIGINT, SIGPIPE?, others?
    signal.signal(signal.SIGINT, sigint_handler)

    # At this stage, logging on stdout
    log = logs.Logging()
    log.info("Starting server")
    conf = UserConfiguration(logger=log)

    # Create primary pid file
    create_pid_file()

    # Create global state
    global_state = GlobalState()

    if conf.mode == 'daemon':
        # Destroy pid file, will be re-created after fork
        destroy_pid_file()
        # Fork, detach pipes, chdir to '/' etc...
        daemon.createDaemon()
        create_pid_file()

    # Create server socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(("", conf.port))
    server_socket.listen(conf.max_simultaneous_connections)

    # Redirect logging to logfile
    log.init_stage2(conf)
    log.info("Configuration loaded")

    # Create pidfile
    log.info("Starting audio server")
    audio.port = conf.audio_port
    audio.host = conf.audio_host
    audio.init(logger=log, config=conf)

    log.info("Starting audio event delivery thread")
    audio_event_delivery_thread = threading.Thread(
        target=audio_event_delivery,
        name="Audio event delivery",
        kwargs={'global_state': global_state})
    audio_event_delivery_thread.start()
    # Terminate and join this thread on exit()
    atexit.register(join_audio_event_delivery_thread,
                    audio_event_delivery_thread)

    log.info("Waiting for connections")
    atexit.register(join_terminated_client_threads)
    while True:
        log.info("Waiting for connections")
        (client_socket, address) = server_socket.accept()
        join_terminated_client_threads()
        log.debug("Connection ready")
        client_provider = threading.Thread(
            target=serve_client,
            name="Provider (" + str(client_socket.fileno()) + ")",
            kwargs={'method': 'socket',
                    'socket': client_socket,
                    'global_state': global_state})
        client_threads.append(client_provider)
        client_provider.start()
        log.info("Accepted new client, thread started")
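
# --- Hypothetical sketch of the create_pid_file()/destroy_pid_file() helpers
# the server above assumes; the path and behaviour are assumptions, not taken
# from the original source.
import os

PID_PATH = '/var/run/speech-server.pid'   # assumed location

def create_pid_file():
    with open(PID_PATH, 'w') as f:
        f.write("%d\n" % os.getpid())

def destroy_pid_file():
    if os.path.isfile(PID_PATH):
        os.remove(PID_PATH)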
class DenyHosts(object):
    def __init__(self, logfile, prefs, lock_file,
                 ignore_offset=0, first_time=0,
                 noemail=0, daemon=0, foreground=0):
        self.__denied_hosts = {}
        self.__prefs = prefs
        self.__lock_file = lock_file
        self.__first_time = first_time
        self.__noemail = noemail
        self.__report = Report(prefs.get("HOSTNAME_LOOKUP"),
                               is_true(prefs['SYSLOG_REPORT']))
        self.__daemon = daemon
        self.__foreground = foreground
        self.__sync_server = prefs.get('SYNC_SERVER')
        self.__sync_upload = is_true(prefs.get("SYNC_UPLOAD"))
        self.__sync_download = is_true(prefs.get("SYNC_DOWNLOAD"))
        self.__iptables = prefs.get("IPTABLES")
        self.__blockport = prefs.get("BLOCKPORT")
        self.__pfctl = prefs.get("PFCTL_PATH")
        self.__pftable = prefs.get("PF_TABLE")

        r = Restricted(prefs)
        self.__restricted = r.get_restricted()
        info("restricted: %s", self.__restricted)
        self.init_regex()

        try:
            self.file_tracker = FileTracker(self.__prefs.get('WORK_DIR'),
                                            logfile)
        except Exception, e:
            self.__lock_file.remove()
            die("Can't read: %s" % logfile, e)

        self.__allowed_hosts = AllowedHosts(self.__prefs)

        if ignore_offset:
            last_offset = 0
        else:
            last_offset = self.file_tracker.get_offset()

        if last_offset is not None:
            self.get_denied_hosts()
            info("Processing log file (%s) from offset (%ld)",
                 logfile, last_offset)
            offset = self.process_log(logfile, last_offset)
            if offset != last_offset:
                self.file_tracker.save_offset(offset)
                last_offset = offset
        elif not daemon:
            info("Log file size has not changed. Nothing to do.")

        if daemon and not foreground:
            info("launching DenyHosts daemon (version %s)..." % VERSION)
            #logging.getLogger().setLevel(logging.WARN)

            # remove lock file since createDaemon will create a new pid.
            # A new lock will be created when runDaemon is invoked
            self.__lock_file.remove()

            retCode = createDaemon()
            if retCode == 0:
                self.runDaemon(logfile, last_offset)
            else:
                die("Error creating daemon: %s (%d)" % (retCode[1], retCode[0]))
        elif foreground:
            info("launching DenyHosts (version %s)..." % VERSION)
            self.__lock_file.remove()
            self.runDaemon(logfile, last_offset)
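
# --- The DenyHosts snippet above treats createDaemon()'s return value as 0
# on success or an (errno, strerror) tuple on failure, judging from the
# die(...) formatting. A hedged sketch of a fork helper honouring that
# convention; the real DenyHosts util module differs in detail.
import os

def createDaemon():
    try:
        if os.fork() > 0:
            os._exit(0)                 # parent exits; daemon child continues
        os.setsid()
        if os.fork() > 0:
            os._exit(0)
    except OSError as e:
        return (e.errno, e.strerror)    # caller formats this into die(...)
    return 0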
def main():
    usage = "usage: pstream3_client_lirc [options]"
    parser = OptionParser(usage)
    parser.add_option("--daemon", help="Run as daemon",
                      action="store_true", dest="daemon")
    parser.add_option("--ip", help="IP address to connect to", dest="ip")
    parser.add_option("--port", help="Port to connect to", dest="port")
    parser.add_option("--dump", help="Folder to dump into", dest="dump")
    parser.add_option("--lirc-config", help="Path to lirc config",
                      dest="lirc_config")
    parser.add_option("--pid", help="Process pid file", dest="pid")
    (options, args) = parser.parse_args()

    if options.daemon:
        ret = createDaemon()
    if options.pid:
        pid = os.getpid()
        f = open(options.pid, "w")
        f.write(str(pid))
        f.close()

    if options.ip and options.port:
        ip = options.ip
        port = options.port
    else:
        ip = "localhost"
        port = str(8400)

    client = xmlrpclib.ServerProxy("http://%s:%s/" % (ip, port))
    h264_client = None
    blocking = 1
    if options.lirc_config:
        lirc_path = options.lirc_config
    else:
        lirc_path = "/etc/pstream/conf"

    if pylirc.init("pylirc", lirc_path, blocking):
        code = {"config": ""}
        while code["config"] != "quit":
            # Read next code
            s = pylirc.nextcode(1)
            # Right now I don't care what you press on the remote.
            code["config"] = "start_stream"
            status = None
            if code["config"] == "start_stream":
                try:
                    status = h264_client.GetAppRunStatus()
                except:
                    status = None
                if (status != AppStatus.RUNNING) or not status:
                    print("Creating new app")
                    h264 = client.CreateApp("h264Stream")
                    notifyCopyProcess = client.CreateApp("NotifyCopyProcess")
                    notifyCopyProcess_client = xmlrpclib.ServerProxy(
                        "http://%s:%s/" % (ip, port) + str(notifyCopyProcess))
                    print h264, client.GetAppInstances()
                    h264_client = xmlrpclib.ServerProxy(
                        "http://%s:%s/" % (ip, port) + str(h264))
                    h264_client.SetAppValue("auto_restart", 1)
                    if options.dump:
                        h264_client.SetAppValue("dumpfolder", options.dump)
                    else:
                        h264_client.SetAppValue("dumpfolder", "/home/recode/dump")
                    h264_client.StartApp()
                else:
                    print("Stopping stream")
                    client.DestroyInstance(h264)
                    client.DestroyInstance(notifyCopyProcess)
                    continue
                while h264_client.GetAppRunStatus() != AppStatus.RUNNING:
                    print "here"
                    sleep(.5)
            print("Next code")
        client.DestroyInstances()
        pylirc.exit()
                img_links = saveImageUrls(html, url[0])
                logging.debug('Saving document to DB')
                data = {'url': url[0],
                        'levels_deep': url[1],
                        'parent_url': url[2],
                        'job_id': url[3],
                        'img_links': img_links}
                db_handle.insert(data)
                db_handle.update({'job_id': url[3], 'keep_track_crawl': 1},
                                 {'$inc': {'crawl_count': 1}}, True)
            except:
                logging.error("Couldn't read HTML")
            self.queue_handle.task_done()
            time.sleep(1)


# Main launches worker threads and
# seeds the queue with initial URLs
if __name__ == "__main__":
    daemon.createDaemon()
    args = sys.argv
    job_id = args[1]
    urls = []
    for url in args[2:]:
        urls.append(url)
    logging.debug('Starting Main Crawl Thread with args %s %s'
                  % (str(job_id), str(urls)))
    for i in range(num_fetch_threads):
        logging.debug('Creating worker thread %d' % i)
        try:
            worker = ProcessURL(i, url_queue)
            worker.setDaemon(True)
            worker.start()
        except:
            logging.debug('Error detected')
        logging.debug('Created worker thread %d' % i)
def main():
    parser = optparse.OptionParser()
    parser.add_option('--pidfile', dest='pidfile', default='translator.pid',
                      help='path to file for logging pid')
    parser.add_option('--logfile', dest='logfile', default='stdout.log',
                      help='path to file for stdout logging')
    parser.add_option('--logdir', dest='logdir', default='logs',
                      help='directory to store other log files')
    parser.add_option('--daemon', dest='daemon', action='store_true',
                      help='run as daemon (posix only)')
    parser.add_option('--durable', dest='durable', action='store_true',
                      default=False, help='register a durable queue')
    parser.add_option('--display-only', dest='display_only',
                      action='store_true', default=False,
                      help='only display build properties and don\'t add '
                           'jobs to the queue')
    parser.add_option('--pulse-cfg', dest='pulse_cfg', default='',
                      help='optional config file containing optional sections '
                           '[consumer] and [publisher] for nondefault Pulse '
                           'configs')
    parser.add_option('--push-message', dest='message',
                      help='path to file of a Pulse message to process')
    parser.add_option('--label', dest='label',
                      help='label to use for pulse queue')
    options, args = parser.parse_args()

    pulse_cfgs = {'consumer': None, 'publisher': None}
    if options.pulse_cfg:
        if not os.path.exists(options.pulse_cfg):
            print 'Config file does not exist!'
            return
        pulse_cfgfile = ConfigParser.ConfigParser()
        pulse_cfgfile.read(options.pulse_cfg)
        for section in pulse_cfgs.keys():
            pulse_cfgs[section] = PulseConfiguration.read_from_config(
                pulse_cfgfile, section)

    if os.environ.get('pulseuser'):
        setattr(pulse_cfgs['consumer'], 'user', os.environ['pulseuser'])
        setattr(pulse_cfgs['publisher'], 'user', os.environ['pulseuser'])
    if os.environ.get('pulsepassword'):
        setattr(pulse_cfgs['consumer'], 'password', os.environ['pulsepassword'])
        setattr(pulse_cfgs['publisher'], 'password', os.environ['pulsepassword'])

    if options.daemon:
        if os.access(options.logfile, os.F_OK):
            os.remove(options.logfile)
        createDaemon(options.pidfile, options.logfile)

    f = open(options.pidfile, 'w')
    f.write("%d\n" % os.getpid())
    f.close()

    service = PulseBuildbotTranslator(durable=options.durable,
                                      logdir=options.logdir,
                                      message=options.message,
                                      label=options.label,
                                      display_only=options.display_only,
                                      consumer_cfg=pulse_cfgs['consumer'],
                                      publisher_cfg=pulse_cfgs['publisher'])
    service.start()
print "naoqi = %s" % naoqi
print "ip = %s" % ip
print "port = %s" % port
print

if '--bodyposition' in sys.argv:
    import bodyposition
    bodyposition.read_until_ctrl_c()
else:
    print "you can use various switches to test the nao:"
    print default_help() + ' [--bodyposition]'
    print "--bodyposition - enter an endless loop printing various sensors (good for testing)"

# We will daemonize at this point? before anything else:
if '--daemonize' in sys.argv:
    import daemon
    retcode = daemon.createDaemon()

# must be the first import - you can only import naoqi after this
from base import *
from options import *
import burst_target as target

# Finally we load the networking/ipc library to connect to naoqi which does
# actually talk to the hardware. We have two implementations and two development
# hosts, namely 64 bit and 32bit, so this gets a little complex.
from burst_util import is64
import sys

using_pynaoqi = False
    def run(self):
        print ' start game server'
        G.getGameServer().serve_forever()


class AssistThread(threading.Thread):
    def run(self):
        print ' start assist server'
        openApi = OpenAPIV3(config.appId,
                            decrypt(config.appKey, config.commKey),
                            config.qqIpLists)
        G.getAssistServer().setOpenApi(openApi)
        G.getAssistServer().serve_forever()


class ProxySvr:
    def _createServers(self):
        G.setGameServer(GameSvrServer(config.gameServerAddr))
        G.setAssistServer(AssistServer())

    def start(self):
        print 'start proxy server ... '
        self._createServers()
        GameSvrServerThread().start()
        AssistThread().start()


if __name__ == '__main__':
    daemon.createDaemon()
    ProxySvr().start()
""" Updates the system described in teh ESPML document in the database. """ espmlDocObject = minidom.parseString(espmlString) logging.debug(espmlDocObject.toxml()) return "OK" # There will probably be several List functions that take different parameters. # So this list will be expanded as we go on. def listSystems(self, locationString): """ Returns a list of systems in the database to the client. """ logging.debug(locationString) locationDocument = location.parseString(locationString) result = self._db.listSystems(locationDocument) logging.debug(result) return result if __name__ == '__main__': # daemonize the registry daemon.createDaemon(logging, '/tmp/', pidfile='/var/run/esp_registry.pid', gid=1000, uid=1000) # start the soap registry server registry = Registry(8080) registry.start()
if hasattr(args, 'comp'):
    args.dmType = 'DMDA'
else:
    args.dmType = 'DMComplex'
ex = PETScExample(args.library, args.num, log_summary='summary.dat',
                  log_summary_python=None if args.batch else args.module + '.py',
                  preload='off')
source = ex.petsc.source(args.library, args.num)
sizes = {}
times = {}
events = {}
log = not args.daemon
if args.daemon:
    import daemon
    print 'Starting daemon'
    daemon.createDaemon('.')
for run in args.runs:
    name, stropts = run.split('=', 1)
    opts = dict([t if len(t) == 2 else (t[0], None)
                 for t in [arg.split('=', 1) for arg in stropts.split(' ')]])
    if args.dmType == 'DMDA':
        sizes[name] = []
        times[name] = []
        events[name] = {}
        run_DMDA(ex, name, opts, args, sizes, times, events, log=log)
    elif args.dmType == 'DMComplex':
        sizes[name] = {}
        times[name] = {}
        events[name] = {}
        run_DMComplex(ex, name, opts, args, sizes, times, events, log=log)
outputData(sizes, times, events)
        elif action == "del":
            pid = web.input(pid=None).pid
            model.del_pattern(pid)
        return self.patlist()

    def patlist(self):
        patterns = [{"name": pat.name, "pattern": pat.pattern,
                     "pid": pat.pid, "cid": pat.cid}
                    for pat in model.get_pattern()]
        return snippet.patlist(model.get_category(), patterns)


class Email:
    def GET(self, action):
        emails = model.get_maillist()
        return render.maillist(emails)

    def POST(self, action):
        if action == "del":
            email = web.input(email=None).email
            model.del_mail(email)
        return render.maillist(model.get_maillist())


app = web.application(urls, globals())

if __name__ == '__main__':
    createDaemon()
    app.run()