def __init__(self, argv):
    logging.basicConfig(filename=CB_LOGFILE, level=CB_LOGGING_LEVEL,
                        format='%(asctime)s %(message)s')
    self.appClass = "none"  # Should be overwritten by app
    self.cbFactory = {}
    self.adtInstances = []
    self.doStop = False
    self.friendlyLookup = {}
    self.configured = False
    self.status = "ok"
    self.bridge_id = "unconfigured"
    if len(argv) < 3:
        logging.error("%s cbApp improper number of arguments", ModuleName)
        exit(1)
    managerSocket = argv[1]
    self.id = argv[2]
    logging.info("%s Hello from %s", ModuleName, self.id)
    procname.setprocname(self.id)
    initMsg = {"id": self.id, "type": "app", "status": "req-config"}
    self.managerFactory = CbClientFactory(self.processManager, initMsg)
    reactor.connectUNIX(managerSocket, self.managerFactory, timeout=2)
    reactor.callLater(TIME_TO_MONITOR_STATUS, self.sendStatus)
    reactor.run()

def __init__(self):
    procname.setprocname('cbsupervisor')
    logging.basicConfig(filename=CB_LOGFILE, level=CB_LOGGING_LEVEL,
                        format='%(asctime)s %(levelname)s: %(message)s')
    logging.info("%s ************************************************************", ModuleName)
    if not os.path.exists(CB_SOCKET_DIR):
        os.makedirs(CB_SOCKET_DIR)
    print("cbsupervisor, CB_RASPBERRY: {}, CB_DEV_APPS: {}".format(CB_RASPBERRY, CB_DEV_APPS))
    self.connected = False
    self.checkingManager = False
    self.waitingToReconnect = False
    self.conduitConnectAttempt = 0
    self.timeStamp = 0
    self.managerPings = 0
    self.timeChanged = False
    signal.signal(signal.SIGINT, self.signalHandler)   # For catching SIGINT
    signal.signal(signal.SIGTERM, self.signalHandler)  # For catching SIGTERM
    reactor.callLater(0.1, self.startConman)
    if CB_RASPBERRY:
        reactor.callInThread(self.iptables)
    reactor.callLater(1, self.startManager, False)
    #reactor.callLater(120, self.disconnectTest)
    reactor.run()

def main():
    parser = argparse.ArgumentParser()
    # parser.add_argument('--data_dir', type=str, default='res/BioCreative2GM/train/', help='data directory')
    parser.add_argument('--data_dir', type=str, default='res/Pubmed/train/', help='data directory')
    parser.add_argument('--restore', type=str, default=None, help='ckpt file name')
    parser.add_argument('--save_dir', type=str, default='logs/pubmed/', help='ckpt file path')
    parser.add_argument('--batch_size', type=int, default=1000, help='batch size')
    parser.add_argument('--num_epochs', type=int, default=10, help='num_epoch')
    parser.add_argument('--rnn_size', type=int, default=100, help='output nodes of rnn')
    parser.add_argument('--embedding_dim', type=int, default=100, help='embedding dimension')
    # parser.add_argument('--class_size', type=int, default=3, help='class size')
    parser.add_argument('--class_size', type=int, default=17, help='class size')
    parser.add_argument('--save_every', type=int, default=100, help='save per iteration')
    parser.add_argument('--exp_code', type=str, default=None, help='experiment code')
    parser.add_argument('--gpu', type=int, default=1, help='which gpu to use')
    args = parser.parse_args()

    if args.exp_code is None:
        print("YOU SHOULD INPUT EXP_CODE")
        exit()
    args.save_dir = os.path.join(args.save_dir, args.exp_code)
    if os.path.isdir(args.save_dir):
        print("INPUT EXP_CODE EXISTS ALREADY")
        exit()
    else:
        os.mkdir(args.save_dir)

    procname.setprocname("NER_Pubmed_TRAIN")
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    print(args)
    train(args)

def __init__(self):
    procname.setprocname(self.name)
    FastServer.__init__(self)
    #self._openLogFile(self._request_file)
    self.loadConfig()
    index = FastServerIndexResource(server=self)
    index.putChild('test', resources.TestResource(server=self))
    tunnel = FastServerIndexResource(server=self)
    index.putChild('tunnel', tunnel)
    tunnel.putChild('start', resources.SSHTunnelStartJsonResource(server=self))
    tunnel.putChild('end', resources.SSHTunnelEndJsonResource(server=self))
    tunnel.putChild('check', resources.SSHTunnelCheckJsonResource(server=self))
    tunnel.putChild('list', resources.SSHTunnelListJsonResource(server=self))
    twisted.web.server.Site.__init__(self, index)
    lp = LoopingCall(self.checkAndReconnectAll)
    lp.start(self.config.getint('main', 'check_period'))
    self.save_file = os.path.join(self.dir, 'config', 'save.dat')
    self.loadAllTunnels()

def __init__(self, argv):
    procname.setprocname('cbzwavectrl')
    self.status = "ok"
    self.state = "stopped"
    self.include = False
    self.exclude = False
    self.getting = False
    self.resetBoard = False
    self.getStrs = []
    self.cbFactory = {}
    self.adaptors = []
    self.found = []
    self.listen = []
    self.postToUrls = []
    if len(argv) < 3:
        print("error, Improper number of arguments")
        exit(1)
    managerSocket = argv[1]
    self.id = argv[2]
    self.fromTime = str(int(time.time()) - 1)
    # Connection to manager
    initMsg = {"id": self.id, "type": "zwave", "status": "req-config"}
    self.managerFactory = CbClientFactory(self.onManagerMessage, initMsg)
    self.managerConnect = reactor.connectUNIX(managerSocket, self.managerFactory, timeout=10)
    reactor.run()

def main():
    setprocname(basename(sys.argv[0]))

    config = Config()
    chdir(config["rootdir"])

    logger = setup_logging(config)

    manager = Manager()
    Worker(channel="workers").register(manager)

    if config["debug"]:
        Debugger(
            logger=logger,
            events=config["verbose"],
        ).register(manager)

    Core(config).register(manager)

    try:
        manager.run()
    finally:
        config.save()

def main():
    parser = argparse.ArgumentParser(description="Monitor your MySQL replication status")
    parser.add_argument("--config",
                        help="Path to the replmon config file (default: {0})".format(DEFAULT_CNF_FILE),
                        default=DEFAULT_CNF_FILE)
    args = parser.parse_args()

    parser = ConfigParser.ConfigParser()
    parser.read(args.config)
    mysql_args = dict(parser.items("mysql"))
    mon = Replmon(mysql_args)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(DEFAULT_LOG_FILE)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    with daemon.DaemonContext(pidfile=lockfile.pidlockfile.PIDLockFile(DEFAULT_PID_FILE),
                              files_preserve=[file_handler.stream]):
        procname.setprocname("replmon")
        # noinspection PyBroadException
        try:
            mon.run()
        except Exception:
            logger.exception("An error occurred")

def main():
    if PROCNAME:
        procname.setprocname("Gmail:main")
    users = get_users(DOMAIN)
    logger.info("Running with %s users and %s threads", len(users), MAX_THREADS)
    manager = multiprocessing.Manager()
    queue = manager.Queue()
    pool = multiprocessing.Pool(MAX_THREADS, runuser_init, [queue])
    results = []
    parameters = []
    for item in users:
        parameters.append((item, queue))
    # Debug path: run a single user synchronously instead of via the pool.
    # runuser_init(queue)
    # print runuser(item, queue)
    try:
        r = pool.map_async(runuser, parameters, callback=results.append)
        main_progressbar(len(users), queue)
        r.wait()
    except KeyboardInterrupt:
        # AsyncResult has no terminate(); stop the pool itself.
        pool.terminate()
        pool.join()

def renameprocess(newname, debug=False):
    errors = []
    # Renaming ourselves for ps et al.
    try:
        import procname
        procname.setprocname(newname)
    except:
        errors.append("Failed procname module")
    # Renaming ourselves for pkill et al.
    try:
        import ctypes
        # GNU/Linux style
        libc = ctypes.CDLL('libc.so.6')
        libc.prctl(15, newname, 0, 0, 0)
    except:
        errors.append("Failed GNU/Linux style")
    try:
        import dl
        # FreeBSD style
        libc = dl.open('/lib/libc.so.6')
        libc.call('setproctitle', newname + '\0')
    except:
        errors.append("Failed FreeBSD style")
    if debug and errors:
        msg = [_("Errors occurred while trying to change process name:")]
        for i in errors:
            msg.append("%s" % (i,))
        log.addwarning('\n'.join(msg))

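# Illustrative sketch, not part of the snippet above: on Python 3 the prctl
# fallback needs bytes rather than str, and PR_SET_NAME (15) keeps at most
# 15 bytes plus a terminating NUL. Function name here is hypothetical.
import ctypes

def set_name_prctl(newname):
    libc = ctypes.CDLL('libc.so.6', use_errno=True)
    # PR_SET_NAME = 15; the kernel silently truncates to 15 bytes.
    libc.prctl(15, newname.encode('utf-8')[:15], 0, 0, 0)
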
def main():
    """
    Entry point for the backdoor program.

    :return:
    """
    # os.setuid(0)
    # os.seteuid(0)
    config = ConfigParser.ConfigParser()
    config.read('./bd.config')

    # initial setup stuff
    remote_host = config.get('Setup', 'rhost')
    local_host = config.get('Setup', 'lhost')
    source_port = config.get('Setup', 'sport')
    dest_port = config.get('Setup', 'dport')
    protocol = config.get('Setup', 'proto')
    encryption_key = config.get('Setup', 'enkey')

    # initial watches
    watches = config.get('Watches', 'paths').split(',')

    # hide the process name
    procname.setprocname('bash')

    listener = Listener(remote_host, local_host, source_port, dest_port,
                        protocol, encryption_key, watches)
    try:
        listener.listen()
    except KeyboardInterrupt:
        pass

def renameprocess(newname, debug=False):
    errors = []
    # Renaming ourselves for ps et al.
    try:
        import procname
        procname.setprocname(newname)
    except:
        errors.append("Failed procname module")
    # Renaming ourselves for pkill et al.
    try:
        import ctypes
        # Linux style
        libc = ctypes.CDLL("libc.so.6")
        libc.prctl(15, newname, 0, 0, 0)
    except:
        errors.append("Failed linux style")
    try:
        import dl
        # FreeBSD style
        libc = dl.open("/lib/libc.so.6")
        libc.call("setproctitle", newname + "\0")
    except:
        errors.append("Failed FreeBSD style")
    if debug and errors:
        msg = [_("Errors occurred while trying to change process name:")]
        for i in errors:
            msg.append("%s" % (i,))
        log.addwarning("\n".join(msg))

def preview(udp_port, window_name=None):
    if window_name is not None:
        import procname
        # This is a hack to set the window title so we can embed it later.
        # A nicer way would probably be to create a new window and provide
        # the id when xvimagesink asks for it, but I'm feeling too lazy.
        # The procname has to be set before gst is imported.
        procname.setprocname(window_name)

    # Pygst loves to grab the argv, so give the greedy bastard nothing.
    args, sys.argv = sys.argv, []
    import gst
    sys.argv = args

    # TODO: Use hardware decoding when available (and working)
    pipe_str = (
        "udpsrc uri=udp://0.0.0.0:%i "
        'caps="application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264" ! '
        "queue ! rtph264depay ! ffdec_h264 skip-frame=1 ! "
        "xvimagesink sync=false" % int(udp_port)
    )
    pipeline = gst.parse_launch(pipe_str)
    pipeline.set_state(gst.STATE_PLAYING)
    try:
        gobject.MainLoop().run()
    except KeyboardInterrupt:
        pass
    pipeline.set_state(gst.STATE_NULL)

def procline(self, message):
    """Changes the current procname (process name) for the process.

    This can be used to make `ps -ef` output more readable.
    """
    setprocname('rq: %s' % (message,))

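# A minimal, self-contained sketch of the same pattern (assumes the optional
# `procname` C extension is installed; the message text is hypothetical):
import procname

procname.setprocname('rq: waiting for jobs')  # `ps -ef` now shows this title
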
def setup_process_name(env):
    args = env['args']
    endpoint = ' {0}://{1}:{2} '.format(args['transport'], args['bind'], args['port'])
    config = ' --config {0} '.format(args['config'])
    process_name = 'elevator' + endpoint + config
    procname.setprocname(process_name)

def run(self):
    if PROCNAME:
        procname.setprocname("Gmail:%s" % self.user_email)
    self.logger.info("Starting")
    usercredentials = self.impersonate_user()
    offlineimap_config = self.get_offlineimap_config(usercredentials.access_token)
    config_filename = "config_files/offlineimap_config_%s" % self.user_email
    open(config_filename, "w").write(offlineimap_config)
    command = [
        "/home/gmailbackup/gmailbackup/offlineimap/build/lib.linux-x86_64-2.7/offlineimaprun",
        "-c", config_filename, "-q"
    ]
    p = subprocess.Popen(command, stderr=subprocess.PIPE)
    start = time.time()
    stats = {}
    total_processed = 0
    total_messages = 0
    while True:
        line = p.stderr.readline()
        if not line:
            break
        matches = re.search(r' Copy message ([0-9]+) \(([0-9]+) of ([0-9]+)\) (.*)', line)
        if not matches:
            continue
        folder_messages = matches.group(3)
        folder = matches.group(4)
        total_processed += 1
        if folder not in stats:
            stats[folder] = int(folder_messages)
            if self.queue:
                self.queue.put(["add_total", int(folder_messages)])
        total_messages = sum(stats.values())
        self.logger.info("%s messages (%s processed)", total_messages, total_processed)
        if self.queue:
            self.queue.put(["processed"])
    ret = p.wait()
    end = time.time()
    elapsed = end - start
    msgs = total_processed / elapsed
    self.logger.info("Finished with return code %s in %.2f seconds. Downloaded %s/%s messages. %.2f msg/s",
                     ret, elapsed, total_processed, total_messages, msgs)
    # Subtract messages not processed from progress bar.
    if self.queue:
        self.queue.put(["missed", total_messages - total_processed])
        self.queue.put(["finished_user"])
    if PROCNAME:
        procname.setprocname("Gmail:null")
    return ret

def serv(self):
    procname.setprocname('sharer spec')
    while True:
        req = self.reqQueue.get()
        try:
            self.handleReq(req)
        except Exception as e:
            self.resQueue.put((None, None))
            logger.error(str(e))

def serv(self):
    procname.setprocname('restarter')
    now = datetime.datetime.utcnow()
    ts = time.mktime(now.timetuple()) - 86400 * 7
    startTime = datetime.datetime.fromtimestamp(ts)
    retry_max_times = 5
    history_col = self.db.share_job_history
    try:
        while True:
            logger.debug(u"------- retry pass over all deferred jobs starting -------")
            share_jobs = self.col.find({'status': 'deferred',
                                        "create_at": {"$gte": startTime}})
            for share_job in share_jobs:
                _id = share_job.get('_id', None)
                category = share_job.get('category', 'send')
                retry_times_key = category + '_retry_times'
                create_at = share_job.get('create_at')
                update_at = share_job.get('update_at')
                now = datetime.datetime.utcnow()
                current_ts = time.mktime(now.timetuple())
                update_ts = time.mktime(update_at.timetuple())
                if current_ts - update_ts < 3600.:
                    continue
                retry_times = share_job.get(retry_times_key, retry_max_times)
                if retry_times < retry_max_times:
                    message = self.col.find_and_modify(
                        query={'_id': _id},
                        update={'$set': {'status': 'pending',
                                         retry_times_key: retry_times + 1}},
                        sort={'create_at': 1})
                    # logger.debug("Restart job!")
                    if message is None:
                        logger.debug('modify error!')
            time.sleep(100)
    except Exception as e:
        logger.error("Exception: " + str(e))
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logger.error(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))

def checkServer():
    procname.setprocname('sharer check')
    port = '3011'
    urls = ('/login/check', 'loginCheck')
    app = web.application(urls, globals())
    # web.py reads its listening port from sys.argv[1]
    if len(sys.argv) > 1:
        sys.argv[1] = port
    else:
        sys.argv.append(port)
    app.run()

def serv(self):
    procname.setprocname('sharer log')
    try:
        while True:
            req = reqQueue.get()
            if req is None:
                continue
            try:
                self.handleReq(req)
            except Exception as e:
                pass
    except Exception as e:
        pass

def main():
    parser = argparse.ArgumentParser()
    # parser.add_argument('--data_dir', type=str, default='res/BioCreative2GM/test/', help='data directory')
    parser.add_argument('--data_dir', type=str, default='res/Pubmed/test/', help='data directory')
    parser.add_argument('--restore', type=str, default='None', help='ckpt file path')
    parser.add_argument('--save_dir', type=str, default='logs/pubmed/', help='ckpt file path')
    parser.add_argument('--batch_size', type=int, default=1000, help='batch size')
    parser.add_argument('--num_epochs', type=int, default=10, help='num_epoch')
    parser.add_argument('--rnn_size', type=int, default=100, help='output nodes of rnn')
    parser.add_argument('--embedding_dim', type=int, default=100, help='embedding dimension')
    parser.add_argument('--class_size', type=int, default=17, help='class size')
    parser.add_argument('--exp_code', type=str, default=None, help='experiment code')
    parser.add_argument('--gpu', type=int, default=1, help='which gpu to use')
    args = parser.parse_args()

    args.restore = get_checkpoint_path(os.path.join(args.save_dir, args.exp_code))
    if not os.path.isfile(args.restore + ".index"):
        print(args.restore, "is invalid.")
        exit()

    procname.setprocname("NER_Pubmed_TEST")
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    print(args)
    test(args)

def run(self):
    if not self.listen_to:
        raise ValueError("Cannot run a stage that lacks an input queue")
    logging.info("Starting %s", self.name)

    # Needs to be set up after daemonization
    self.startup()
    try:
        import procname
        procname.setprocname(self.name)
    except ImportError:
        pass

    if self.args.pidfile:
        with open(self.args.pidfile, "w") as f:
            f.write(str(os.getpid()))

    self.task_channel = self.connection.channel()
    self.task_channel.basic_qos(prefetch_count=1)

    for action_cls in self.to_purge:
        task_queue = action_cls.get_queue(self.args.instance)
        self.task_channel.queue_declare(queue=task_queue)
        self.task_channel.queue_purge(queue=task_queue)
        logging.debug("Purged queue %s", task_queue)

    for action_cls in self.listen_to:
        task_queue = action_cls.get_queue(self.args.instance)
        self.task_channel.queue_declare(queue=task_queue)
        self.task_channel.basic_consume(self._task_wrapper_callback, queue=task_queue)
        logging.debug("Listening to queue %s", task_queue)

    try:
        self.task_channel.start_consuming()
    except KeyboardInterrupt:
        logging.error("Keyboard interrupt, shutting down..")
    except Exception as e:
        logging.exception("Unhandled exception, restarting stage")

    try:
        self.shutdown()
    except Exception as e:
        logging.exception("Exception in shutdown")

def run(self):
    if not self.listen_to:
        raise ValueError('Cannot run a stage that lacks an input queue')
    logging.info('Starting %s', self.name)

    # Needs to be set up after daemonization
    self.startup()
    try:
        import procname
        procname.setprocname(self.name)
    except ImportError:
        pass

    if self.args.pidfile:
        with open(self.args.pidfile, 'w') as f:
            f.write(str(os.getpid()))

    self.task_channel = self.connection.channel()
    self.task_channel.basic_qos(prefetch_count=1)

    for action_cls in self.to_purge:
        task_queue = action_cls.get_queue(self.args.instance)
        self.task_channel.queue_declare(queue=task_queue)
        self.task_channel.queue_purge(queue=task_queue)
        logging.debug('Purged queue %s', task_queue)

    for action_cls in self.listen_to:
        task_queue = action_cls.get_queue(self.args.instance)
        self.task_channel.queue_declare(queue=task_queue)
        self.task_channel.basic_consume(self._task_wrapper_callback, queue=task_queue)
        logging.debug('Listening to queue %s', task_queue)

    try:
        self.task_channel.start_consuming()
    except KeyboardInterrupt:
        logging.error('Keyboard interrupt, shutting down..')
    except Exception, e:
        logging.exception('Unhandled exception, restarting stage')

def run_daemon(self):
    self.check_pid()
    self.setup_logging()
    self.prepare()

    import logging
    logging.getLogger('st.daemon').trace("on run_daemon")

    self.daemonize()
    self.change_working_dir()
    self.drop_privileges()
    self.write_pid()

    if self.process_name:
        try:
            import procname
            procname.setprocname(self.process_name)
        except ImportError:
            pass

    signal.signal(signal.SIGTERM, self._exit)  # Terminate
    signal.signal(signal.SIGINT, self._exit)   # Interrupt

    try:
        self.run()
    except KeyboardInterrupt:
        os.kill(self.__pid, signal.SIGTERM)

def main():
    setprocname(basename(sys.argv[0]))

    config = Config()

    manager = Manager()
    Worker(channel="workerthreads").register(manager)
    Worker(channel="workerprocesses").register(manager)

    if config.get("debug"):
        Debugger(
            events=config.get("verbose"),
            file=config.get("errorlog")
        ).register(manager)

    if config.get("daemon"):
        manager += Daemon(config.get("pidfile"))

    Core(config).register(manager)

    manager.run()

def _internal_run(self, pidfile, purge_task_queue):
    logging.info('Starting %s', self.name)
    try:
        import procname
        procname.setprocname(self.name)
    except ImportError:
        pass

    with open(pidfile, 'w') as f:
        f.write(str(os.getpid()))

    # On error, restart this thread
    running = True
    while running:
        self.startup()
        self.task_channel = self.connection.channel()
        self.task_channel.queue_declare(queue=self.task_queue)
        self.task_channel.basic_qos(prefetch_count=1)
        if purge_task_queue:
            self.task_channel.queue_purge(queue=self.task_queue)
        self.task_channel.basic_consume(self._task_wrapper_callback, queue=self.task_queue)
        try:
            self.task_channel.start_consuming()
        except KeyboardInterrupt:
            logging.error('Keyboard interrupt, shutting down..')
            running = False
        except Exception, e:
            logging.exception('Unhandled exception, restarting stage')
        try:
            self.shutdown()
        except Exception, e:
            logging.exception('Exception in shutdown')

def main():
    parser = argparse.ArgumentParser(description="Monitor your MySQL replication status")
    parser.add_argument("--config",
                        help="Path to the replmon config file (default: {0})".format(DEFAULT_CNF_FILE),
                        default=DEFAULT_CNF_FILE)
    args = parser.parse_args()

    parser = ConfigParser.ConfigParser()
    parser.read(args.config)
    mysql_args = dict(parser.items("mysql"))
    mon = Replmon(mysql_args, DEFAULT_STATUS_FILE, DEFAULT_CHECK_INTERVAL)

    try:
        user = parser.get("system", "user")
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        uid, gid = os.getuid(), os.getgid()
    else:
        user_entry = pwd.getpwnam(user)
        uid, gid = user_entry.pw_uid, user_entry.pw_gid

    # Ensure we can touch the status file
    for dirname in map(os.path.dirname, [DEFAULT_PID_FILE, DEFAULT_STATUS_FILE]):
        ensure_writable(dirname, uid, gid)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(DEFAULT_LOG_FILE)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    with daemon.DaemonContext(pidfile=lockfile.pidlockfile.PIDLockFile(DEFAULT_PID_FILE),
                              files_preserve=[file_handler.stream],
                              uid=uid, gid=gid):
        procname.setprocname("replmon")
        # noinspection PyBroadException
        try:
            mon.run()
        except Exception:
            logger.exception("An error occurred")

def handler(self, req):
    """Special handler for WSGI."""
    if req.role not in self.roles:
        return FCGI_UNKNOWN_ROLE, 0

    # Mostly taken from example CGI gateway.
    environ = req.params
    environ.update(self.environ)

    environ['wsgi.version'] = (1, 0)
    environ['wsgi.input'] = req.stdin
    if self._bindAddress is None:
        stderr = req.stderr
    else:
        stderr = TeeOutputStream((sys.stderr, req.stderr))
    environ['wsgi.errors'] = stderr
    environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
                                  thread_available and self.multithreaded
    environ['wsgi.multiprocess'] = isinstance(req, CGIRequest) or \
                                   self.multiprocess
    environ['wsgi.run_once'] = isinstance(req, CGIRequest)

    if environ.get('HTTPS', 'off') in ('on', '1'):
        environ['wsgi.url_scheme'] = 'https'
    else:
        environ['wsgi.url_scheme'] = 'http'

    self._sanitizeEnv(environ)

    try:
        remote_addr = environ.get('REMOTE_ADDR', '--NO REMOTE_ADDR SET--')[:22]
        host = environ.get('HTTP_HOST', '--NO HTTP_HOST SET--')[:30]
        uri = environ.get('REQUEST_URI', '--NO REQUEST_URI SET--')[:200]
        process_name = 'python manage.py runfcgi INFO: [[%s], [%s], [%s]]' % (remote_addr, host, uri)
        procname.setprocname(process_name)
    except Exception, e:
        print "-- error setting proc name: %s" % repr(e)

        # Downsample to mono
        mono_vec = vec.sum(-1) / float(s.input_channels)

        self._spectrogram = np.fft.fft(mono_vec)
        #self.bandpassed = fft_bandpassfilter(self.spectrogram, 44010, 100, 20)
        #print random()

    def autostart(self):
        if not self.running:
            self.running = True
            self.start()
            atexit.register(self.quit)

    def quit(self):
        """Shutdown the audio thread"""
        if self.running:
            self.running = False
            self.join()

    @property
    def spectrogram(self):
        self.autostart()
        return self._spectrogram


procname.setprocname('looper')
audio = AudioThread()

"Universal server black-boxing the communication with individual equipment" ) parser.add_argument( "worker", help="specify the Worker module you want to communicate with") parser.add_argument("-p", "--port", default=12345, type=int, help="specify the port at which you want to broadcast") parser.add_argument("-d", "--debug", action="store_true", help="enable debug messages") args = vars(parser.parse_args(sys.argv[1:])) import importlib Worker = importlib.import_module("MTWorker").Worker DEBUG = args["debug"] try: import procname procname.setprocname(args["worker"]) except: print "Procname module not found. Using default procname: python" server = Server(args["worker"], args["port"]) server.run()
def setup_process_name(config_file):
    config = ' -c {0} '.format(config_file)
    process_name = 'elevator' + config
    procname.setprocname(process_name)

if __name__ == "__main__":
    os.setpgrp()
    atexit.register(cleanup)

    conn = pymongo.Connection(config.mongoServer, tz_aware=True)
    db = conn['kanche']
    col = db['share_job']
    col.ensure_index([("category", pymongo.ASCENDING),
                      ("update_pending", pymongo.ASCENDING),
                      ("status", pymongo.ASCENDING)])
    col.ensure_index([("category", pymongo.ASCENDING),
                      ("remove_pending", pymongo.ASCENDING),
                      ("status", pymongo.ASCENDING)])
    history_col = db['share_job_history']

    procname.setprocname('sharer')
    # checker.start()
    logserver = logger.LogServer()
    sessionServer = session.SessionServer()
    # restarter = restarter.Restarter()
    # che168Refresh = che168Refresh.che168Refresh()
    specServer = spec.SpecServer()
    counter = Counter()
    while True:
        try:
            loop_req(col, history_col, sessionServer, specServer, counter)
        except Exception as e:
            logger.error(str(e))

__license__ = 'GPLv3'
__author__ = 'georgi.kolev_[at]_gmail.com'
__name__ = 'carpcp-main'

import logging
from time import sleep
from serial import Serial
from thread import start_new_thread

import carpcp_conf as conf
import carpcp_lib as carlib
from carpcp_can import CanMsg
from carpcp_bmw import CarStatus

try:
    from procname import setprocname
    setprocname(__name__)
except:
    print('Warning: "import procname" failed :(')
    print('Warning: Skipping process rename!')


def ser2sock(serial_dev, mcast_sock, car_status, can_msg, logger):
    """serial-to-socket thread"""
    counter = 0
    while 42:
        buffer = serial_dev.readline()
        if can_msg.parse(buffer.strip()):
            results = car_status.msg(can_msg)
            if results:
                counter += 1
                for result in results:

#----------
# initialize output directory
#----------
if options.outputDir is None:
    options.outputDir = "results/" + time.strftime("%Y-%m-%d-%H%M%S")

if not os.path.exists(options.outputDir):
    os.makedirs(options.outputDir)

#----------
# try to set the process name
#----------
try:
    import procname
    procname.setprocname("train " + os.path.basename(options.outputDir.rstrip('/')))
except ImportError, ex:
    pass

#----------
# setup logging
#----------
logfname = os.path.join(options.outputDir, "train.log")

if os.path.exists(logfname):
    # append
    logfile = open(logfname, "a")
    print >> logfile, "----------------------------------------------------------------------"
else:
    logfile = open(logfname, "w")

fouts = [sys.stdout, logfile]

import time
import procname
import sys

script_prefix = "RECOVERY_"
proc_name = script_prefix + sys.argv[1]
print "Process name is %s" % proc_name
procname.setprocname(proc_name)

var = 1
while var == 1:
    time.sleep(10)
    print "Sleeping"

def main():
    # configure server
    server = PServer((HOST, PORT), PomHandler)
    server.SetUp()
    procname.setprocname('PomoServer')

    # begin listening
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.setDaemon(True)
    server_thread.start()

    # configure Pomodoro functions
    q = TaskQueue()
    q.from_file(FILELOC)
    in_time = time.time()   # When the block started
    isPlay = False
    t_block = WORK_TIME     # Size of the block we are in
    suspend_ticks = 0       # The ticks elapsed before suspension
    print '\nWorking on ' + q[0].name

    while threading.active_count() > 1:
        if len(q.getQue()) == 0 and server.code == RUN:
            print 'No more tasks in queue! Suspending Server'
            server.code = SUSPEND
        if server.code == DONE:  # if server.code in [set] use me!
            tasq = work_it(q, server.msg)
            if tasq is not None:
                print 'Starting play time...'
                isPlay = True
                t_block = PLAY_TIME + (WORK_TIME - int(in_time - time.time()))
                in_time = time.time()
            server.code = RUN
        elif server.code == RUN:
            if int(time.time() - in_time) > t_block - suspend_ticks:
                suspend_ticks = 0
                if isPlay == False:
                    tasq = work_it(q, PMsg(DONE, 'None'))  # RUN OUT OF TASKS?
                    print 'Starting play time...'
                    isPlay = True
                    t_block = PLAY_TIME
                    in_time = time.time()
                else:
                    print 'Play Time Over...'
                    print 'Begin ' + q[0].name
                    isPlay = False
                    t_block = WORK_TIME
                    in_time = time.time()
        elif server.code == RESUME:
            server.code = RUN
            print 'Resuming Pom Service...' + q[0].name + ' is current task.'
            in_time = time.time()
            isPlay = False
        elif server.code == CURRENT:
            print 'Current task: ' + q[0].name
            server.code = RUN
        elif server.code == SUSPEND:
            suspend_ticks = in_time - time.time()
        time.sleep(0.1)

    q.to_file(FILELOC)
    sys.exit(0)

class Soap2file(Daemon2):

    def __init__(self):
        Daemon2.__init__(self, 'soap2file', 'main', 'hltd')
        #SOAPpy.Config.debug = 1
        self._conf = hltdconf.hltdConf('/etc/hltd.conf')
        self._hostname = os.uname()[1]

    def checkEnabled(self):
        if self._conf.soap2file_port > 0:
            return True
        return False

    def run(self):
        dem = demote.demote(self._conf.user)
        dem()
        server = SOAPpy.SOAPServer((self._hostname, self._conf.soap2file_port))
        server.registerFunction(writeToFile)
        server.registerFunction(createDirectory)
        server.registerFunction(renamePath)
        server.serve_forever()


if __name__ == "__main__":
    daemon = Soap2file()
    import procname
    procname.setprocname('soap2file')
    daemon.start()

        es.elasticize_fu_complete(completed)
        self.infile.deleteFile(silent=True)
        self.stop()

    def elasticizeLS(self):
        ls = self.infile.ls
        es.flushLS(ls)
        self.infile.deleteFile(silent=True)


if __name__ == "__main__":
    import procname
    procname.setprocname('elastic')

    conf = initConf()
    try:
        run_str = ' - RUN:' + sys.argv[1].zfill(conf.run_number_padding)
    except:
        run_str = ''
    logging.basicConfig(filename=os.path.join(conf.log_dir, "elastic.log"),
                        level=conf.service_log_level,
                        format='%(levelname)s:%(asctime)s - %(funcName)s' + run_str + ' - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger(os.path.basename(__file__))

    # STDOUT AND ERR REDIRECTIONS
    sys.stderr = stdErrorLog()
    sys.stdout = stdOutLog()

def set_process_name(name):
    from procname import setprocname
    return setprocname(name)

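# A hedged variant of the wrapper above (function name hypothetical): many
# snippets in this collection guard the import, since procname is an optional
# C extension that may not be installed on the target machine.
def set_process_name_safe(name):
    try:
        from procname import setprocname
    except ImportError:
        return None  # extension missing; keep the interpreter's default name
    return setprocname(name)
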
def procline(self, message):
    """Changes the current procname for the process.

    This can be used to make `ps -ef` output more readable.
    """
    setprocname('rq: {0}'.format(message))

#! /usr/bin/python

# Import your application as:
# from wsgi import application
# Example:
import os

import procname
from skyview.wsgi import application as skyview

# Import CherryPy
import cherrypy
from cherrypy import wsgiserver

procname.setprocname('skyview')

public_path = os.path.abspath(os.path.dirname(__file__))


class Root(object):
    pass


def make_static_config(static_dir_name):
    """
    All custom static configurations are set here; since most are common,
    it makes sense to generate them just once.
    """
    static_path = os.path.join('/', static_dir_name)
    path = os.path.join(public_path, static_dir_name)
    configuration = {
        '/':

#!/usr/bin/python

import procname
import subprocess
import os
import platform
import sys
import time
import signal
import shlex

from get_version import get_version

procname.setprocname('gnomescroll_server_py')

current_process = None
current_process_id = None


def register_signals():
    def signal_handler(sig, frame):
        global current_process
        global current_process_id
        if current_process is not None:
            print "Forwarding signal %d to process %d" % (sig, current_process_id)
            current_process.send_signal(sig)
            print "Waiting for server process to exit..."
            current_process.wait()

    except Exception as e:
        print fitwidth(screenlen, "", str(e), "").encode('utf-8')
        sys.stdout.flush()
        #print traceback.format_exc()

screenlen = 168
#screenlen = 90

if __name__ == "__main__":
    # open up listener fd
    procname.setprocname('mybar')
    pipe_read, pipe_write = os.pipe()
    signal.set_wakeup_fd(pipe_write)

    # Set up a signal to repeat every 1/3rd of a second
    fps = 1
    signal.setitimer(signal.ITIMER_REAL, fps, fps)
    signal.signal(signal.SIGALRM, lambda x, y: None)
    signal.signal(signal.SIGIOT, lambda x, y: None)

    poller = select.epoll()
    poller.register(pipe_read, select.EPOLLIN)

def procline(self, message, prefix='rq: '):
    """Changes the current procname for the process.

    This can be used to make `ps -ef` output more readable.
    """
    setprocname(prefix + message)

    def waitFinish(self):
        self.finish = True
        self.join()

    def abortMerging(self):
        self.abort = True
        self.join()


if __name__ == "__main__":
    import procname
    procname.setprocname('anelastic')

    conf = initConf()
    logging.basicConfig(filename=os.path.join(conf.log_dir, "anelastic.log"),
                        level=conf.service_log_level,
                        format='%(levelname)s:%(asctime)s - %(funcName)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger(os.path.basename(__file__))

    # STDOUT AND ERR REDIRECTIONS
    sys.stderr = stdErrorLog()
    sys.stdout = stdOutLog()

    eventQueue = Queue.Queue()
    dqmQueue = Queue.Queue()

def __init__(self):
    self.server = SSHTunnelServer()
    Daemon.__init__(self, pidfile='/var/run/%s-master.pid' % (self.name.lower()))
    procname.setprocname(self.name)

import socket
import sys
import subprocess
import csv
import procname
import time
from datetime import datetime
import json
import urllib
import rasterio
import numpy as np
import os
import math

procname.setprocname("serverSide")

######################## VARIABLES ########################
outfile = "parsed_data.js"  # Output file (geojson data written to a .js file)
archive_path = "archive/archive_" + str(datetime.now()) + ".csv"
radcap = 1     # Highest amount of radiation to scale color gradient
interval = 5   # Time in seconds in between scans
mapalt = 0
online = False
elevationFile = 'srtm_14_04.tif'
log = []
heatmapdata = []

# create a list of waypoints that have been visited
finishedwp = set([
    0, 1, 3, 4, 5, 6, 7, 8, 15, 16, 18, 19, 20, 21, 22, 30, 31, 33, 34, 35, 36,

            # exit if both checks are complete
            if check_es_complete == False:
                break
            # check every 10 seconds
            self.threadEvent.wait(10)

    def stop(self):
        self.stop = True
        self.threadEvent.set()


if __name__ == "__main__":
    import procname
    procname.setprocname('elasticbu')

    conf = initConf(sys.argv[1])
    logging.basicConfig(filename=os.path.join(conf.log_dir, "elasticbu.log"),
                        level=conf.service_log_level,
                        format='%(levelname)s:%(asctime)s - %(funcName)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger(os.path.basename(__file__))

    # STDOUT AND ERR REDIRECTIONS
    sys.stderr = stdErrorLog()
    sys.stdout = stdOutLog()

    eventQueue = Queue.Queue()

#!/usr/bin/env python3

import tty
import shutil
from sys import stderr, stdout, stdin, argv
import unicodedata

try:
    import pyperclip
    import textwrap
    paste = True
except ImportError:
    paste = False

try:
    import procname
    procname.setprocname('oneline')
except ImportError:
    pass

esc = '\x1b'     # The escape character
csi = esc + '['  # Control Sequence Introducer, used for terminal control sequences


def sgr(n):
    # Return a string that, when printed, sends a Select Graphic Rendition
    # command to the terminal. n should be an integer indicating the display
    # mode to select.
    return csi + str(n) + 'm'


def with_sgr(n, string

#!/usr/bin/env python
from django.core.management import execute_manager
import imp

import procname
procname.setprocname('django_trader')

try:
    imp.find_module('settings')  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. "
                     "It appears you've customized things.\n"
                     "You'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

import settings

if __name__ == "__main__":
    execute_manager(settings)

def main():
    slowctrl = SlowControl()
    procname.setprocname("slowcontrol")
    Process(target=slowctrl.bk_server, args=()).start()
    Process(target=slowctrl.lj_server, args=()).start()

            self.threadEvent.wait(10)
        try:
            self.elastic_process.wait()
        except:
            pass

    def stop(self):
        self.stop = True
        self.threadEvent.set()


if __name__ == "__main__":
    import procname
    procname.setprocname('elasticbu')

    conf = initConf(sys.argv[1])
    logging.basicConfig(filename=os.path.join(conf.log_dir, "elasticbu.log"),
                        level=conf.service_log_level,
                        format='%(levelname)s:%(asctime)s - %(funcName)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger(os.path.basename(__file__))

    # STDOUT AND ERR REDIRECTIONS
    sys.stderr = stdErrorLog()
    sys.stdout = stdOutLog()

    eventQueue = Queue.Queue()

def procline(self, message):
    """Changes the current procname for the process.

    This can be used to make `ps -ef` output more readable.
    """
    setprocname('rq: %s' % (message,))

    def waitCompletition(self):
        self.join()

    def waitFinish(self):
        self.finish = True
        self.join()

    def abortMerging(self):
        self.abort = True
        self.join()


if __name__ == "__main__":
    import procname
    procname.setprocname('anelastic')

    conf = initConf()
    logging.basicConfig(filename=os.path.join(conf.log_dir, "anelastic.log"),
                        level=conf.service_log_level,
                        format='%(levelname)s:%(asctime)s - %(funcName)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger(os.path.basename(__file__))

    # STDOUT AND ERR REDIRECTIONS
    sys.stderr = stdErrorLog()
    sys.stdout = stdOutLog()

    eventQueue = Queue.Queue()