def test_obfuscate_process_password():
    original_title = setproctitle.getproctitle()

    setproctitle.setproctitle("pgcli user=root password=secret host=localhost")
    obfuscate_process_password()
    title = setproctitle.getproctitle()
    expected = "pgcli user=root password=xxxx host=localhost"
    assert title == expected

    setproctitle.setproctitle("pgcli user=root password=top secret host=localhost")
    obfuscate_process_password()
    title = setproctitle.getproctitle()
    expected = "pgcli user=root password=xxxx host=localhost"
    assert title == expected

    setproctitle.setproctitle("pgcli user=root password=top secret")
    obfuscate_process_password()
    title = setproctitle.getproctitle()
    expected = "pgcli user=root password=xxxx"
    assert title == expected

    setproctitle.setproctitle("pgcli postgres://root:secret@localhost/db")
    obfuscate_process_password()
    title = setproctitle.getproctitle()
    expected = "pgcli postgres://root:xxxx@localhost/db"
    assert title == expected

    setproctitle.setproctitle(original_title)
def _save_process_pid(self):
    """Change the process title, appending a -PID_%PID parameter."""
    # 'linux2' on Python 2, 'linux' on Python 3; startswith('linux') covers both
    if sys.platform.startswith('linux'):
        import setproctitle
        title = setproctitle.getproctitle()
        setproctitle.setproctitle("%s -PID_%s" % (title, os.getpid()))
        new_title = setproctitle.getproctitle()
        self.logger.info("Current proc title is %s" % new_title)
def main():
    print('[-] Current process name: {}'.format(setproctitle.getproctitle()))
    setproctitle.setproctitle('arheo_process')
    print('[-] Process name after change: {}'.format(setproctitle.getproctitle()))
    time.sleep(10)
def test_lifecycle(self):
    t = setproctitle.getproctitle()
    try:
        p = progress.Proctitle()
        self.assertEqual(t, setproctitle.getproctitle())
        p('')
        self.assertEqual('bigitrd ', setproctitle.getproctitle())
        p('foo')
        self.assertEqual('bigitrd foo', setproctitle.getproctitle())
    finally:
        del p
        self.assertEqual(t, setproctitle.getproctitle())
def __init__(self, configfile):
    # Initialize Logging
    self.log = logging.getLogger('diamond')

    # Initialize Members
    self.configfile = configfile
    self.config = None

    # We do this weird process title swap around to get the sync manager
    # title correct for ps
    if setproctitle:
        oldproctitle = getproctitle()
        setproctitle('%s - SyncManager' % getproctitle())
    if setproctitle:
        setproctitle(oldproctitle)
def periodic_aggregated_stats_logger(cls):
    hostname = socket.gethostname()
    service_name = '_'.join(setproctitle.getproctitle().split('_')[:-1])
    logd = cls._stats.to_dict()
    logs = []
    for server_type in ['http', 'tcp']:
        try:
            server_type_d = logd['sub'][server_type]['sub']
        except KeyError:
            continue
        for k, v in server_type_d.items():
            d = {
                'method': k,
                'server_type': server_type,
                'hostname': hostname,
                'service_name': service_name,
                'average_response_time': v['average'],
                'total_request_count': v['count'],
                'success_count': v['success_count']
            }
            for k2, v2 in v['sub'].items():
                d['CODE_{}'.format(k2)] = v2['count']
            logs.append(d)
    _logger = logging.getLogger('stats')
    for logd in logs:
        _logger.info(dict(logd))
    asyncio.get_event_loop().call_later(300, cls.periodic_aggregated_stats_logger)
def title(cls, message=None):
    '''Set the title of the process'''
    if message is None:
        return getproctitle()
    else:
        setproctitle('qless-py-worker %s' % message)
        logger.info(message)
def _log(level, relatedobjects, message):
    # Determine the name of the calling method (the frame three levels up)
    caller_frame = traceback.extract_stack()[-3]
    filename = caller_frame.filename
    fmatch = FILENAME_RE.match(filename)
    if fmatch:
        filename = fmatch.group(1)
    caller = '%s:%s:%s()' % (filename, caller_frame.lineno, caller_frame.name)

    # Build a structured log line
    log_ctx = LOG.withPrefix('%s[%s]' % (setproctitle.getproctitle(), os.getpid()))
    fields = {'method': caller}
    generic_counter = 1
    if relatedobjects:
        for obj in relatedobjects:
            try:
                n, v = obj.get_describing_tuple()
                fields[n] = v
            except Exception:
                fields['generic-%s' % generic_counter] = str(obj)
                generic_counter += 1

    # Actually log
    getattr(log_ctx.withFields(fields), level)(message)
def start_analysis(self, name):
    datasource_record = session.query(Datasource).filter_by(
        company_id=self.company_id, name=name).first()
    if datasource_record.analysis is not None:
        return None
    semaphor_record = session.query(Semaphor).filter_by(
        company_id=self.company_id, entity_id=datasource_record.id,
        entity_type='datasource').first()
    if semaphor_record is None:
        semaphor_record = Semaphor(
            company_id=self.company_id, entity_id=datasource_record.id,
            entity_type='datasource', action='write')
        session.add(semaphor_record)
        session.commit()
    else:
        return
    try:
        try:
            original_process_title = setproctitle.getproctitle()
            setproctitle.setproctitle('mindsdb_native_process')
        except Exception:
            pass
        analysis = self.mindsdb_native.analyse_dataset(
            self.get_datasource_obj(name, raw=True))
        datasource_record = session.query(Datasource).filter_by(
            company_id=self.company_id, name=name).first()
        datasource_record.analysis = json.dumps(analysis)
        session.commit()
        try:
            setproctitle.setproctitle(original_process_title)
        except Exception:
            pass
    except Exception as e:
        log.error(e)
    finally:
        semaphor_record = session.query(Semaphor).filter_by(
            company_id=self.company_id, entity_id=datasource_record.id,
            entity_type='datasource').first()
        session.delete(semaphor_record)
        session.commit()
def __init__(
    self,
    function_name,
    traceback_str,
    cause,
    proctitle=None,
    pid=None,
    ip=None,
    actor_repr=None,
):
    """Initialize a RayTaskError."""
    import ray

    # BaseException implements a __reduce__ method that returns
    # a tuple with the type and the value of self.args.
    # https://stackoverflow.com/a/49715949/2213289
    self.args = (function_name, traceback_str, cause, proctitle, pid, ip)
    if proctitle:
        self.proctitle = proctitle
    else:
        self.proctitle = setproctitle.getproctitle()
    self.pid = pid or os.getpid()
    self.ip = ip or ray.util.get_node_ip_address()
    self.function_name = function_name
    self.traceback_str = traceback_str
    self.actor_repr = actor_repr
    # TODO(edoakes): should we handle non-serializable exception objects?
    self.cause = cause
    assert traceback_str is not None
def _format_exception_message(cls, msg, pid):
    return cls._EXCEPTION_LOG_FORMAT.format(
        timestamp=cls._iso_timestamp_for_now(),
        process_title=setproctitle.getproctitle(),
        args=sys.argv,
        pid=pid,
        message=msg)
def get_parameters():
    """
    Parse command-line parameters and return them as a namespace.
    """
    wikietcpath = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Script for populating a local git repository with "
                    "page revisions from a local MediaWiki.")
    parser.add_argument('-v', '--verbose', help='Display status messages',
                        action='store_true')
    parser.add_argument('--wikiuser', help='Wiki username')
    parser.add_argument('--wikipw', help='Wiki password')
    parser.add_argument('--wikiconf', help='Read login information from file',
                        default=os.path.expanduser('~') + '/.conf.metanet')
    parser.add_argument('--wikiserver',
                        help='Server (and port) hosting the Wiki site',
                        default=DEFAULT_SERVER)
    parser.add_argument('--protocol',
                        help='Protocol to connect to the server with',
                        default='https')
    parser.add_argument('--certificate', help='Path to self-signed certificate',
                        default=wikietcpath + '/metanet.cert.pem')
    parser.add_argument('scriptpath', help="ScriptPath of the wiki on the server",
                        default=DEFAULT_SCRIPT_PATH)
    parser.add_argument('-u', '--dbuser', help='Db username',
                        default='readonly_user')
    parser.add_argument('-p', '--dbpw', help='Db password', default='readme')
    parser.add_argument('-d', '--dbname', help='Db name', required=True)
    parser.add_argument('-s', '--socket', help='Db socket',
                        default='/tmp/mysql.sock')
    parser.add_argument('-r', '--repository',
                        help='Git repository root directory', default='.')
    parser.add_argument('-n', '--new', action='store_true',
                        help='Builds a new git repository rather than updating.')
    cmdline = parser.parse_args()

    # Obscure passwords if entered through the command line
    pstr = setproctitle.getproctitle()
    pstr = re.sub(ur'(--wikiuser|--wikipw|--dbuser|--dbpw|-u|-p)(=|\s+)(\S+)',
                  ur'\1\2XXXX', pstr)
    setproctitle.setproctitle(pstr)

    if not cmdline.dbpw:
        cmdline.dbpw = getpass.getpass('Enter database admin password: ')
    config = {}
    with open(cmdline.wikiconf, 'r') as f:
        for line in f:
            if line.isspace():
                continue
            line = line.strip()
            if line.startswith('#'):
                continue
            if '=' in line:
                variable, value = line.split('=', 1)
                config[variable] = value
    if (not cmdline.wikiuser) and ('MNWIKI_USER' in config):
        cmdline.wikiuser = config['MNWIKI_USER']
    if (not cmdline.wikipw) and ('MNWIKI_PW' in config):
        cmdline.wikipw = config['MNWIKI_PW']
    return cmdline
def obfuscate_process_password():
    process_title = setproctitle.getproctitle()
    if '://' in process_title:
        process_title = re.sub(r":(.*):(.*)@", r":\1:xxxx@", process_title)
    elif "=" in process_title:
        process_title = re.sub(r"password=(.+?)((\s[a-zA-Z]+=)|$)",
                               r"password=xxxx\2", process_title)
    setproctitle.setproctitle(process_title)
def process(self, msg, kwargs):
    msg = '%s[%s] %s' % (setproctitle.getproctitle(), os.getpid(), msg)
    kwargs["extra"] = self.extra
    if config.LOG_METHOD_TRACE:
        self._extra['method'] = util_callstack.get_caller(-5)
    return msg, kwargs
def session(uri, sync=False, autoflush=False, expire_on_commit=False):
    '''Returns a managed session to the postgres server.'''
    application_name = ('%s:%s:%05d' % (getproctitle(), socket.gethostname(),
                                        os.getpid()))[-63:]
    connect_args = {'application_name': application_name}
    engine = create_engine(uri, connect_args=connect_args, poolclass=StaticPool)
    with ManagedSession(engine, autoflush=autoflush,
                        expire_on_commit=expire_on_commit) as sesh:
        if not sync:
            sesh.execute('SET synchronous_commit TO OFF;')
        yield sesh
def post_worker_init(_):
    """
    Set process title.

    This is used by airflow.cli.commands.webserver_command to track the
    status of the worker.
    """
    old_title = setproctitle.getproctitle()
    setproctitle.setproctitle(settings.GUNICORN_WORKER_READY_PREFIX + old_title)
def _start_application(self, component):
    '''App startup routines.'''
    logging.critical(
        "application '{}' {} (PID: {}) started.".format(
            getproctitle(), self.version, getpid()
        ))
    self.fire(do_join_room(), self.ichcapi.channel)
def appendproctitle(name):
    '''
    Append "name" to the current process title

    From: https://github.com/saltstack/salt/blob/v2014.7.1/salt/utils/__init__.py#L2377
    '''
    if HAS_SETPROCTITLE:
        setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)
def format(self, record):
    """Prepends current process name to ``record.name`` if running in the
    context of a taskd process that is currently processing a task.
    """
    title = getproctitle()
    if title.startswith('taskd:'):
        record.name = "{0}:{1}".format(title, record.name)
    return super(CustomWatchedFileHandler, self).format(record)
def _find_service_name():
    import setproctitle
    service_name = setproctitle.getproctitle()
    bracket_pos = service_name.find("[")
    if bracket_pos > -1:
        service_name = service_name[bracket_pos + 1:service_name.find("]")]
    if len(service_name) > 10:
        service_name = service_name[-25:]
    return service_name
def connect_ray_pdb(
    host=None,
    port=None,
    patch_stdstreams=False,
    quiet=None,
    breakpoint_uuid=None,
    debugger_external=False,
):
    """Opens a remote PDB on the first available port."""
    if debugger_external:
        assert not host, "Cannot specify both host and debugger_external"
        host = "0.0.0.0"
    elif host is None:
        host = os.environ.get("REMOTE_PDB_HOST", "127.0.0.1")
    if port is None:
        port = int(os.environ.get("REMOTE_PDB_PORT", "0"))
    if quiet is None:
        quiet = bool(os.environ.get("REMOTE_PDB_QUIET", ""))
    if not breakpoint_uuid:
        breakpoint_uuid = uuid.uuid4().hex
    if debugger_external:
        ip_address = ray.worker.global_worker.node_ip_address
    else:
        ip_address = "localhost"
    rdb = RemotePdb(
        breakpoint_uuid=breakpoint_uuid,
        host=host,
        port=port,
        ip_address=ip_address,
        patch_stdstreams=patch_stdstreams,
        quiet=quiet,
    )
    sockname = rdb._listen_socket.getsockname()
    pdb_address = "{}:{}".format(ip_address, sockname[1])
    parentframeinfo = inspect.getouterframes(inspect.currentframe())[2]
    data = {
        "proctitle": setproctitle.getproctitle(),
        "pdb_address": pdb_address,
        "filename": parentframeinfo.filename,
        "lineno": parentframeinfo.lineno,
        "traceback": "\n".join(traceback.format_exception(*sys.exc_info())),
        "timestamp": time.time(),
        "job_id": ray.get_runtime_context().job_id.hex(),
    }
    _internal_kv_put(
        "RAY_PDB_{}".format(breakpoint_uuid),
        json.dumps(data),
        overwrite=True,
        namespace=ray_constants.KV_NAMESPACE_PDB,
    )
    rdb.listen()
    _internal_kv_del(
        "RAY_PDB_{}".format(breakpoint_uuid),
        namespace=ray_constants.KV_NAMESPACE_PDB,
    )
    return rdb
def appendproctitle(name):
    """
    Append "name" to the current process title
    """
    if HAS_SETPROCTITLE:
        current = setproctitle.getproctitle()
        if current.strip().endswith("MainProcess"):
            current, _ = current.rsplit("MainProcess", 1)
        setproctitle.setproctitle("{} {}".format(current.rstrip(), name))
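# A minimal demo sketch (not from the source) of the appendproctitle() above,
# assuming the module's HAS_SETPROCTITLE guard is true. When the inherited
# title ends in "MainProcess" (typical for multiprocessing children), that
# suffix is replaced by the new name rather than appended to.
import setproctitle

setproctitle.setproctitle("salt-minion MainProcess")  # hypothetical starting title
appendproctitle("JobQueue")
assert setproctitle.getproctitle() == "salt-minion JobQueue"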
def start_poller(proc_id, carbon_queue, job_queue):
    proc_title = setproctitle.getproctitle()
    setproctitle.setproctitle("%s - poller#%s" % (proc_title, proc_id))
    logger.debug("start start_poller()")
    while True:
        launch_time, job = job_queue.get()
        launch_timedelta = launch_time - int(time())
        if launch_timedelta > 0:
            logger.debug("sleep %s", launch_timedelta)
            sleep(launch_timedelta)
        else:
            logger.warning("lateness %s's", launch_timedelta)
        poll_start = int(time())
        logger.warning("--polling--")
        config = job.config
        hosts = job.hosts

        # Get the indexes in a first poll
        index_oids = config['indexes'].keys()
        if index_oids:
            index_oids_group = [(oid,) for oid in list(index_oids)]
            snmp_data = snmp_poller.poller(hosts, index_oids_group, COMMUNITY)
            index_table = defaultdict_rec()
            for snmp_res in snmp_data:
                host, base_oid, index_part, value = snmp_res
                index_name = config['indexes'][base_oid]
                index_table[host][index_name][index_part] = normalize_ifname(value)
        target_oid_indexes = {}
        target_oid_metric_pfx = {}
        for target_oid in config['target_oids']:
            if 'index_name' in target_oid:
                target_oid_indexes[target_oid['oid']] = target_oid['index_name']
            target_oid_metric_pfx[target_oid['oid']] = target_oid['metric_prefix']

        # Get the other OIDs in a second poll
        oids_group = [(oid['oid'],) for oid in config['target_oids']]
        snmp_data = snmp_poller.poller(hosts, oids_group, COMMUNITY)
        request_time = int(time())
        for snmp_res in snmp_data:
            host, base_oid, index_part, value = snmp_res
            if index_table[host][target_oid_indexes[base_oid]][index_part]:
                oid_index_name = index_table[host][target_oid_indexes[base_oid]][index_part]
            else:
                oid_index_name = '%s' % index_part
            metric_pfx = target_oid_metric_pfx[base_oid]
            short_hostname = normalize_hostname(host)
            if "{index}" in metric_pfx:
                metric = ("%s.%s" % (short_hostname,
                                     metric_pfx.format(index=oid_index_name)))
            else:
                metric = ("%s.%s.%s" % (short_hostname, metric_pfx, oid_index_name))
            # print (metric, value, request_time)
            msg = "%s %s %s\n" % (metric, value, request_time)
            carbon_queue.put(msg)
        logger.debug("polling executed in %s's", int(time()) - poll_start)
def main_process(self):
    """Main process for zfssnapd"""
    if (settings['rpdb2_wait']):
        # a wait to attach with rpdb2...
        log_info('Waiting for rpdb2 to attach.')
        time.sleep(float(settings['rpdb2_wait']))
    log_info('program starting.')
    log_debug("The daemon_canary is: '{0}'".format(settings['daemon_canary']))
    # Do a nice output message to the log
    pwnam = pwd.getpwnam(settings['run_as_user'])
    if setproctitle_support:
        gpt_output = getproctitle()
    else:
        gpt_output = "no getproctitle()"
    log_debug(
        "PID: {0} process name: '{1}' daemon: '{2}' User: '{3}' UID: {4} GID {5}"
        .format(os.getpid(), gpt_output, self.i_am_daemon(),
                pwnam.pw_name, os.getuid(), os.getgid()))

    if (settings['memory_debug']):
        # Turn on memory debugging
        log_info('Turning on GC memory debugging.')
        gc.set_debug(gc.DEBUG_LEAK)

    # Create a Process object so that we can check in on ourself resource
    # wise
    self.proc_monitor = psutil.Process(pid=os.getpid())

    # Initialise a few nice things for the loop
    debug_mark = get_boolean_setting('debug_mark')
    sleep_time = int(get_numeric_setting('sleep_time', float))
    debug_sleep_time = int(get_numeric_setting('debug_sleep_time', float))
    sleep_time = debug_sleep_time if debug() else sleep_time

    # Initialise Manager stuff
    ds_settings = Config.read_ds_config()

    # Process Main Loop
    while (self.check_signals()):
        try:
            Manager.run(ds_settings, sleep_time)
        except Exception as ex:
            log_error('Exception: {0}'.format(str(ex)))
        if debug_mark:
            log_debug("----MARK---- sleep({0}) seconds ----".format(sleep_time))
        self.main_sleep(sleep_time)

    log_info('Exited main loop - process terminating normally.')
    sys.exit(os.EX_OK)
def proctitle(title):
    """Temporarily change the process title, then restore it."""
    orig_title = getproctitle()
    try:
        setproctitle(title)
        yield
        setproctitle(orig_title)
    except:
        setproctitle(orig_title)
        raise
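# Hedged usage sketch for the proctitle() helper above, assuming it is
# decorated with @contextlib.contextmanager in its module (the decorator is
# not shown in the snippet). The title is swapped for the duration of the
# block and restored even if the body raises.
import time
from setproctitle import getproctitle, setproctitle

def demo_proctitle():
    before = getproctitle()
    with proctitle('demo: busy'):    # visible in `ps` while the block runs
        time.sleep(0.1)
    assert getproctitle() == before  # original title restored afterwards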
def title(cls, message=None, level='INFO'):
    '''Set the title of the process'''
    if message is None:
        return getproctitle()
    else:
        setproctitle('qless-py-worker %s' % message)
        if level == 'DEBUG':
            logger.debug(message)
        elif level == 'INFO':
            logger.info(message)
def proc_init_run(procnum, func, args, kwargs):
    """Set the process title and run."""
    title = spt.getproctitle()
    title = "{} : {} : {}".format(title, procnum, func.__name__)
    spt.setproctitle(title)
    return func(*args, **kwargs)
def __init__(self, rate: float, print_delay_threshold: Optional[float] = 0.0) -> None:
    """Rate in Hz for ratekeeping. print_delay_threshold must be nonnegative."""
    self._interval = 1. / rate
    self._next_frame_time = sec_since_boot() + self._interval
    self._print_delay_threshold = print_delay_threshold
    self._frame = 0
    self._remaining = 0.0
    self._process_name = getproctitle()
    self._dts = deque([self._interval], maxlen=100)
    self._last_monitor_time = sec_since_boot()
def obfuscate_process_password():
    process_title = setproctitle.getproctitle()
    if "://" in process_title:
        process_title = re.sub(r":(.*):(.*)@", r":\1:xxxx@", process_title)
    elif "=" in process_title:
        process_title = re.sub(r"password=(.+?)(\s|$)", r"password=xxxx\2",
                               process_title)
    elif "-w" in process_title:
        process_title = re.sub(r"\-w\s+([^\s]+)(\s|$)", r"-w xxxx\2",
                               process_title)
    setproctitle.setproctitle(process_title)
def __init__(self, configfile):
    # Initialize Logging
    self.log = logging.getLogger('diamond')

    # Initialize Members
    self.configfile = configfile
    self.config = None
    self.handlers = []
    self.handler_queue = []
    self.modules = {}

    # We do this weird process title swap around to get the sync manager
    # title correct for ps
    if setproctitle:
        oldproctitle = getproctitle()
        setproctitle('%s - SyncManager' % getproctitle())
    self.manager = multiprocessing.Manager()
    if setproctitle:
        setproctitle(oldproctitle)

    self.metric_queue = self.manager.Queue()
def format(self, record):
    """
    Format a log record

    :param record:
    """
    if _setproctitle_is_available:
        record.__dict__['process_title'] = setproctitle.getproctitle()
    else:
        record.__dict__['process_title'] = sys.argv[0]
    return logging.Formatter.format(self, record)
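# Hedged wiring sketch (assumption, not from the source): the format() method
# above belongs to a logging.Formatter subclass; with a hypothetical class
# name TitleFormatter, it makes %(process_title)s usable in format strings.
import logging

handler = logging.StreamHandler()
handler.setFormatter(TitleFormatter('%(process_title)s[%(process)d] %(message)s'))
logging.getLogger('demo').addHandler(handler)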
def __init__(self, config, threadid, q):
    Process.__init__(self)
    self.threadid = threadid
    self.q = q

    # Initialize Logging
    self.log = logging.getLogger('metrichammer')

    # Initialize Members
    # self.configfile = configfile
    self.config = config
    self.proto = 'tcp'
    self.host = self.config['server']['host']
    self.port = int(self.config['server']['port'])
    self.socket = None
    self.keepalive = 0
    self.keepaliveinterval = int(self.config['server']['keepaliveinterval'])
    self.timeout = float(self.config['server']['timeout'])
    self.flow_info = 0
    self.scope_id = 0
    self.metrics = []
    self.batch_size = int(self.config['server']['batchsize'])
    self.max_backlog_multiplier = 4
    self.trim_backlog_multiplier = 5
    self.namespace = self.config['server']['namespace']
    self.maxmetrics = int(self.config['server']['maxmetrics'])
    self.runs = int(self.config['server']['runs'])
    self.metriccount = 0

    # error logging throttling
    self.server_error_interval = float(120)
    self._errors = {}
    self.statsqueue = []

    # We do this weird process title swap around to get the sync manager
    # title correct for ps
    if setproctitle:
        oldproctitle = getproctitle()
        setproctitle('%s - SyncManager' % getproctitle())
    if setproctitle:
        setproctitle(oldproctitle)
def __init__(self, function_name, traceback_str):
    """Initialize a RayTaskError."""
    if setproctitle:
        self.proctitle = setproctitle.getproctitle()
    else:
        self.proctitle = "ray_worker"
    self.pid = os.getpid()
    self.host = os.uname()[1]
    self.function_name = function_name
    self.traceback_str = traceback_str
    assert traceback_str is not None
def run(self):
    self._parent_proctitle = setproctitle.getproctitle()
    handler = NetworkMetadataProxyHandler(self.network_id,
                                          self.router_id,
                                          self.domain_id)
    proxy = wsgi.Server('opflex-network-metadata-proxy')
    proxy.start(handler, self.port, host=self.host)

    # Drop privileges after port bind
    super(ProxyDaemon, self).run()

    proxy.wait()
def service_process(service, config, log):
    proc = multiprocessing.current_process()
    if setproctitle:
        setproctitle("%s-%s" % (getproctitle(), proc.name))
    log.debug("Starting process %s for sync service information" % proc.name)
    # signal.signal(signal.SIGALRM, signal_to_exception)
    signal.signal(signal.SIGHUP, signal_to_exception)

    # Update service information every `interval` seconds; the interval
    # should come from the config file.
    interval = 4
    if interval < 0 or interval is None:
        log.debug("Interval is invalid, so the interval of updating service "
                  "information defaults to 360")
    next_window = math.floor(time.time() / interval) * interval
    stagger_offset = random.uniform(0, interval - 1)
    max_time = int(max(interval - stagger_offset, 1))
    log.debug("Max sync service information time %s" % max_time)
    while True:
        try:
            time_to_sleep = (next_window + stagger_offset) - time.time()
            if time_to_sleep > 0:
                time.sleep(time_to_sleep)
            elif time_to_sleep < 0:
                next_window = time.time()
            next_window += interval
            signal.alarm(max_time)
            # Collect service information and update the service in the db
            # service.update()
            fetch_disks_info()
            sys.stdout.flush()
            log.info("fetch disks information")
            signal.alarm(0)
        except SIGALRMException:
            log.error("Took too long to run! Killed!")
            stagger_offset = stagger_offset * 0.9
            max_time = int(max(interval - stagger_offset, 1))
            log.debug("Max time of updating service information is: %s" % max_time)
        except SIGHUPException:
            pass
        except Exception:
            log.error("Updating service failed!")
            break
def wrap_proctitle(string):
    """Set process title for a given context

    :param str string: Context to display in process title
    """
    if setproctitle:
        oldtitle = setproctitle.getproctitle()
        setproctitle.setproctitle("%s [%s]" % (sys.argv[0], string))
    yield
    if setproctitle:
        setproctitle.setproctitle(oldtitle)
def group_interpreter(conn, objs, mapping, affinity=None,
                      plasma_client_file_name=None, params=None):
    import psutil
    if affinity is not None:
        p = psutil.Process()
        p.cpu_affinity(affinity)

    # Initialize objects if params are given
    if params is not None:
        for i, param in enumerate(params):
            if param is not None:
                objs[i] = objs[i](*param[0], **param[1])

    if plasma_client_file_name is not None:
        plasma_client = plasma_info.plasma.connect(plasma_client_file_name)
    else:
        plasma_client = None

    mapping = mapping.tolist()
    mapping_dict = dict()
    for i, m in enumerate(mapping):
        mapping_dict[m] = i

    # Set process name
    process_name = "apalis: "
    for i, obj in enumerate(objs[:-1]):
        process_name += f"{obj.__class__.__name__} {mapping[i]}, "
    process_name += f"{objs[-1].__class__.__name__} {mapping[-1]}"
    process_name += "; " + getproctitle()
    setproctitle(process_name)

    while True:
        tasks = conn.recv()
        if isinstance(tasks, str):
            break
        else:
            out = []
            for task in tasks:
                i = mapping_dict[task['i']]
                res = execute(objs[i], task, plasma_client=plasma_client)
                out.append(res)
            if "task_id" in tasks[0]:
                conn.send((tasks[0]["task_id"], out))
            else:
                conn.send(out)
def test_process_set_title():
    from uuid import uuid4
    from multiprocessing import Queue
    from setproctitle import getproctitle
    from bigchaindb.utils import Process

    queue = Queue()
    uuid = str(uuid4())
    process = Process(target=lambda: queue.put(getproctitle()), name=uuid)
    process.start()
    assert queue.get() == uuid
def wrap_proctitle(string):
    """Set process title for a given context.

    Args:
        string (str): Context to display in process title
    """
    if setproctitle:
        oldtitle = setproctitle.getproctitle()
        setproctitle.setproctitle('{} [{}]'.format(sys.argv[0], string))
    yield
    if setproctitle:
        setproctitle.setproctitle(oldtitle)
def test_obfuscate_process_password():
    original_title = setproctitle.getproctitle()

    setproctitle.setproctitle('vcli vertica://dbadmin:pass@localhost/dbname')
    obfuscate_process_password()
    title = setproctitle.getproctitle()
    assert title == 'vcli vertica://dbadmin:xxxx@localhost/dbname'

    setproctitle.setproctitle('vcli -h localhost -U dbadmin -w pass dbname')
    obfuscate_process_password()
    title = setproctitle.getproctitle()
    assert title == 'vcli -h localhost -U dbadmin -w xxxx dbname'

    setproctitle.setproctitle(
        'vcli --host=localhost --user=dbadmin --password=pass dbname')
    obfuscate_process_password()
    title = setproctitle.getproctitle()
    assert title == (
        'vcli --host=localhost --user=dbadmin --password=xxxx dbname')

    setproctitle.setproctitle(original_title)
def test_setup(dummy_actor_mocked):
    """
    Test that the socket interface and the signal handler are correctly
    initialized, and that the proc title is correctly set after the run
    function is called
    """
    dummy_actor_mocked._setup()
    assert setproctitle.getproctitle() == ACTOR_NAME
    assert len(dummy_actor_mocked.socket_interface.setup.mock_calls) == 1
    assert len(dummy_actor_mocked._signal_handler_setup.mock_calls) == 1
    dummy_actor_mocked._kill_process()
def update_proctitle(procname):
    try:
        import setproctitle
        print('CHANGING PROCESS TITLE')
        old_title = setproctitle.getproctitle()
        print('old_title = %r' % (old_title,))
        #new_title = 'IBEIS_' + procname + ' ' + old_title
        #new_title = procname + ' ' + old_title
        new_title = 'ibeis_zmq_loop'
        print('new_title = %r' % (new_title,))
        setproctitle.setproctitle(new_title)
    except ImportError:
        print('pip install setproctitle')
def start(self):
    """Start the daemon."""
    self._parent_proctitle = setproctitle.getproctitle()
    if self.pidfile is not None and self.pidfile.is_running():
        self.pidfile.unlock()
        LOG.error('Pidfile %s already exists. Daemon already running?',
                  self.pidfile)
        sys.exit(1)

    # Start the daemon
    self.daemonize()
    self.run()
async def dev(self, ctx: context.Context):
    """
    Base command for bot developer commands.

    Displays a message with stats about the bot.
    """
    python_version = f'{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}'
    discordpy_version = pkg_resources.get_distribution('discord.py').version
    platform = sys.platform
    process_name = setproctitle.getproctitle()
    process_id = self.bot.process.pid
    thread_count = self.bot.process.num_threads()

    description = [
        f'I am running on the python version **{python_version}** on the OS **{platform}** '
        f'using the discord.py version **{discordpy_version}**. '
        f'The process is running as **{process_name}** on PID **{process_id}** and is using '
        f'**{thread_count}** threads.'
    ]

    if isinstance(self.bot, commands.AutoShardedBot):
        description.append(
            f'The bot is automatically sharded with **{self.bot.shard_count}** shard(s) and can '
            f'see **{len(self.bot.guilds)}** guilds and **{len(self.bot.users)}** users.'
        )
    else:
        description.append(
            f'The bot is not sharded and can see **{len(self.bot.guilds)}** guilds and '
            f'**{len(self.bot.users)}** users.')

    with self.bot.process.oneshot():
        memory_info = self.bot.process.memory_full_info()
        physical_memory = humanize.naturalsize(memory_info.rss)
        virtual_memory = humanize.naturalsize(memory_info.vms)
        unique_memory = humanize.naturalsize(memory_info.uss)
        cpu_usage = self.bot.process.cpu_percent(interval=None)

        description.append(
            f'The process is using **{physical_memory}** of physical memory, **{virtual_memory}** '
            f'of virtual memory and **{unique_memory}** of memory that is unique to the process. '
            f'It is also using **{cpu_usage}%** of CPU.')

    embed = discord.Embed(
        title=f'{self.bot.user.name} bot information page.',
        colour=0xF5F5F5)
    embed.description = '\n\n'.join(description)

    return await ctx.send(embed=embed)
def handler_process(handlers, metric_queue, log):
    proc = multiprocessing.current_process()
    if setproctitle:
        setproctitle('%s - %s' % (getproctitle(), proc.name))

    log.debug('Starting process %s', proc.name)

    while True:
        metric = metric_queue.get(block=True, timeout=None)
        for handler in handlers:
            if metric is not None:
                handler._process(metric)
            else:
                handler._flush()
def wrapper(*args, **kwargs):
    start_time = int(time.time() * 1000)
    self = args[0]
    rid = kwargs.pop('request_id')
    entity = kwargs.pop('entity')
    from_id = kwargs.pop('from_id')
    wrapped_func = func
    result = None
    error = None
    if not asyncio.iscoroutine(func):
        wrapped_func = asyncio.coroutine(func)
    Stats.tcp_stats['total_requests'] += 1
    try:
        result = yield from asyncio.wait_for(wrapped_func(self, **kwargs), 120)
    except asyncio.TimeoutError as e:
        Stats.tcp_stats['timedout'] += 1
        error = str(e)
    except Exception as e:
        Stats.tcp_stats['total_errors'] += 1
        _logger.exception('api request exception')
        error = str(e)
    else:
        Stats.tcp_stats['total_responses'] += 1
    end_time = int(time.time() * 1000)

    hostname = socket.gethostname()
    service_name = '_'.join(setproctitle.getproctitle().split('_')[:-1])
    logd = {
        'endpoint': func.__name__,
        'time_taken': end_time - start_time,
        'hostname': hostname,
        'service_name': service_name
    }
    logging.getLogger('stats').info(logd)
    _logger.debug('Time taken for %s is %d milliseconds',
                  func.__name__, end_time - start_time)
    if not old_api:
        return self._make_response_packet(request_id=rid, from_id=from_id,
                                          entity=entity, result=result,
                                          error=error)
    else:
        return self._make_response_packet(request_id=rid, from_id=from_id,
                                          entity=entity, result=result,
                                          error=error, old_api=old_api,
                                          replacement_api=replacement_api)
def _set_process_name(name):  # pragma: no cover
    """Function for setting the name of new processes."""
    # Set the name of a new process if 'setproctitle' exists.
    try:
        from setproctitle import getproctitle
        from setproctitle import setproctitle

        current_name = getproctitle()
        name = current_name + ' -> ' + name
        setproctitle(name)
    # If 'setproctitle' does not exist, do nothing.
    except ImportError:
        pass
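# Hedged sketch (not from the source) of the chaining behaviour of
# _set_process_name() above: each call appends " -> <name>" to the current
# title, so nested workers show their lineage in `ps`. With a hypothetical
# starting title of "python worker.py":
#
#     _set_process_name('loader')   # title: "python worker.py -> loader"
#     _set_process_name('parser')   # title: "python worker.py -> loader -> parser"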