def start(self):
    """
    Post-fork initialization.

    This is mainly done here for the future possibility that we'll be able to run mules
    post-fork without exec()ing. In a programmed mule it could be done at __init__ time.
    """
    if self.stack._is_mule:
        if not uwsgi.in_farm():
            raise RuntimeError(
                'Mule %s is not in a farm! Set `farm = <pool_name>:%s` in uWSGI configuration'
                % (uwsgi.mule_id(),
                   ','.join(map(str, range(1, len([x for x in self.stack._configured_mules
                                                   if x.endswith('galaxy/main.py')]) + 1)))))
        elif len(self.stack._farms) > 1:
            raise RuntimeError(
                'Mule %s is in multiple farms! This configuration is not supported due to locking issues'
                % uwsgi.mule_id())
    # only mules receive messages so don't bother starting the dispatcher if we're not a mule (although
    # currently it doesn't have any registered handlers and so wouldn't start anyway)
    super().start()
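The `farm = <pool_name>:<ids>` setting named in the error above groups mule slots into a named pool. A minimal uWSGI ini sketch that would satisfy this check (the `job-handlers` pool name and two-mule count are illustrative assumptions, not taken from the snippet):

[uwsgi]
; two mules running the Galaxy entry point, grouped into one farm
mule = lib/galaxy/main.py
mule = lib/galaxy/main.py
farm = job-handlers:1,2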
def handle_signal(signum, frame):
    log.info('Received signal %d, exiting', signum)
    if uwsgi and 'mule_id' in dir(uwsgi) and uwsgi.mule_id() > 0:
        farms = os.environ.get(UWSGI_FARMS_VAR, None)
        if farms:
            for farm in farms.split(','):
                uwsgi.farm_msg(farm, SHUTDOWN_MSG)
        else:
            uwsgi.mule_msg(SHUTDOWN_MSG, uwsgi.mule_id())
    exit.set()
def main():
    # connect and declare the message queue/kombu objects.
    # only py-amqp supports ssl and doesn't recognize amqps
    # so fix up the connection string accordingly
    connString = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(options.mquser, options.mqpassword, options.mqserver, options.mqport, options.mqvhost)
    if options.mqprotocol == 'amqps':
        mqSSL = True
    else:
        mqSSL = False
    mqConn = Connection(connString, ssl=mqSSL)
    # Task Exchange for events sent via http for us to normalize and post to elastic search
    if options.mqack:
        # conservative, store msgs to disk, ack each message
        eventTaskExchange = Exchange(name=options.taskexchange, type='direct', durable=True, delivery_mode=2)
    else:
        # fast, transient delivery, store in memory only, auto-ack messages
        eventTaskExchange = Exchange(name=options.taskexchange, type='direct', durable=True, delivery_mode=1)
    eventTaskExchange(mqConn).declare()
    # Queue for the exchange
    if options.mqack:
        eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange, routing_key=options.taskexchange, durable=True, no_ack=False)
    else:
        eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange, routing_key=options.taskexchange, durable=True, no_ack=True)
    eventTaskQueue(mqConn).declare()
    # topic exchange for anyone who wants to queue and listen for mozdef.event
    eventTopicExchange = Exchange(name=options.eventexchange, type='topic', durable=False, delivery_mode=1)
    eventTopicExchange(mqConn).declare()
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    # consume our queue and publish on the topic exchange
    taskConsumer(mqConn, eventTaskQueue, eventTopicExchange, es).run()
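For this consumer to have anything to do, a publisher must write JSON events to the task exchange. A minimal kombu sketch against the objects declared above (the payload is illustrative, not a real MozDef event):

producer = mqConn.Producer(serializer='json')
producer.publish(
    {'summary': 'example event'},       # illustrative payload
    exchange=eventTaskExchange,
    routing_key=options.taskexchange,
)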
def _mule_index_in_farm(self, farm_name, mule_id=None):
    mule_id = mule_id or uwsgi.mule_id()
    try:
        mules = self.configured_pools[farm_name]
        return mules.index(mule_id)
    except (KeyError, ValueError):
        return -1
def main():
    # connect and declare the message queue/kombu objects.
    # only py-amqp supports ssl and doesn't recognize amqps
    # so fix up the connection string accordingly
    connString = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(options.mquser, options.mqpassword, options.mqserver, options.mqport, options.mqvhost)
    if options.mqprotocol == 'amqps':
        mqSSL = True
    else:
        mqSSL = False
    mqConn = Connection(connString, ssl=mqSSL)
    # Task Exchange for events sent via http for us to normalize and post to elastic search
    if options.mqack:
        # conservative, store msgs to disk, ack each message
        eventTaskExchange = Exchange(name=options.taskexchange, type='direct', durable=True, delivery_mode=2)
    else:
        # fast, transient delivery, store in memory only, auto-ack messages
        eventTaskExchange = Exchange(name=options.taskexchange, type='direct', durable=True, delivery_mode=1)
    eventTaskExchange(mqConn).declare()
    # Queue for the exchange
    if options.mqack:
        eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange, routing_key=options.taskexchange, durable=True, no_ack=False)
    else:
        eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange, routing_key=options.taskexchange, durable=True, no_ack=True)
    eventTaskQueue(mqConn).declare()
    # topic exchange for anyone who wants to queue and listen for mozdef.event
    eventTopicExchange = Exchange(name=options.eventexchange, type='topic', durable=False, delivery_mode=1)
    eventTopicExchange(mqConn).declare()
    if hasUWSGI:
        sys.stdout.write("started as uwsgi mule {0}\n".format(uwsgi.mule_id()))
    else:
        sys.stdout.write('started without uwsgi\n')
    # consume our queue and publish on the topic exchange
    taskConsumer(mqConn, eventTaskQueue, eventTopicExchange, es).run()
def __call__(self):
    if uwsgi.mule_id() == self.num:
        print(" i am the mule")
        while True:
            message = uwsgi.mule_get_msg()
            if message:
                self.f(message)
def print_with_color(msg: str, **kwargs):
    bold = kwargs.pop('bold', False)
    if bold:
        opts = kwargs.setdefault('opts', [])
        if 'bold' not in opts:
            opts.append('bold')
    pid = os.getpid()
    try:
        # noinspection PyPackageRequirements,PyUnresolvedReferences
        import uwsgi
        master = uwsgi.masterpid()
        worker = uwsgi.worker_id()
        mule = uwsgi.mule_id()
    except ImportError:
        uwsgi = None
        master = 0
        worker = 0
        mule = 0
    if mule:
        print(colorize('[mule {}] {}'.format(mule, msg), **kwargs))
    elif worker:
        print(colorize('[worker {}] {}'.format(worker, msg), **kwargs))
    elif pid == master:
        print(colorize('[master] {}'.format(msg), **kwargs))
    elif uwsgi:
        print(colorize('[spooler {}] {}'.format(pid, msg), **kwargs))
    else:
        print(colorize(msg, **kwargs))
def __call__(self):
    if uwsgi.mule_id() == self.num:
        try:
            self.f()
        except BaseException:
            exc = sys.exc_info()
            sys.excepthook(exc[0], exc[1], exc[2])
            sys.exit(1)
def instance_id(self):
    if not self._is_mule:
        instance_id = uwsgi.worker_id()
    elif self._farm_name:
        return self._mule_index_in_farm(self._farm_name) + 1
    else:
        instance_id = uwsgi.mule_id()
    return instance_id
def register_postfork_function(cls, f, *args, **kwargs):
    if uwsgi.mule_id() == 0:
        cls.postfork_functions.append((f, args, kwargs))
    else:
        # mules are forked from the master and run the master's postfork functions immediately before the forked
        # process is replaced. that is prevented in the _do_uwsgi_postfork function, and because programmed mules
        # are standalone non-forking processes, they should run postfork functions immediately
        f(*args, **kwargs)
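A hypothetical caller registers work to run after uWSGI forks; on the master (mule id 0) the callable is queued for the postfork hook, while a programmed mule executes it on the spot. A one-line sketch (the function name is an illustrative assumption):

# hypothetical usage; reopen_database_connections is an illustrative name
UWSGIApplicationStack.register_postfork_function(reopen_database_connections)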
def _do_uwsgi_postfork():
    for i, mule in enumerate(_uwsgi_configured_mules()):
        if mule is not True and i + 1 == uwsgi.mule_id():
            # mules will inherit the postfork function list and call them immediately upon fork, but programmed mules
            # should not do that (they will call the postfork functions in-place as they start up after exec())
            UWSGIApplicationStack.postfork_functions = [(_mule_fixup, (), {})]
    for f, args, kwargs in [t for t in UWSGIApplicationStack.postfork_functions]:
        log.debug('Calling postfork function: %s', f)
        f(*args, **kwargs)
def get_unique_id():
    try:
        return uwsgi.worker_id()
    except Exception:
        try:
            return uwsgi.mule_id()
        except Exception:
            return os.getpid()
def __call__(self):
    if uwsgi.mule_id() == self.num:
        while True:
            try:
                self.f()
            except BaseException:
                exc = sys.exc_info()
                sys.excepthook(exc[0], exc[1], exc[2])
                sys.exit(1)
def __call__(self):
    if uwsgi.mule_id() == 0:
        return
    if not uwsgi.in_farm(self.farm):
        return
    while True:
        message = uwsgi.farm_get_msg()
        if message:
            self.f(message)
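The sending side of this loop is uwsgi.farm_msg, as used by the shutdown handler earlier. A one-line sketch, assuming a farm named 'workers':

uwsgi.farm_msg('workers', 'recalc')  # queued for the farm; a mule's farm_get_msg() receives it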
def __init__(self, mqConnection, taskQueue, topicExchange, esConnection):
    self.connection = mqConnection
    self.esConnection = esConnection
    self.taskQueue = taskQueue
    self.topicExchange = topicExchange
    self.mqproducer = self.connection.Producer(serializer='json')
    if hasUWSGI:
        self.muleid = uwsgi.mule_id()
    else:
        self.muleid = 0
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info("started without uwsgi")
    # establish api interface with papertrail
    ptRequestor = PTRequestor(options.ptapikey, evmax=options.ptquerymax)
    # consume our queue
    taskConsumer(ptRequestor, es).run()
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    # establish api interface with papertrail
    ptRequestor = PTRequestor(options.ptapikey, evmax=options.ptquerymax)
    # consume our queue
    taskConsumer(ptRequestor, es).run()
def main():
    if hasUWSGI:
        sys.stdout.write("started as uwsgi mule {0}\n".format(uwsgi.mule_id()))
    else:
        sys.stdout.write('started without uwsgi\n')
    # establish api interface with papertrail
    ptRequestor = PTRequestor(options.ptapikey, evmax=options.ptquerymax)
    # consume our queue
    taskConsumer(ptRequestor, es).run()
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info("started without uwsgi")
    if options.mqprotocol not in ("pubsub",):
        logger.error("Can only process pubsub queues, terminating")
        sys.exit(1)
    # connect to GCP and consume our queue
    PubSubtaskConsumer(es, options).run()
def __init__(self, mqConnection, taskQueue, esConnection):
    self.connection = mqConnection
    self.esConnection = esConnection
    self.taskQueue = taskQueue
    self.mqproducer = self.connection.Producer(serializer='json')
    if hasUWSGI:
        self.muleid = uwsgi.mule_id()
    else:
        self.muleid = 0
    if options.esbulksize != 0:
        # if we are bulk posting enable a timer to occasionally flush the pyes bulker even if it's not full
        # to prevent events from sticking around an idle worker
        Timer(options.esbulktimeout, self.flush_es_bulk).start()
def __init__(self, mqConnection, taskQueue, topicExchange, esConnection):
    self.connection = mqConnection
    self.esConnection = esConnection
    self.taskQueue = taskQueue
    self.topicExchange = topicExchange
    self.mqproducer = self.connection.Producer(serializer='json')
    if hasUWSGI:
        self.muleid = uwsgi.mule_id()
    else:
        self.muleid = 0
    if options.esbulksize != 0:
        # if we are bulk posting enable a timer to occasionally flush the bulker even if it's not full
        # to prevent events from sticking around an idle worker
        self.esConnection.start_bulk_timer()
def facts(self):
    facts = super(UWSGIApplicationStack, self).facts
    if not self._is_mule:
        facts.update({
            'pool_name': 'web',
            'server_id': uwsgi.worker_id(),
        })
    else:
        facts.update({
            'pool_name': self._farm_name,
            'server_id': uwsgi.mule_id(),
        })
    facts['instance_id'] = self.instance_id
    return facts
def main():
    # connect and declare the message queue/kombu objects.
    # what sort of message queue are we talking to?
    if options.mqprotocol in ('amqp', 'amqps'):
        # only py-amqp supports ssl and doesn't recognize amqps
        # so fix up the connection string accordingly
        connString = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(options.mquser, options.mqpassword, options.mqserver, options.mqport, options.mqvhost)
        if options.mqprotocol == 'amqps':
            mqSSL = True
        else:
            mqSSL = False
        mqConn = Connection(connString, ssl=mqSSL)
        # Task Exchange for events sent via http for us to normalize and post to elastic search
        if options.mqack:
            # conservative, store msgs to disk, ack each message
            eventTaskExchange = Exchange(name=options.taskexchange, type='direct', durable=True, delivery_mode=2)
        else:
            # fast, transient delivery, store in memory only, auto-ack messages
            eventTaskExchange = Exchange(name=options.taskexchange, type='direct', durable=True, delivery_mode=1)
        eventTaskExchange(mqConn).declare()
        # Queue for the exchange
        if options.mqack:
            eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange, routing_key=options.taskexchange, durable=True, no_ack=False)
        else:
            eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange, routing_key=options.taskexchange, durable=True, no_ack=True)
        eventTaskQueue(mqConn).declare()
        # topic exchange for anyone who wants to queue and listen for mozdef.event
        # commented out to begin deprecation for this feature
        # eventTopicExchange = Exchange(name=options.eventexchange, type='topic', durable=False, delivery_mode=1)
        # eventTopicExchange(mqConn).declare()
    if options.mqprotocol in ('sqs',):
        # amazon SQS
        connString = 'sqs://%s:%s@' % (urllib.quote(options.accesskey, safe=''), urllib.quote(options.secretkey, safe=''))
        mqConn = Connection(connString, transport_options=dict(region=options.region))
        # for sqs, set taskexchange to the sqs queue name.
        eventTaskQueue = mqConn.SimpleQueue(options.taskexchange)
    if hasUWSGI:
        sys.stdout.write("started as uwsgi mule {0}\n".format(uwsgi.mule_id()))
    else:
        sys.stdout.write('started without uwsgi\n')
    # consume our queue
    taskConsumer(mqConn, eventTaskQueue, es).run()
def start(self):
    """
    Post-fork initialization.

    This is mainly done here for the future possibility that we'll be able to run mules
    post-fork without exec()ing. In a programmed mule it could be done at __init__ time.
    """
    if self.stack._is_mule:
        if not uwsgi.in_farm():
            raise RuntimeError(
                'Mule %s is not in a farm! Set `farm = <pool_name>:%s` in uWSGI configuration'
                % (uwsgi.mule_id(),
                   ','.join(map(str, range(1, len([x for x in self.stack._configured_mules
                                                   if x.endswith('galaxy/main.py')]) + 1)))))
        elif len(self.stack._farms) > 1:
            raise RuntimeError(
                'Mule %s is in multiple farms! This configuration is not supported due to locking issues'
                % uwsgi.mule_id())
    # only mules receive messages so don't bother starting the dispatcher if we're not a mule (although
    # currently it doesn't have any registered handlers and so wouldn't start anyway)
    super(UWSGIFarmMessageTransport, self).start()
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)
    sqs_queue = connect_sqs(
        region_name=options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey,
        task_exchange=options.taskexchange)
    # consume our queue
    taskConsumer(sqs_queue, es, options).run()
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)
    mqConn, eventTaskQueue = connect_sqs(options.region, options.accesskey, options.secretkey, options.taskexchange)
    # consume our queue
    taskConsumer(mqConn, eventTaskQueue, es, options).run()
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)
    mqConn = boto.sqs.connect_to_region(
        options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey)
    # attach to the queue
    eventTaskQueue = mqConn.get_queue(options.taskexchange)
    # consume our queue
    taskConsumer(mqConn, eventTaskQueue, es, options).run()
def main():
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)
    sqs_conn, eventTaskQueue = connect_sqs(
        task_exchange=options.taskexchange,
        **get_aws_credentials(options.region, options.accesskey, options.secretkey))
    # consume our queue
    taskConsumer(sqs_conn, eventTaskQueue, es, options).run()
def main():
    # meant only to talk to SQS using boto
    # and process events as json.
    if hasUWSGI:
        sys.stdout.write("started as uwsgi mule {0}\n".format(uwsgi.mule_id()))
    else:
        sys.stdout.write('started without uwsgi\n')
    if options.mqprotocol not in ('sqs',):
        sys.stdout.write('Can only process SQS queues, terminating\n')
        sys.exit(1)
    sqs_conn = boto.sqs.connect_to_region(
        options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey)
    # attach to the queue
    eventTaskQueue = sqs_conn.get_queue(options.taskexchange)
    # consume our queue
    taskConsumer(sqs_conn, eventTaskQueue, es).run()
def main():
    # meant only to talk to SQS using boto
    # and process events as json.
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info('started without uwsgi')
    if options.mqprotocol not in ('sqs',):
        logger.error('Can only process SQS queues, terminating')
        sys.exit(1)
    sqs_conn, eventTaskQueue = connect_sqs(
        task_exchange=options.taskexchange,
        **get_aws_credentials(options.region, options.accesskey, options.secretkey))
    # consume our queue
    taskConsumer(sqs_conn, eventTaskQueue, es).run()
def main():
    # meant only to talk to SQS using boto
    # and process events as json.
    if hasUWSGI:
        sys.stdout.write("started as uwsgi mule {0}\n".format(uwsgi.mule_id()))
    else:
        sys.stdout.write('started without uwsgi\n')
    if options.mqprotocol not in ('sqs',):
        sys.stdout.write('Can only process SQS queues, terminating\n')
        sys.exit(1)
    mqConn = boto.sqs.connect_to_region(
        options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey)
    # attach to the queue
    eventTaskQueue = mqConn.create_queue(options.taskexchange)
    # consume our queue
    taskConsumer(mqConn, eventTaskQueue, es).run()
def main():
    # meant only to talk to SQS using boto
    # and process events as json.
    if hasUWSGI:
        logger.info("started as uwsgi mule {0}".format(uwsgi.mule_id()))
    else:
        logger.info("started without uwsgi")
    if options.mqprotocol not in ("sqs",):
        logger.error("Can only process SQS queues, terminating")
        sys.exit(1)
    sqs_queue = connect_sqs(
        region_name=options.region,
        aws_access_key_id=options.accesskey,
        aws_secret_access_key=options.secretkey,
        task_exchange=options.taskexchange,
    )
    # consume our queue
    taskConsumer(sqs_queue, es).run()
def main():
    # connect and declare the message queue/kombu objects.
    connString = 'amqp://{0}:{1}@{2}:{3}//'.format(options.mquser, options.mqpassword, options.mqserver, options.mqport)
    mqConn = Connection(connString)
    # Task Exchange for events sent via http for us to normalize and post to elastic search
    eventTaskExchange = Exchange(name=options.taskexchange, type='direct', durable=True)
    eventTaskExchange(mqConn).declare()
    # Queue for the exchange
    eventTaskQueue = Queue(options.taskexchange, exchange=eventTaskExchange, routing_key=options.taskexchange)
    eventTaskQueue(mqConn).declare()
    # topic exchange for anyone who wants to queue and listen for mozdef.event
    eventTopicExchange = Exchange(name=options.eventexchange, type='topic', durable=False, delivery_mode=1)
    eventTopicExchange(mqConn).declare()
    if hasUWSGI:
        sys.stdout.write("started as uwsgi mule {0}\n".format(uwsgi.mule_id()))
    else:
        sys.stdout.write('started without uwsgi\n')
    # consume our queue and publish on the topic exchange
    taskConsumer(mqConn, eventTaskQueue, eventTopicExchange, es).run()
#!/usr/bin/python
import uwsgi
from time import sleep

print('\n* server #{} is up on port 200{}\n'.format(uwsgi.mule_id(), uwsgi.mule_id()))

while True:
    sleep(1)
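A script like this runs as a programmed mule when named in the uWSGI config; each mule line exec()s the script as its own standalone process. A minimal ini sketch (the file name server.py is an assumption):

[uwsgi]
; two programmed mules, ids 1 and 2
mule = server.py
mule = server.py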
def worker():
    mule_id = uwsgi.mule_id()
    if mule_id == 0:
        uwsgi.setprocname("%s worker %s" % (app_name, uwsgi.worker_id()))
    else:
        uwsgi.setprocname("%s mule %s" % (app_name, mule_id))
def _is_mule(self):
    return uwsgi.mule_id() > 0
def _farms(self):
    farms = []
    for farm, mules in self.configured_pools.items():
        if uwsgi.mule_id() in mules:
            farms.append(farm)
    return farms
def __call__(self):
    if uwsgi.mule_id() == self.num:
        while True:
            message = uwsgi.mule_get_msg()
            if message:
                self.f(message)
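For this consumer to wake up, another process must target its mule id with uwsgi.mule_msg, as the signal handler earlier does. A one-line sketch, assuming the decorated mule is number 1:

uwsgi.mule_msg('wake up', 1)  # delivered to mule 1's blocking mule_get_msg()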
def _mule_index_in_farm(self, farm_name):
    try:
        mules = self._configured_farms[farm_name]
        return mules.index(uwsgi.mule_id())
    except (KeyError, ValueError):
        return -1
def filter(self, record):
    record.worker_id = uwsgi.worker_id()
    record.mule_id = uwsgi.mule_id()
    return True
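Wiring this filter into standard logging is plain stdlib work; a sketch assuming the enclosing class is named UWSGIContextFilter (the class name is not shown above):

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('[worker %(worker_id)s mule %(mule_id)s] %(message)s'))
handler.addFilter(UWSGIContextFilter())  # stamps worker_id/mule_id onto every record
logging.getLogger().addHandler(handler)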
def listen_loop():
    if uwsgi.mule_id() == 0:
        gevent.spawn(wr.msg_listen_loop)
        rules.max_runs = event['max_runs']
        if event['drop_runs'] is not None:
            rules.drop_runs = event['drop_runs']
        return rules
    else:
        return None


#######################################

if __name__ == '__main__':
    logging.warning("start recalc scores mule")
    db = get_db()
    while True:
        logging.debug("recalc waiting... mule_id=%r", uwsgi.mule_id())
        uwsgi.mule_get_msg()  # wait on any msg indicating we need to recalc something
        # FIXME consider changing this to uwsgi.signal_wait() so that we can filter on a particular type
        logging.debug("RECALC")
        run_id = 1  # trigger first iteration
        while run_id is not None:
            # find next run to recalc
            with db:
                event = get_event(db)
                run_id = db.query_single("SELECT run_id FROM runs WHERE event_id=? AND recalc", (event['event_id'],))
                if run_id is not None:
                    # indicate we are processing run
                    db.update('runs', run_id, recalc=2)
                    rules = get_rules(event)
                    rules.recalc_run(db, run_id)
def __call__(self):
    if uwsgi.mule_id() == self.num:
        while True:
            self.f()
def listen_loop():
    if (mule_id is None or uwsgi.mule_id() == mule_id) and (worker_id is None or uwsgi.worker_id() == worker_id):
        gevent.spawn(*args, **kwargs)
def __call__(self):
    if uwsgi.mule_id() == self.id:
        uwsgi.setprocname(self.name)
        self.function(self)
def _farms(self):
    farms = []
    for farm, mules in self._configured_farms.items():
        if uwsgi.mule_id() in mules:
            farms.append(farm)
    return farms
def __call__(self):
    if uwsgi.mule_id() == self.num:
        self.f()