def wrap_select_with_coroutine_select():
    """Deprecated alias for ``patcher.monkey_patch(all=False, select=True)``.

    Kept only for backward compatibility; emits a DeprecationWarning and
    delegates to the patcher.
    """
    warnings.warn(
        "eventlet.util.wrap_select_with_coroutine_select() is now "
        "eventlet.patcher.monkey_patch(all=False, select=True)",
        category=DeprecationWarning,
        stacklevel=2,
    )
    from eventlet import patcher
    patcher.monkey_patch(all=False, select=True)
def wrap_socket_with_coroutine_socket(use_thread_pool=None):
    """Deprecated alias for ``patcher.monkey_patch(all=False, socket=True)``.

    ``use_thread_pool`` is accepted for backward compatibility with old
    callers but is ignored.
    """
    warnings.warn(
        "eventlet.util.wrap_socket_with_coroutine_socket() is now "
        "eventlet.patcher.monkey_patch(all=False, socket=True)",
        category=DeprecationWarning,
        stacklevel=2,
    )
    from eventlet import patcher
    patcher.monkey_patch(all=False, socket=True)
def run_forever(self, *args, **kwargs):
    """
    Run the updater continuously: fork one worker per container path,
    sweep it, and merge each child's account-suppression file back in.
    """
    # Random initial delay spreads sweep start times across daemons.
    time.sleep(random() * self.interval)
    while True:
        self.logger.info(_('Begin container update sweep'))
        begin = time.time()
        now = time.time()
        # Fix: use .items() instead of the Python-2-only .iteritems() so
        # this works on both Python 2 and 3 (matches the sibling updater).
        expired_suppressions = \
            [a for a, u in self.account_suppressions.items() if u < now]
        for account in expired_suppressions:
            del self.account_suppressions[account]
        pid2filename = {}
        # read from account ring to ensure it's fresh
        self.get_account_ring().get_nodes('')
        for path in self.get_paths():
            # Throttle to at most self.concurrency children; merge each
            # finished child's suppression file before forking more.
            while len(pid2filename) >= self.concurrency:
                pid = os.wait()[0]
                try:
                    self._load_suppressions(pid2filename[pid])
                finally:
                    del pid2filename[pid]
            fd, tmpfilename = mkstemp()
            os.close(fd)
            pid = os.fork()
            if pid:
                pid2filename[pid] = tmpfilename
            else:
                # Child: restore default SIGTERM handling, re-apply the
                # socket monkey patch after fork, sweep one path, exit.
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                patcher.monkey_patch(all=False, socket=True)
                self.no_changes = 0
                self.successes = 0
                self.failures = 0
                self.new_account_suppressions = open(tmpfilename, 'w')
                forkbegin = time.time()
                self.container_sweep(path)
                elapsed = time.time() - forkbegin
                self.logger.debug(
                    _('Container update sweep of %(path)s completed: '
                      '%(elapsed).02fs, %(success)s successes, %(fail)s '
                      'failures, %(no_change)s with no changes'),
                    {'path': path, 'elapsed': elapsed,
                     'success': self.successes, 'fail': self.failures,
                     'no_change': self.no_changes})
                sys.exit()
        # Parent: reap remaining children and merge their suppressions.
        while pid2filename:
            pid = os.wait()[0]
            try:
                self._load_suppressions(pid2filename[pid])
            finally:
                del pid2filename[pid]
        elapsed = time.time() - begin
        self.logger.info(_('Container update sweep completed: %.02fs'),
                         elapsed)
        dump_recon_cache({'container_updater_sweep': elapsed},
                         self.rcache, self.logger)
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
def main():
    """CLI entry point: parse options and config, print a dispersion report."""
    patcher.monkey_patch()
    hubs.get_hub().debug_exceptions = False
    conffile = '/etc/swift/dispersion.conf'
    parser = OptionParser(usage=''' Usage: %%prog [options] [conf_file] [conf_file] defaults to %s'''.strip() % conffile)
    parser.add_option('-j', '--dump-json', action='store_true', default=False,
                      help='dump dispersion report in json format')
    parser.add_option('-d', '--debug', action='store_true', default=False,
                      help='print 404s to standard error')
    parser.add_option('-p', '--partitions', action='store_true', default=False,
                      help='print missing partitions to standard error')
    parser.add_option('--container-only', action='store_true', default=False,
                      help='Only run container report')
    parser.add_option('--object-only', action='store_true', default=False,
                      help='Only run object report')
    parser.add_option('--insecure', action='store_true', default=False,
                      help='Allow accessing insecure keystone server. '
                      'The keystone\'s certificate will not be verified.')
    parser.add_option('-P', '--policy-name', dest='policy_name',
                      help="Specify storage policy name")
    options, args = parser.parse_args()
    # A positional argument overrides the default config file path.
    if args:
        conffile = args.pop(0)
    if options.debug:
        global debug
        debug = True
    c = ConfigParser()
    if not c.read(conffile):
        exit('Unable to read config file: %s' % conffile)
    conf = dict(c.items('dispersion'))
    # CLI flags override the values read from the config file.
    if options.dump_json:
        conf['dump_json'] = 'yes'
    if options.object_only:
        conf['container_report'] = 'no'
    if options.container_only:
        conf['object_report'] = 'no'
    if options.insecure:
        conf['keystone_api_insecure'] = 'yes'
    if options.partitions:
        conf['partitions'] = 'yes'
    output = generate_report(conf, options.policy_name)
    # NOTE(review): json_output is not defined in this function; presumably
    # it is a module-level global set elsewhere (e.g. by generate_report)
    # -- confirm against the rest of the module.
    if json_output:
        print(json.dumps(output))
def __init__(self):
    """Green-patch system modules and open the ZMQ SUB socket for receiving."""
    patcher.monkey_patch()

    # CHECK TMP FILE(SEND) -- currently disabled:
    # pck_in_recv.check_exists_tmp(self.SEND_PATH)

    # CHECK TMP FILE(RECV)
    self.check_exists_tmp(self.RECV_PATH)

    context = zmq.Context()
    self.recv_sock = context.socket(zmq.SUB)
    self.recv_sock.connect(self.IPC_PATH_RECV)
    # Empty prefix filter: subscribe to every published message.
    self.recv_sock.setsockopt(zmq.SUBSCRIBE, "")
def serve():
    """Serve the configured WSGI app over eventlet.

    With a positive thread count the app is dispatched to an OS thread
    pool; otherwise the socket module is green-patched instead.
    """
    config = get_config()
    app = resolve_app(config['app'])
    socket = config['socket']
    threads = config['threads']

    if threads > 0:
        app = tpool_wsgi(app)
    else:
        patcher.monkey_patch(all=False, socket=True)

    wsgi.server(socket, app, log=StdOutLogger())
def wrap_threading_local_with_coro_local():
    """
    monkey patch ``threading.local`` with something that is greenlet aware.
    Since greenlets cannot cross threads, so this should be semantically
    identical to ``threadlocal.local``
    """
    # Bug fix: the two adjacent string fragments previously concatenated to
    # "-- thoughnote that"; the leading space on the second fragment
    # restores the intended "-- though note that" wording.
    warnings.warn("eventlet.util.wrap_threading_local_with_coro_local() is now "
                  "eventlet.patcher.monkey_patch(all=False, thread=True) -- though"
                  " note that more than just _local is patched now.",
                  DeprecationWarning, stacklevel=2)
    from eventlet import patcher
    patcher.monkey_patch(all=False, thread=True)
def serve():
    """Serve the Django WSGI app (with admin media handling) over eventlet.

    With a positive thread count the app is dispatched to an OS thread
    pool; otherwise the socket module is green-patched instead.
    """
    config = get_config()
    socket = config['socket']
    threads = config['threads']

    app = AdminMediaHandler(WSGIHandler())
    if threads > 0:
        app = tpool_wsgi(app)
    else:
        patcher.monkey_patch(all=False, socket=True)

    wsgi.server(socket, app, log=StdOutLogger())
def run_forever(self, *args, **kwargs):
    """Run the updater continuously."""
    # Random initial delay spreads sweep start times across daemons.
    time.sleep(random() * self.interval)
    while True:
        self.logger.info(_('Begin object update sweep'))
        begin = time.time()
        pids = []
        # read from container ring to ensure it's fresh
        self.get_container_ring().get_nodes('')
        for device in self._listdir(self.devices):
            if self.mount_check and \
                    not ismount(os.path.join(self.devices, device)):
                self.logger.increment('errors')
                self.logger.warning(
                    _('Skipping %s as it is not mounted'), device)
                continue
            # Throttle to at most self.concurrency concurrent children.
            while len(pids) >= self.concurrency:
                pids.remove(os.wait()[0])
            pid = os.fork()
            if pid:
                pids.append(pid)
            else:
                # Child process: restore default SIGTERM handling and
                # re-apply the monkey patches after fork, sweep one
                # device, then exit.
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                patcher.monkey_patch(all=False, socket=True, select=True,
                                     thread=True)
                self.successes = 0
                self.failures = 0
                forkbegin = time.time()
                self.object_sweep(os.path.join(self.devices, device))
                elapsed = time.time() - forkbegin
                self.logger.info(
                    _('Object update sweep of %(device)s'
                      ' completed: %(elapsed).02fs, %(success)s successes'
                      ', %(fail)s failures'),
                    {'device': device, 'elapsed': elapsed,
                     'success': self.successes, 'fail': self.failures})
                sys.exit()
        # Parent: reap all remaining children before finishing the sweep.
        while pids:
            pids.remove(os.wait()[0])
        elapsed = time.time() - begin
        self.logger.info(_('Object update sweep completed: %.02fs'),
                         elapsed)
        dump_recon_cache({'object_updater_sweep': elapsed},
                         self.rcache, self.logger)
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
    """Run a single, single-threaded container update sweep."""
    patcher.monkey_patch(all=False, socket=True)
    self.logger.info(_('Begin container update single threaded sweep'))
    begin = time.time()
    # Reset the per-sweep counters updated by container_sweep().
    self.no_changes = self.successes = self.failures = 0
    for path in self.get_paths():
        self.container_sweep(path)
    elapsed = time.time() - begin
    self.logger.info(
        _('Container update single threaded sweep completed: '
          '%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
          '%(no_change)s with no changes'),
        {'elapsed': elapsed, 'success': self.successes,
         'fail': self.failures, 'no_change': self.no_changes})
def run_forever(self, *args, **kwargs):
    """Run the updater continuously."""
    # Random initial delay spreads sweep start times across daemons.
    time.sleep(random() * self.interval)
    while True:
        self.logger.info(_("Begin object update sweep"))
        begin = time.time()
        pids = []
        # read from container ring to ensure it's fresh
        self.get_container_ring().get_nodes("")
        for device in os.listdir(self.devices):
            if self.mount_check and not os.path.ismount(os.path.join(self.devices, device)):
                self.logger.increment("errors")
                self.logger.warn(_("Skipping %s as it is not mounted"), device)
                continue
            # Throttle to at most self.concurrency concurrent children.
            while len(pids) >= self.concurrency:
                pids.remove(os.wait()[0])
            pid = os.fork()
            if pid:
                pids.append(pid)
            else:
                # Child process: restore default SIGTERM handling and
                # re-apply the socket monkey patch after fork, sweep one
                # device, then exit.
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                patcher.monkey_patch(all=False, socket=True)
                self.successes = 0
                self.failures = 0
                forkbegin = time.time()
                self.object_sweep(os.path.join(self.devices, device))
                elapsed = time.time() - forkbegin
                self.logger.info(
                    _(
                        "Object update sweep of %(device)s"
                        " completed: %(elapsed).02fs, %(success)s successes"
                        ", %(fail)s failures"
                    ),
                    {"device": device, "elapsed": elapsed, "success": self.successes, "fail": self.failures},
                )
                sys.exit()
        # Parent: reap all remaining children before finishing the sweep.
        while pids:
            pids.remove(os.wait()[0])
        elapsed = time.time() - begin
        self.logger.info(_("Object update sweep completed: %.02fs"), elapsed)
        dump_recon_cache({"object_updater_sweep": elapsed}, self.rcache, self.logger)
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
    """Run a single, single-threaded container update sweep."""
    patcher.monkey_patch(all=False, socket=True)
    self.logger.info(_("Begin container update single threaded sweep"))
    start = time.time()
    # Reset the per-sweep counters updated by container_sweep().
    self.no_changes = self.successes = self.failures = 0
    for path in self.get_paths():
        self.container_sweep(path)
    elapsed = time.time() - start
    stats = {
        "elapsed": elapsed,
        "success": self.successes,
        "fail": self.failures,
        "no_change": self.no_changes,
    }
    self.logger.info(
        _("Container update single threaded sweep completed: "
          "%(elapsed).02fs, %(success)s successes, %(fail)s failures, "
          "%(no_change)s with no changes"),
        stats)
    dump_recon_cache({"container_updater_sweep": elapsed},
                     self.rcache, self.logger)
def __init__(self, *args, **kwargs):
    """Initialize the MLD controller: logging, DPSet, config, ZMQ sockets,
    and the receive thread."""
    try:
        # Load the logging configuration file.
        logging.config.fileConfig(COMMON_PATH + const.RYU_LOG_CONF,
                                  disable_existing_loggers=False)
        self.logger = logging.getLogger(__name__)
        self.logger.debug("")
        super(mld_controller, self).__init__(*args, **kwargs)

        # Fetch the DPSet from the Ryu application context.
        self.dpset = kwargs["dpset"]

        # Green-patch system socket modules for the sockets used below.
        patcher.monkey_patch()

        # Load the configuration file.
        config = read_json(COMMON_PATH + const.CONF_FILE)
        self.logger.info("%s:%s", const.CONF_FILE,
                         json.dumps(config.data, indent=4, sort_keys=True,
                                    ensure_ascii=False))
        self.config = config.data[const.SETTING]

        # Main-loop flag.
        self.loop_flg = True

        # ZMQ connection strings (publish, subscribe).
        zmq_conn = self.get_zmq_connect(config)
        self.zmq_pub = zmq_conn[0]
        self.zmq_sub = zmq_conn[1]

        # Create the ZMQ send/receive sockets.
        self.create_socket(self.zmq_pub, self.zmq_sub)

        # Start the thread that receives messages from mld.
        hub.spawn(self.receive_from_mld)
    except Exception:
        # Bug fix: traceback.print_exc() returns None (it prints straight
        # to stderr), so the original logged the literal string "None".
        # format_exc() captures the traceback text into the log record.
        # Also narrowed the bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        self.logger.error("%s ", traceback.format_exc())
import threading import eventlet import socketio import logging from eventlet import patcher, wsgi from app import app, sio patcher.monkey_patch(all=True) class SocketIoRunner(object): def __init__(self, url): self.host, port_str = url.split(':') self.port = int(port_str) self.server = None # create the thread object self.thread = threading.Thread(target=self._start_listening_blocking) # wrap Flask application with socketio's middleware self.app = socketio.Middleware(sio, app) def start_listening_async(self): wsgi.is_accepting = True self.thread.start() def stop_listening(self): wsgi.is_accepting = False def _start_listening_blocking(self): # deploy as an eventlet WSGI server
# Tail of the previous bundled file: run the interactive command loop.
try:
    CommandCenter().cmdloop()
except KeyboardInterrupt:
    # Ctrl-C: exit cleanly (Python 2 print statement).
    print 'bye'
    sys.exit(0)

########NEW FILE########
# NOTE(review): bundler artifact -- `commands` is not defined at this
# point, so this line would raise NameError if executed; presumably the
# bundling tool intended a string literal. Confirm before running.
__FILENAME__ = commands
"""Command dispatcher and commands to run.

Look up the command from the command center, attempt to map it to a local
method.
"""
# Patch the stdlib before anything else imports socket/thread modules.
from eventlet import patcher
patcher.monkey_patch(all=True)

import boto
import time
import datetime
import sys
import cmd

import settings
from microarmy.firepower import (init_cannons,
                                 terminate_cannons,
                                 reboot_cannons,
                                 setup_cannons,
                                 slam_host,
                                 setup_siege,
                                 setup_siege_urls,
                                 find_deployed_cannons,
                                 destroy_deployed_cannons)
from __future__ import print_function

from eventlet import patcher

# no standard tests in this file, ignore
__test__ = False

if __name__ == '__main__':
    import MySQLdb as m
    from eventlet.green import MySQLdb as gm

    # Patch everything, including the MySQLdb driver.
    patcher.monkey_patch(all=True, MySQLdb=True)

    patched_names = ",".join(sorted(patcher.already_patched.keys()))
    print("mysqltest {0}".format(patched_names))
    # True when the plain import now resolves to the green driver.
    print("connect {0}".format(m.connect == gm.connect))
import eventlet
eventlet.sleep(0)
from eventlet import patcher
patcher.monkey_patch()


def assimilate_real(name):
    """Import test.<name> and publish its test_main as <name>_test_main
    in this module's globals (Python 2 print statements)."""
    print "Assimilating", name
    try:
        modobj = __import__('test.' + name, globals(), locals(), ['test_main'])
    except ImportError:
        print "Not importing %s, it doesn't exist in this installation/version of Python" % name
        return
    else:
        method_name = name + "_test_main"
        try:
            globals()[method_name] = modobj.test_main
            # Rename so failures report which stdlib test they came from.
            modobj.test_main.__name__ = name + '.test_main'
        except AttributeError:
            print "No test_main for %s, assuming it tests on import" % name

import all_modules

for m in all_modules.get_modules():
    assimilate_real(m)
def inner_run():
    # Install eventlet patches after everything else has been run,
    # and inside our server thread
    patcher.monkey_patch()
    # Blocks serving `application` on (self.host, self.port), both taken
    # from the enclosing scope.
    wsgi.server(eventlet.listen((self.host, self.port)), application)
import eventlet
eventlet.sleep(0)
from eventlet import patcher
patcher.monkey_patch()


def assimilate_real(name):
    """Import test.<name> and publish its test_main as <name>_test_main
    in this module's globals."""
    print("Assimilating", name)
    try:
        modobj = __import__('test.' + name, globals(), locals(), ['test_main'])
    except ImportError:
        print("Not importing %s, it doesn't exist in this installation/version of Python" % name)
        return
    alias = name + "_test_main"
    try:
        globals()[alias] = modobj.test_main
        # Rename so failures report which stdlib test they came from.
        modobj.test_main.__name__ = name + '.test_main'
    except AttributeError:
        print("No test_main for %s, assuming it tests on import" % name)


import all_modules

for module_name in all_modules.get_modules():
    assimilate_real(module_name)
def run_forever(self, *args, **kwargs):
    """
    Run the updator continuously.
    """
    # Random initial delay spreads sweep start times across daemons.
    time.sleep(random() * self.interval)
    while True:
        self.logger.info(_('Begin container update sweep'))
        begin = time.time()
        now = time.time()
        # Drop account suppressions whose expiry time has passed.
        expired_suppressions = \
            [a for a, u in self.account_suppressions.items() if u < now]
        for account in expired_suppressions:
            del self.account_suppressions[account]
        pid2filename = {}
        # read from account ring to ensure it's fresh
        self.get_account_ring().get_nodes('')
        for path in self.get_paths():
            # Throttle to at most self.concurrency children; merge each
            # finished child's suppression file before forking more.
            while len(pid2filename) >= self.concurrency:
                pid = os.wait()[0]
                try:
                    self._load_suppressions(pid2filename[pid])
                finally:
                    del pid2filename[pid]
            fd, tmpfilename = mkstemp()
            os.close(fd)
            pid = os.fork()
            if pid:
                pid2filename[pid] = tmpfilename
            else:
                # Child: restore default SIGTERM handling, re-apply the
                # socket monkey patch after fork, sweep one path, exit.
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                patcher.monkey_patch(all=False, socket=True)
                self.no_changes = 0
                self.successes = 0
                self.failures = 0
                self.new_account_suppressions = open(tmpfilename, 'w')
                forkbegin = time.time()
                self.container_sweep(path)
                elapsed = time.time() - forkbegin
                self.logger.debug(
                    _('Container update sweep of %(path)s completed: '
                      '%(elapsed).02fs, %(success)s successes, %(fail)s '
                      'failures, %(no_change)s with no changes'), {
                        'path': path,
                        'elapsed': elapsed,
                        'success': self.successes,
                        'fail': self.failures,
                        'no_change': self.no_changes
                    })
                sys.exit()
        # Parent: reap remaining children and merge their suppressions.
        while pid2filename:
            pid = os.wait()[0]
            try:
                self._load_suppressions(pid2filename[pid])
            finally:
                del pid2filename[pid]
        elapsed = time.time() - begin
        self.logger.info(_('Container update sweep completed: %.02fs'),
                         elapsed)
        dump_recon_cache({'container_updater_sweep': elapsed},
                         self.rcache, self.logger)
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
#!/usr/bin/env python
import os
import sys

# Opt-in green-thread support: patch before Django is imported.
if os.environ.get('EVENTLET'):
    from eventlet.patcher import monkey_patch
    monkey_patch()

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "inthe_am.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
__test__ = False

if __name__ == "__main__":
    import MySQLdb as m
    from eventlet import patcher
    from eventlet.green import MySQLdb as gm

    # Patch everything, including the MySQLdb driver.
    patcher.monkey_patch(all=True, MySQLdb=True)

    expected = frozenset(["MySQLdb", "os", "select", "socket", "thread", "time"])
    # psycopg may or may not be patched depending on the environment;
    # exclude it before comparing.
    patched_set = set(patcher.already_patched) - set(["psycopg"])
    assert patched_set == expected
    # The plain import must now resolve to the green driver.
    assert m.connect == gm.connect
    print("pass")
#!/usr/bin/env python
import os

# Opt-in green-thread support: patch before Django is imported.
if os.environ.get('EVENTLET'):
    from eventlet.patcher import monkey_patch
    monkey_patch()

import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "inthe_am.settings")
    from django.core.management import execute_from_command_line
    from django.conf import settings

    # Attach the ptvsd debugger only in DEBUG mode, and only inside the
    # autoreloader's child process (RUN_MAIN / WERKZEUG_RUN_MAIN are set
    # there), so the debug port isn't claimed twice.
    if settings.DEBUG and (os.environ.get('RUN_MAIN')
                           or os.environ.get('WERKZEUG_RUN_MAIN')):
        import ptvsd
        ptvsd.enable_attach(address=('0.0.0.0', 3000))

    execute_from_command_line(sys.argv)