def profile(function: Callable, profile_fname: str, filter: str = "", period=0.0001, **kwargs):
    """
    Profile a given function with ``vmprof``.

    Args:
        function (Callable): function to profile
        profile_fname (str): path where to save profile (`.txt` file will be saved with line profile)
        filter (str, optional): filter name (e.g. module name) to filter profile. Defaults to "".
        period (float, optional): frequency of calling profiler in seconds. Defaults to 0.0001.
    """
    import vmprof
    from vmprof.show import LinesPrinter

    # profiler config
    with open(profile_fname, "wb+") as fd:
        # start profiler
        vmprof.enable(fd.fileno(), lines=True, period=period)
        # run function
        function(**kwargs)
        # stop profiler
        vmprof.disable()
        # write report to disk
        if kwargs.get("lines", True):
            with open(f"{os.path.splitext(profile_fname)[0]}.txt", "w") as f:
                with redirect_stdout(f):
                    LinesPrinter(filter=filter).show(profile_fname)
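# Hedged usage sketch for the profile() helper above. The workload function and
# file names below are made up for illustration; the helper itself assumes that
# `os`, `Callable`, and contextlib's `redirect_stdout` are imported elsewhere in
# its module.
def slow_task(n=100_000):
    # toy workload, only here to give the sampler something to measure
    return sum(i * i for i in range(n))

# Writes the raw vmprof data to slow_task.prof and a line-level text report to
# slow_task.txt; extra keyword arguments are forwarded to the workload.
profile(slow_task, "slow_task.prof", filter="slow_task", n=200_000)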
def main():
    args = vmprof.cli.parse_args(sys.argv[1:])

    if args.web:
        output_mode = OUTPUT_WEB
    elif args.output:
        output_mode = OUTPUT_FILE
    else:
        output_mode = OUTPUT_CLI

    if output_mode == OUTPUT_FILE:
        prof_file = args.output
        prof_name = prof_file.name
    else:
        prof_file = tempfile.NamedTemporaryFile(delete=False)
        prof_name = prof_file.name

    vmprof.enable(prof_file.fileno(), args.period, args.mem)
    try:
        sys.argv = [args.program] + args.args
        sys.path.insert(0, os.path.dirname(args.program))
        runpy.run_path(args.program, run_name='__main__')
    except BaseException as e:
        if not isinstance(e, (KeyboardInterrupt, SystemExit)):
            raise
    vmprof.disable()
    prof_file.close()
    show_stats(prof_name, output_mode, args)
    if output_mode != OUTPUT_FILE:
        os.unlink(prof_name)
def test_is_enabled(self):
    assert vmprof.is_enabled() == False
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    assert vmprof.is_enabled() == True
    vmprof.disable()
    assert vmprof.is_enabled() == False
def test_basic():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    function_foo()
    vmprof.disable()
    tmpfile.close()
    assert b"function_foo" in gzip.GzipFile(tmpfile.name).read()
def main():
    args = vmprof.cli.parse_args(sys.argv[1:])

    if args.web:
        output_mode = OUTPUT_WEB
    elif args.output:
        output_mode = OUTPUT_FILE
    else:
        output_mode = OUTPUT_CLI

    if output_mode == OUTPUT_FILE:
        prof_file = args.output
    else:
        prof_file = tempfile.NamedTemporaryFile()

    vmprof.enable(prof_file.fileno(), args.period)
    try:
        sys.argv = [args.program] + args.args
        runpy.run_path(args.program, run_name='__main__')
    except BaseException as e:
        if not isinstance(e, (KeyboardInterrupt, SystemExit)):
            raise
    vmprof.disable()
    show_stats(prof_file.name, output_mode, args)
def test_basic():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    function_foo()
    vmprof.disable()
    tmpfile.close()
    assert b"function_foo" in open(tmpfile.name, 'rb').read()
def enable(self):
    if not self.is_enabled:
        if not os.path.exists(self.basepath):
            os.makedirs(self.basepath)
        self.file = tempfile.NamedTemporaryFile(delete=False, dir=self.basepath)
        vmprof.enable(self.file.fileno())
        self.is_enabled = True
def after_reactor_started():
    term_print('CROSSBAR[{}]:REACTOR_STARTED'.format(options.worker))

    if _HAS_VMPROF and options.vmprof:
        outfn = os.path.join(options.cbdir, '.vmprof-worker-{}-{}.dat'.format(options.worker, os.getpid()))
        _vm_prof['outfd'] = os.open(outfn, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
        vmprof.enable(_vm_prof['outfd'], period=0.01)
        term_print('CROSSBAR[{}]:VMPROF_ENABLED:{}'.format(options.worker, outfn))
def _enable_profiler(self):
    log.info("Enable Profiler")
    self._profiler_fd = os.open(
        PROFILER_FILE_NAME,
        os.O_RDWR | os.O_CREAT | os.O_TRUNC
    )
    vmprof.enable(self._profiler_fd)
    self._profiler_running = True
def test_read_bit_by_bit():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    function_foo()
    vmprof.disable()
    tmpfile.close()
    stats = read_profile(tmpfile.name)
    stats.get_tree()
def collect_profile(fname="profile.dat"):
    with open(fname, "w+b") as fobj:
        vmprof.enable(fobj.fileno(), memory=False)
        try:
            yield Profile(fname)
        finally:
            vmprof.disable()
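# A self-contained sketch of the same context-manager pattern as collect_profile()
# above. The names here (vmprof_to, do_work) are illustrative only; the original
# presumably wraps collect_profile with contextlib.contextmanager and yields its
# own Profile object rather than the file name.
import contextlib

import vmprof


@contextlib.contextmanager
def vmprof_to(fname="profile.dat"):
    # sample for the duration of the with-block, stopping the profiler
    # even if the body raises
    with open(fname, "w+b") as fobj:
        vmprof.enable(fobj.fileno(), memory=False)
        try:
            yield fname
        finally:
            vmprof.disable()

# usage: everything executed inside the block is sampled into run.dat
# with vmprof_to("run.dat"):
#     do_work()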
def test_vmprof_show():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    function_bar()
    vmprof.disable()
    tmpfile.close()

    pp = PrettyPrinter()
    pp.show(tmpfile.name)
def test_gzip_problem():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    vmprof._gzip_proc.kill()
    function_foo()
    with py.test.raises(Exception) as exc_info:
        vmprof.disable()
    assert "Error while writing profile" in str(exc_info)
    tmpfile.close()
def f(*args):
    prefix = name.replace(' ', '_') + '-'
    suffix = '.vmprof' + "%d%d" % (sys.version_info[:2])
    handle, _ = tempfile.mkstemp(suffix, prefix=prefix, dir=runner.args.temp)
    vmprof.enable(handle, lines=True)
    result = func(*args)
    vmprof.disable()
    os.close(handle)
    return result
def install_vmprof(name="thread"):
    cpid = multiprocessing.current_process().name
    ctid = threading.current_thread().name
    fname = "vmprof-{}-{}-{}-{}.dat".format(name, cpid, ctid, time.time())
    flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
    outfd = os.open(fname, flags)
    vmprof.enable(outfd, period=0.01)
def main():
    tk.utils.better_exceptions()
    tk.log.init(None)
    random.seed(1)
    np.random.seed(1)

    default_output_dir = pathlib.Path(tempfile.gettempdir()) / "tk-benchmark-data"

    parser = argparse.ArgumentParser()
    parser.add_argument("--load", action="store_true")
    parser.add_argument("--mask", action="store_true")
    parser.add_argument("--profile", action="store_true")
    parser.add_argument(
        "--output-dir",
        default=default_output_dir,
        type=pathlib.Path,
        help=f"Output directory for the verification images (default: {default_output_dir})",
    )
    args = parser.parse_args()

    base_dir = pathlib.Path(__file__).resolve().parent.parent.parent
    data_dir = base_dir / "pytoolkit" / "_test_data"
    args.output_dir.mkdir(parents=True, exist_ok=True)

    iterations = 128
    if args.load:
        X = np.array([data_dir / "9ab919332a1dceff9a252b43c0fb34a0_m.jpg"] * batch_size)
        iterations //= 2
    else:
        X = tk.ndimage.load(data_dir / "9ab919332a1dceff9a252b43c0fb34a0_m.jpg")
        X = np.tile(np.expand_dims(X, axis=0), (batch_size, 1, 1, 1))
    if args.mask:
        y = X
        iterations //= 2
    else:
        y = np.zeros((batch_size,))
    dataset = tk.data.Dataset(X, y)
    data_loader = MyDataLoader(data_augmentation=True, mask=args.mask)
    ds, _ = data_loader.get_ds(dataset, shuffle=True)

    if args.profile:
        import vmprof

        with pathlib.Path("benchmark.prof").open("w+b") as fd:
            vmprof.enable(fd.fileno())
            _run(ds, iterations=iterations)
            vmprof.disable()
        logger.info("example: vmprofshow --prune_percent=5 benchmark.prof")
    else:
        # save one batch of images for visual checking
        X_batch, _ = next(iter(ds))
        for ix, x in enumerate(X_batch):
            tk.ndimage.save(
                args.output_dir / f"{ix}.png", np.clip(x, 0, 255).astype(np.uint8)
            )
        # loop for a while to get a rough speed measurement
        _run(ds, iterations=iterations)
def test_read_bit_by_bit():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    function_foo()
    vmprof.disable()
    tmpfile.close()

    with open(tmpfile.name, "rb") as f:
        period, profiles, virtual_symbols, interp_name = read_prof_bit_by_bit(f)
        stats = Stats(profiles, virtual_symbols, interp_name)
        stats.get_tree()
def test_get_profile_path(self):
    assert vmprof.get_profile_path() == None
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    if not vmprof.get_profile_path() == tmpfile.name:
        with open(vmprof.get_profile_path(), 'rb') as fd1:
            with open(tmpfile.name, "rb") as fd2:
                assert fd1.read() == fd2.read()
    vmprof.disable()
    assert vmprof.get_profile_path() == None
def test_read_bit_by_bit():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    function_foo()
    vmprof.disable()
    tmpfile.close()

    with open(tmpfile.name, 'rb') as f:
        period, profiles, virtual_symbols, interp_name = read_prof_bit_by_bit(f)
        stats = Stats(profiles, virtual_symbols, interp_name)
        stats.get_tree()
def __call__(self, environ, start_response):
    prof_file = tempfile.NamedTemporaryFile()
    vmprof.enable(prof_file.fileno(), 0.0005)
    self.application(environ, start_response)
    vmprof.disable()
    stats = vmprof.read_profile(prof_file.name)
    stats_log = self.stats(stats, getattr(settings, "VMPROF_EXCLUDE", None))
    return HttpResponse("<pre>VMprof \n\n===========\n%s</pre>" % stats_log)
def handler(*_):
    nonlocal file, filename
    if file:
        vmprof.disable()
        file.close()
        file = None
        logger.info('vmprof saved to {}'.format(filename))
    else:
        filename = _get_prof_filename(profile)
        file = open(filename, 'wb')
        logger.info('vmprof writing to {}'.format(filename))
        vmprof.enable(file.fileno(), period=0.01)
def test_basic():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    function_foo()
    vmprof.disable()
    tmpfile.close()
    if GZIP:
        assert b"function_foo" in gzip.GzipFile(tmpfile.name).read()
    else:
        with open(tmpfile.name, 'rb') as file:
            content = file.read()
            assert b"function_foo" in content
def test_gzip_problem():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno())
    vmprof._gzip_proc.kill()
    vmprof._gzip_proc.wait()
    # ensure that the gzip process really tries to write
    # to the gzip proc that was killed
    function_foo()
    with py.test.raises(Exception) as exc_info:
        vmprof.disable()
    assert "Error while writing profile" in str(exc_info)
    tmpfile.close()
def __call__(self, environ, start_response):
    prof_file = tempfile.NamedTemporaryFile()
    vmprof.enable(prof_file.fileno(), 0.0005)
    self.application(environ, start_response)
    vmprof.disable()
    stats = vmprof.read_profile(prof_file.name)
    stats_log = self.stats(stats, getattr(settings, 'VMPROF_EXCLUDE', None))
    return HttpResponse("<pre>VMprof \n\n===========\n%s</pre>" % stats_log)
def start(self):
    if self.output_mode == OUTPUT_FILE:
        self.prof_file = open(self.prof_name, "w+b")
    else:
        self.prof_file = tempfile.NamedTemporaryFile(delete=False)
        self.prof_name = self.prof_file.name
    if self.jitlog and _jitlog:
        # open the jitlog next to the profile file (concatenate the file *name*,
        # not the file object)
        self.jitlog_fd = os.open(self.prof_name + ".jitlog", os.O_WRONLY | os.O_TRUNC | os.O_CREAT)
        _jitlog.enable(self.jitlog_fd)
    vmprof.enable(self.prof_file.fileno(), self.period, self.mem, self.lines)
def vmprof_signal_handler(signal, frame):
    import vmprof
    curr_proc = multiprocessing.current_process()
    logger = logging.getLogger('eyrie.script.profile')
    if getattr(curr_proc, 'vmprof_enabled', False):
        logger.warn('Disabling vmprof, output path: %s', curr_proc.profile_output_path)
        vmprof.disable()
        curr_proc.vmprof_enabled = False
    else:
        fileno, output_path = mkstemp(dir=curr_proc.profile_output_dir)
        curr_proc.profile_output_path = output_path
        logger.warn('Enabling vmprof, output path: %s', output_path)
        vmprof.enable(fileno)
        curr_proc.vmprof_enabled = True
def enable(self, request: django.http.HttpRequest):
    self.profile.started_at = django.utils.timezone.now()
    self.profile.request_user_id = request.user.id
    self.profile.request_path = request.path
    self.cpu_real = time.monotonic()
    self.cpu_time = self.process.cpu_times()
    self.mem_info = self.process.memory_full_info()
    # profile at ~100 Hz, asking for more is asking for trouble
    # the web view has no support for lines and memory views
    # native currently fails when trying to unload a library on next request
    # real_time is necessary for our use case - otherwise we'll only profile CPU time, which we don't care about
    vmprof.enable(self.profile_file.fileno(), period=0.0099, lines=False, memory=False, native=False, real_time=True)
def enable(self, request: django.http.HttpRequest):
    self.profile.started_at = django.utils.timezone.now()
    self.profile.request_user_id = request.user.id
    self.profile.request_path = request.path
    self.cpu_real = time.monotonic()
    self.cpu_time = self.process.cpu_times()
    self.mem_info = self.process.memory_full_info()
    vmprof.enable(self.profile_file.fileno(), period=0.00099, lines=False, memory=False, native=True, real_time=True)
def test_line_profiling():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno(), lines=True, native=False)  # enable lines profiling
    function_foo()
    vmprof.disable()
    tmpfile.close()

    def walk(tree):
        assert len(tree.lines) >= len(tree.children)
        for v in six.itervalues(tree.children):
            walk(v)

    stats = read_profile(tmpfile.name)
    walk(stats.get_tree())
def test_profile_get_repo_files(annex_path, new_dataset):
    ds_id = os.path.basename(new_dataset.path)
    ds = Dataset(str(annex_path.join(ds_id)))
    for each in range(5000):
        filename = 'file-{}'.format(each)
        path = os.path.join(new_dataset.path, filename)
        with open(path, 'a'):
            os.utime(path)
    # Add all generated files
    ds.add('.')
    # Profile get_repo_files by itself
    with open('{}.prof'.format(__name__), 'w+b') as fd:
        vmprof.enable(fd.fileno())
        for n in range(1):
            get_repo_files(ds)
        vmprof.disable()
def test_line_profiling():
    filename = "/Users/palpant/test.vmprof"
    with open(filename, "wb+") as fd:
        vmprof.enable(fd.fileno(), lines=True, native=False)  # enable lines profiling
        function_foo()
        vmprof.disable()

    def walk(tree):
        assert len(tree.lines) >= len(tree.children)
        for v in six.itervalues(tree.children):
            walk(v)

    stats = read_profile(filename)
    walk(stats.get_tree())
def main():
    args = vmprof.cli.parse_args(sys.argv[1:])

    # None means default on this platform
    native = None
    if args.no_native:
        native = False

    if args.web:
        output_mode = OUTPUT_WEB
    elif args.output:
        output_mode = OUTPUT_FILE
    else:
        output_mode = OUTPUT_CLI

    if output_mode == OUTPUT_FILE:
        prof_file = args.output
        prof_name = prof_file.name
    else:
        prof_file = tempfile.NamedTemporaryFile(delete=False)
        prof_name = prof_file.name

    vmprof.enable(prof_file.fileno(), args.period, args.mem, args.lines, native=native)
    if args.jitlog and _jitlog:
        fd = os.open(prof_name + '.jit', os.O_WRONLY | os.O_TRUNC | os.O_CREAT)
        _jitlog.enable(fd)

    # invoke the user program:
    try:
        sys.argv = [args.program] + args.args
        sys.path.insert(0, os.path.dirname(args.program))
        runpy.run_path(args.program, run_name='__main__')
    except BaseException as e:
        if not isinstance(e, (KeyboardInterrupt, SystemExit)):
            raise
    #
    vmprof.disable()
    if args.jitlog and _jitlog:
        _jitlog.disable()
    prof_file.close()
    show_stats(prof_name, output_mode, args)
    if output_mode != OUTPUT_FILE:
        os.unlink(prof_name)
def test_line_profiling():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno(), lines=True)  # enable lines profiling
    function_foo()
    vmprof.disable()
    tmpfile.close()

    def walk(tree):
        assert len(tree.lines) >= len(tree.children)
        for v in six.itervalues(tree.children):
            walk(v)

    with open(tmpfile.name, "rb") as f:
        period, profiles, virtual_symbols, interp_name = read_prof_bit_by_bit(f)
        stats = Stats(profiles, virtual_symbols, interp_name)
        walk(stats.get_tree())
def main():
    with tempfile.NamedTemporaryFile() as prof_file:
        vmprof.enable(prof_file.fileno(), 0.001)
        try:
            program = sys.argv[1]
            del sys.argv[1]
            # sys.argv = [args.program] + args.args
            runpy.run_path(program, run_name="__main__")
        except (KeyboardInterrupt, SystemExit):
            pass
        vmprof.disable()
        stats = vmprof.read_profile(prof_file.name, virtual_only=True)
        show(stats)
def test_line_profiling():
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    vmprof.enable(tmpfile.fileno(), lines=True)  # enable lines profiling
    function_foo()
    vmprof.disable()
    tmpfile.close()

    def walk(tree):
        assert len(tree.lines) >= len(tree.children)
        for v in six.itervalues(tree.children):
            walk(v)

    with open(tmpfile.name, 'rb') as f:
        period, profiles, virtual_symbols, interp_name = read_prof_bit_by_bit(f)
        stats = Stats(profiles, virtual_symbols, interp_name)
        walk(stats.get_tree())
def main():
    with tempfile.NamedTemporaryFile() as prof_file:
        vmprof.enable(prof_file.fileno(), 0.001)
        try:
            program = sys.argv[1]
            del sys.argv[1]
            # sys.argv = [args.program] + args.args
            runpy.run_path(program, run_name="__main__")
        except BaseException as e:
            if not isinstance(e, (KeyboardInterrupt, SystemExit)):
                raise
        vmprof.disable()
        stats = vmprof.read_profile(prof_file.name, virtual_only=True)
        show(stats)
def profile_vmprof(name, env):
    if vmprof is None:
        print('vmprof not found. Please install vmprof and try again.')
        return

    func = create_bench(name, env)
    gc.collect()

    #
    # Based on: https://github.com/vmprof/vmprof-python/blob/master/vmprof/__main__.py
    #
    prof_file = tempfile.NamedTemporaryFile(delete=False)
    filename = prof_file.name

    vmprof.enable(prof_file.fileno())
    try:
        for __ in range(1000000):
            func()
    except BaseException as e:
        if not isinstance(e, (KeyboardInterrupt, SystemExit)):
            raise
    vmprof.disable()

    service = Service('vmprof.com')
    service.post({
        Service.FILE_CPU_PROFILE: filename,
        Service.FILE_JIT_PROFILE: filename + '.jit',
        'argv': ' '.join(sys.argv[:]),
        'VM': platform.python_implementation(),
    })
    prof_file.close()
def main():
    parser = create_argument_parser()
    args = parser.parse_args()

    if args.web:
        output_mode = OUTPUT_WEB
    elif args.output:
        output_mode = OUTPUT_FILE
    else:
        output_mode = OUTPUT_CLI

    if output_mode == OUTPUT_FILE:
        prof_file = args.output
    else:
        prof_file = tempfile.NamedTemporaryFile()

    vmprof.enable(prof_file.fileno(), args.period)
    try:
        sys.argv = [args.program] + args.args
        runpy.run_path(args.program, run_name='__main__')
    except BaseException as e:
        if not isinstance(e, (KeyboardInterrupt, SystemExit)):
            raise
def test_basic():
    tmpfile = tempfile.NamedTemporaryFile()
    vmprof.enable(tmpfile.fileno())
    function_foo()
    vmprof.disable()
    assert b"function_foo" in open(tmpfile.name, 'rb').read()
def start(self, runtime=10):
    """
    Start profiling with VMprof for the given duration.
    """
    if self._state != Profiler.STATE_STOPPED:
        raise Exception("profile currently not stopped - cannot start")

    self._profile_filename = os.path.join(self._profile_dir, "cb_vmprof_{}_{}.dat".format(os.getpid(), utcnow()))
    profile_fd = os.open(self._profile_filename, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
    vmprof.enable(profile_fd, period=0.01)

    self._state = Profiler.STATE_RUNNING
    self._finished = Deferred()
    self._profile_id = newid()

    # this will run on a background thread
    def convert_profile(profile_filename):
        self.log.info("Converting profile file {}".format(profile_filename))
        try:
            stats = vmprof.read_profile(profile_filename, virtual_only=True, include_extra_info=True)
        except Exception as e:
            self.log.error("Fatal: could not read vmprof profile file '{}': {}".format(profile_filename, e))
            raise e

        tree = stats.get_tree()
        total = float(tree.count)
        res = []

        def process_node(parent, node, level):
            parent_name = parent.name if parent else None
            perc = round(100. * float(node.count) / total, 1)
            if parent and parent.count:
                perc_of_parent = round(100. * float(node.count) / float(parent.count), 1)
            else:
                perc_of_parent = 100.
            parts = node.name.count(':')
            if parts == 3:
                block_type, funname, funline, filename = node.name.split(':')
                res.append({
                    u'type': u'py',
                    u'level': level,
                    u'parent': parent_name,
                    u'fun': funname,
                    u'filename': filename,
                    u'dirname': os.path.dirname(filename),
                    u'basename': os.path.basename(filename),
                    u'line': funline,
                    u'perc': perc,
                    u'perc_of_parent': perc_of_parent,
                    u'count': node.count,
                    u'parent_count': parent.count if parent else None,
                })
            elif parts == 1:
                block_type, funname = node.name.split(':')
                res.append({
                    u'type': u'jit',
                    u'level': level,
                    u'parent': parent_name,
                    u'fun': funname,
                    u'perc': perc,
                    u'perc_of_parent': perc_of_parent,
                    u'count': node.count,
                    u'parent_count': parent.count if parent else None,
                })
            else:
                raise Exception("fail!")

        self._walk_tree(None, tree, 0, process_node)
        return res

    def finish_profile():
        vmprof.disable()
        self.log.info("Profile created under {filename}", filename=self._profile_filename)

        # now defer to thread conversion
        d = deferToThread(convert_profile, self._profile_filename)

        def on_profile_converted(res):
            self.log.info("Profile data with {count} log entries generated", count=len(res))
            self._finished.callback(res)

        def on_profile_conversion_failed(err):
            self.log.error(err.value)
            self._finished.errback(err)

        d.addCallbacks(on_profile_converted, on_profile_conversion_failed)

        def cleanup(res):
            # reset state
            self._state = Profiler.STATE_STOPPED
            self._profile_filename = None
            self._started = None
            self._finished = None
            self._profile_id = None

        d.addBoth(cleanup)

    self.log.info("Starting profiling using {profiler} for {runtime} seconds.", profiler=self._id, runtime=runtime)
    reactor.callLater(runtime, finish_profile)

    return self._profile_id, self._finished
def run(): """ Entry point into (native) worker processes. This wires up stuff such that a worker instance is talking WAMP-over-stdio to the node controller. """ import os import sys import platform import signal # Ignore SIGINT so we get consistent behavior on control-C versus # sending SIGINT to the controller process. When the controller is # shutting down, it sends TERM to all its children but ctrl-C # handling will send a SIGINT to all the processes in the group # (so then the controller sends a TERM but the child already or # will very shortly get a SIGINT as well). Twisted installs signal # handlers, but not for SIGINT if there's already a custom one # present. def ignore(sig, frame): log.debug("Ignoring SIGINT in worker.") signal.signal(signal.SIGINT, ignore) # create the top-level parser # import argparse parser = argparse.ArgumentParser() parser.add_argument('--reactor', default=None, choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'], help='Explicit Twisted reactor selection (optional).') parser.add_argument('--loglevel', default="info", choices=['none', 'error', 'warn', 'info', 'debug', 'trace'], help='Initial log level.') parser.add_argument('-c', '--cbdir', type=str, help="Crossbar.io node directory (required).") parser.add_argument('-n', '--node', type=str, help='Crossbar.io node ID (required).') parser.add_argument('-w', '--worker', type=str, help='Crossbar.io worker ID (required).') parser.add_argument('-r', '--realm', type=str, help='Crossbar.io node (management) realm (required).') parser.add_argument('-t', '--type', choices=['router', 'container'], help='Worker type (required).') parser.add_argument('--title', type=str, default=None, help='Worker process title to set (optional).') options = parser.parse_args() # make sure logging to something else than stdio is setup _first_ # from crossbar._logging import make_JSON_observer, cb_logging_aware, _stderr from crossbar._logging import make_logger, log_publisher, start_logging from crossbar._logging import set_global_log_level # Set the global log level set_global_log_level(options.loglevel) log = make_logger() # Print a magic phrase that tells the capturing logger that it supports # Crossbar's rich logging print(cb_logging_aware, file=_stderr) _stderr.flush() flo = make_JSON_observer(_stderr) log_publisher.addObserver(flo) start_logging() try: import setproctitle except ImportError: log.debug("Could not set worker process title (setproctitle not installed)") else: # set process title if requested to # if options.title: setproctitle.setproctitle(options.title) else: WORKER_TYPE_TO_TITLE = { 'router': 'crossbar-worker [router]', 'container': 'crossbar-worker [container]' } setproctitle.setproctitle(WORKER_TYPE_TO_TITLE[options.type].strip()) # we use an Autobahn utility to import the "best" available Twisted reactor # from autobahn.twisted.choosereactor import install_reactor reactor = install_reactor(options.reactor) from twisted.python.reflect import qual log.info("Worker running under {python}-{reactor}", python=platform.python_implementation(), reactor=qual(reactor.__class__).split('.')[-1]) options.cbdir = os.path.abspath(options.cbdir) os.chdir(options.cbdir) # log.msg("Starting from node directory {}".format(options.cbdir)) from crossbar.worker.router import RouterWorkerSession from crossbar.worker.container import ContainerWorkerSession WORKER_TYPE_TO_CLASS = { 'router': RouterWorkerSession, 'container': ContainerWorkerSession } from autobahn.twisted.websocket import WampWebSocketServerProtocol class 
WorkerServerProtocol(WampWebSocketServerProtocol): def connectionLost(self, reason): try: # this log message is unlikely to reach the controller (unless # only stdin/stdout pipes were lost, but not stderr) log.warn("Connection to node controller lost.") WampWebSocketServerProtocol.connectionLost(self, reason) except: pass finally: # losing the connection to the node controller is fatal: # stop the reactor and exit with error log.info("No more controller connection; shutting down.") reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1) try: reactor.stop() except ReactorNotRunning: pass try: # create a WAMP application session factory # from autobahn.twisted.wamp import ApplicationSessionFactory from autobahn.wamp.types import ComponentConfig session_config = ComponentConfig(realm=options.realm, extra=options) session_factory = ApplicationSessionFactory(session_config) session_factory.session = WORKER_TYPE_TO_CLASS[options.type] # create a WAMP-over-WebSocket transport server factory # from autobahn.twisted.websocket import WampWebSocketServerFactory transport_factory = WampWebSocketServerFactory(session_factory, "ws://localhost", debug=False, debug_wamp=False) transport_factory.protocol = WorkerServerProtocol transport_factory.setProtocolOptions(failByDrop=False) # create a protocol instance and wire up to stdio # from twisted.python.runtime import platform as _platform from twisted.internet import stdio proto = transport_factory.buildProtocol(None) if _platform.isWindows(): stdio.StandardIO(proto) else: stdio.StandardIO(proto, stdout=3) # now start reactor loop # if False: log.info("vmprof enabled.") import os import vmprof PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid()) outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC) vmprof.enable(outfd, period=0.01) log.info("Entering event loop...") reactor.run() vmprof.disable() else: log.debug("Entering event loop...") reactor.run() except Exception as e: log.info("Unhandled exception: {}".format(e)) if reactor.running: reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1) reactor.stop() else: sys.exit(1)
def __enter__(self):
    vmprof.enable(self.tmpfile.fileno(), 0.001)
def _run_command_exec_worker(options, reactor=None, personality=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # https://coverage.readthedocs.io/en/coverage-4.4.2/subprocess.html#measuring-sub-processes
    MEASURING_COVERAGE = False
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
        except ImportError:
            pass
        else:
            # The following will read the environment variable COVERAGE_PROCESS_START,
            # and that should be set to the .coveragerc file:
            #
            #   export COVERAGE_PROCESS_START=${PWD}/.coveragerc
            #
            coverage.process_startup()
            MEASURING_COVERAGE = True

    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # make sure logging to something else than stdio is setup _first_
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # get personality klass, eg "crossbar.personality.Personality"
    l = options.personality.split('.')
    personality_module, personality_klass = '.'.join(l[:-1]), l[-1]

    # now load the personality module and class
    _mod = importlib.import_module(personality_module)
    Personality = getattr(_mod, personality_klass)

    # get worker klass, eg "crossbar.worker.container.ContainerController"
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Starting worker "{worker_id}" for node "{node_id}" with personality "{personality}" {worker_class}',
        worker_id=options.worker,
        node_id=options.node,
        personality=Personality.NAME,
        worker_class=hltype(klass),
    )
    log.info(
        'Running as PID {pid} on {python}-{reactor}',
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )

    if MEASURING_COVERAGE:
        log.info(hl('Code coverage measurements enabled (coverage={coverage_version}).', color='green', bold=True),
                 coverage_version=coverage.__version__)

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle(
                'crossbar-worker [{}]'.format(options.klass)
            )

    from twisted.internet.error import ConnectionDone
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly whether we're shutting down orderly
            # or shutting down because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}", reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is orphaned, and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # define a WAMP application session factory
        #
        from autobahn.wamp.types import ComponentConfig

        def make_session():
            session_config = ComponentConfig(realm=options.realm, extra=options)
            session = klass(config=session_config, reactor=reactor, personality=Personality)
            return session

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(make_session, u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())
            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

            vmprof.disable()
        else:
            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
# JSON serializer is always available
serializers.append(serializer.JsonSerializer())
#serializers.append(serializer.JsonSerializer(batched=True))

# MsgPack serializers are optional
if hasattr(serializer, 'MsgPackSerializer'):
    serializers.append(serializer.MsgPackSerializer())
    #serializers.append(serializer.MsgPackSerializer(batched=True))
else:
    print("MsgPack not installed (pip install msgpack-python)")

for test in tests:
    for ser in serializers:
        print("Running {} on serializer {} for {} seconds ..".format(test.__name__, ser.__class__, RUNTIME))

        profile = PROFILE_FILE.format(test.__name__, ser.SERIALIZER_ID)
        outfd = os.open(profile, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
        vmprof.enable(outfd, period=0.01)

        runtime, total_bytes, total_cnt = test(ser, RUNTIME)

        vmprof.disable()

        avg_msg_len = round(float(total_bytes) / float(total_cnt))

        print("Processed {} messages in {} secs ({} total bytes serialized) at {} msgs/sec ({} bytes avg.)".format(total_cnt, round(runtime, 1), total_bytes, round(float(total_cnt) / runtime), avg_msg_len))
        print("Profile written to {}.".format(profile))
        print("To view the profile, run: vmprofshow {} --indent=2 --prune_percent=5".format(profile))
        print()
def __enter__(self):
    vmprof.enable(self.tmpfile.fileno(), self.period, self.memory,
                  native=self.native, real_time=self.real_time)