def _initCoverage():
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
            coverage.process_startup()
        except ImportError:
            pass
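# Note: all of the child-process hooks collected below assume that a parent process has
# already pointed COVERAGE_PROCESS_START at a coverage config file (typically with
# `parallel = True`) before spawning the child; otherwise coverage.process_startup() is a
# no-op. A minimal, hypothetical parent-side sketch (the "worker.py" script name and the
# ".coveragerc" path are placeholders, not from any example above):
import os
import subprocess
import sys

env = os.environ.copy()
env["COVERAGE_PROCESS_START"] = os.path.abspath(".coveragerc")  # assumed rc file with `parallel = True`

# spawn the child that calls coverage.process_startup() at its own startup
subprocess.run([sys.executable, "worker.py"], env=env, check=True)

# merge the per-process data files the children wrote, then report
subprocess.run([sys.executable, "-m", "coverage", "combine"], check=True)
subprocess.run([sys.executable, "-m", "coverage", "report"], check=True)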
def startVesperInstance(port, queue):
    try:
        import coverage, sys, signal, atexit
        coverage.process_startup()

        def safeterminate(num, frame):
            # coverage registers an atexit function
            # so have atexit functions called when terminating
            atexit._run_exitfuncs()  # for some reason sys.exit isn't calling this
            sys.exit()

        signal.signal(signal.SIGTERM, safeterminate)
    except ImportError:
        pass

    @app.Action
    def sendServerStartAction(kw, retVal):
        # print "startReplication callback!"
        queue.put('server ready')

    @Route('testresult')  # , REQUEST_METHOD='POST')
    def handleTestresult(kw, retval):
        queue.put(json.loads(kw._postContent))
        kw._responseHeaders['Content-Type'] = 'application/json'
        return '"OK"'

    tmpdir = tempfile.gettempdir()
    print "creating vesper instance on port %d" % (port), 'tmp at', tmpdir
    app.createApp(__name__, 'vesper.web.admin', port=port,
                  storage_url="mem://",
                  static_path='browser',
                  actions={'load-model': [sendServerStartAction]},
                  template_path='browser/templates',
                  mako_module_dir=os.path.join(tmpdir, 'browserTest_makomodules'),
                  logconfig=logconfig
                  ).run()
def main():
    import sys, os, socket
    test_root = os.path.dirname(os.path.abspath(__file__))
    os.chdir(test_root)
    sys.path.insert(0, os.path.dirname(test_root))
    sys.path.insert(0, test_root)
    try:
        server = sys.argv[1]
        port = int(sys.argv[2])

        if server == 'gevent':
            from gevent import monkey
            monkey.patch_all()
        elif server == 'eventlet':
            import eventlet
            eventlet.monkey_patch()

        try:
            import coverage
            coverage.process_startup()
        except ImportError:
            pass

        from bottle import route, run
        route('/test', callback=lambda: 'OK')
        run(port=port, server=server, quiet=True)
    except socket.error:
        sys.exit(3)
    except ImportError:
        sys.exit(128)
    except KeyboardInterrupt:
        pass
def start_parallel_coverage():
    """Start the coverage process manually for multi-process measurement."""
    import coverage
    coverage.process_startup()
def startVesperInstance(trunk_id, nodeId, port, queueHost, queuePort, channel):
    try:
        import coverage, sys, signal, atexit
        coverage.process_startup()

        def safeterminate(num, frame):
            # coverage registers an atexit function
            # so have atexit functions called when terminating
            atexit._run_exitfuncs()  # for some reason sys.exit isn't calling this
            sys.exit()

        signal.signal(signal.SIGTERM, safeterminate)
    except ImportError:
        pass

    print "creating vesper instance:%s (%s:%d)" % (nodeId, queueHost, port)

    # assume remote queue implements message ack
    sendAck = True
    if startQueue is startMorbidQueue:
        # morbid doesn't support stomp ack
        sendAck = False

    conf = {
        "storage_url": "mem://",
        "save_history": True,
        "trunk_id": trunk_id,
        "branch_id": nodeId,
        "replication_hosts": [(queueHost, queuePort)],
        "replication_channel": channel,
        "send_stomp_ack": sendAck,
        "actions": {"http-request": route.gensequence},
    }
    app.createApp(baseapp="miniserver.py", model_uri="test:", port=port, **conf).run()
def test_end_to_end(self, input: str, keypair: ElGamalKeyPair, use_keypair: bool) -> None:
    coverage.process_startup()  # necessary for coverage testing to work in parallel

    cvrs = read_dominion_csv(StringIO(input))
    self.assertIsNotNone(cvrs)

    _, ballots, _ = cvrs.to_election_description()
    assert len(ballots) > 0, "can't have zero ballots!"

    if use_keypair:
        tally = fast_tally_everything(
            cvrs, self.pool, verbose=True, secret_key=keypair.secret_key
        )
    else:
        tally = fast_tally_everything(cvrs, self.pool, verbose=True)

    self.assertTrue(tally.all_proofs_valid(verbose=True))

    # Now, while we've got a tally and a set of cvrs, we'll test some of the other utility
    # methods that we have. This is going to be much faster than regenerating cvrs and tallies.

    # TODO: tests for get_contest_titles_matching and get_ballot_styles_for_contest_titles

    for ballot_style in cvrs.metadata.style_map.keys():
        ballots_query = tally.get_ballots_matching_ballot_styles([ballot_style])
        ballots_pandas = cvrs.data[cvrs.data.BallotType == ballot_style]

        self.assertEqual(len(ballots_pandas), len(ballots_query))
def trySetupCoverage():  # pragma: no cover - can hardly measure coverage here :)
    try:
        import coverage
        coverage.process_startup()  # doesn't do anything unless COVERAGE_PROCESS_START is set
    except Exception:
        pass
def start_coverage():
    if not in_coverage_mode():
        return
    os.environ['COVERAGE_PROCESS_START'] = ''
    try:
        import coverage
        coverage.process_startup()
    except ImportError:
        pass
def main():
    # type: () -> None
    """ Main mpi piper worker.

    :return: None
    """
    # Configure the global tracing variable from the argument
    global TRACING
    global WORKER_CONF
    global CACHE_IDS
    global CACHE_QUEUE
    TRACING = (int(sys.argv[4]) > 0)

    # Enable coverage if performed
    if "COVERAGE_PROCESS_START" in os.environ:
        import coverage
        coverage.process_startup()

    # Configure the piper worker with the arguments
    WORKER_CONF = PiperWorkerConfiguration()
    WORKER_CONF.update_params(sys.argv)

    persistent_storage = (WORKER_CONF.storage_conf != 'null')
    _, _, _, log_dir = load_loggers(WORKER_CONF.debug, persistent_storage)

    cache_profiler = False
    if WORKER_CONF.cache_profiler.lower() == 'true':
        cache_profiler = True

    if is_worker():
        # Setup cache
        if is_cache_enabled(str(WORKER_CONF.cache)):
            # Deploy the necessary processes
            cache = True
            cache_params = start_cache(None, str(WORKER_CONF.cache),
                                       cache_profiler, log_dir)
        else:
            # No cache
            cache = False
            cache_params = (None, None, None, None)  # type: ignore
    else:
        # Otherwise it is an executor
        cache = False  # to stop only the cache from the main process
        cache_params = (None, None, None, None)      # type: ignore
    smm, cache_process, CACHE_QUEUE, CACHE_IDS = cache_params

    if is_worker():
        with trace_mpi_worker() if TRACING else dummy_context():
            compss_persistent_worker(WORKER_CONF)
    else:
        with trace_mpi_executor() if TRACING else dummy_context():
            compss_persistent_executor(WORKER_CONF)

    if cache and is_worker():
        stop_cache(smm, CACHE_QUEUE, cache_profiler, cache_process)  # noqa
def sync_world_listener(host, port, queue, state, stats, level, name):
    try:
        import coverage
        coverage.process_startup()
    except ImportError:
        pass
    logging.basicConfig(level=level)
    wl = SyncWorldListener(host, port, queue, state, stats, name)
    wl.run()
def setup_coverage() -> None:
    try:
        # The module may be missing during early stage setup, no need to abort everything.
        import coverage
    except ImportError as ex:
        print('COVERAGE NOT CONFIGURED:', ex, file=sys.stderr)
    else:
        # Coverage configuration; see https://coverage.readthedocs.io/en/coverage-4.2/subprocess.html
        # This is kind of a big gun because it makes us track coverage of everything we run,
        # even doc generation, but it's acceptable.
        os.environ['COVERAGE_PROCESS_START'] = str(OWN_PATH.parent / 'setup.cfg')
        coverage.process_startup()
def init_coverage(extension=None):
    # Child-process coverage support
    if os.environ.has_key("COVERAGE_PROCESS_START"):
        # Set destination file
        init_coverage_file(extension)
        # Import module
        try:
            import coverage
            coverage.process_startup()
        except ImportError:
            pass
def run(self): """Start up the server.""" subprocess.run(["flask", "db", "upgrade"], check=True) # pylint: disable=import-outside-toplevel import coverage coverage.process_startup() # pylint: disable=import-outside-toplevel from animeu.app import app self.app = app self.app.config["SERVER_NAME"] = f"{self.host}:{self.port}" self.server = wsgi.Server((self.host, self.port), self.app, max=1) self.server.start()
def main():
    import os
    if os.getenv("TRAVIS", None) or os.getenv("FORCE_COVERAGE", None):
        # Enable coverage if it is Travis-CI or env variable FORCE_COVERAGE set to true
        import coverage
        coverage.process_startup()
    cli = Cli()
    cli.parse_args()
    print(cli.run())
def run_tests(self):
    if self.coverage and self.start_coverage:
        import coverage
        p = current_process()
        p._coverage = coverage.Coverage(data_suffix=True)
        coverage.process_startup()
        p._coverage.start()

    from pulsar.apps.test import TestSuite

    params = self.get_test_parameters()
    test_suite = TestSuite(argv=self.test_args, **params)
    test_suite.start()
def init_coverage(extension=None):
    # Child-process coverage support
    if os.environ.has_key("COVERAGE_PROCESS_START"):
        # Set destination file
        init_coverage_file(extension)
        # Import module
        try:
            import coverage
            coverage.process_startup()
        except ImportError:
            pass
def subproc():
    if os.getenv('COVERAGE_PROCESS_START'):
        import coverage
        coverage.process_startup()

    def handler(signum, frame):
        sys.exit(_IT_WORKS)

    install_shutdown_handlers(handler)

    print('start')
    count = 0
    while count < 5:
        time.sleep(1)
        count += 1
def run_tests(args):
    extra_pythonpath = args[1]
    sys.path.append(extra_pythonpath)
    LOGGER.info("Appending extra PYTHONPATH %s", extra_pythonpath)
    import coverage
    coverage.process_startup()
    # I split this into a separate function to increase coverage ever so slightly.
    # I am not clear why, but it seems that coverage misses out on lines
    # within the same function as coverage.process_startup() got called.
    # Calling into another function seems to help it.
    _run_tests(args)
def _bootstrap_coverage():
    logger = logging.getLogger('__cpy2py__.bootstrap.plugin.coverage')
    try:
        import coverage
        logger.info('plugin coverage available')
    except ImportError:
        logger.warning('plugin coverage unavailable')
    else:
        coverage.process_startup()
        if hasattr(coverage.process_startup, "done"):
            logger.info('plugin coverage enabled')
        else:
            logger.info('plugin coverage disabled')
def deploy_models(self, username: str, password: str):
    repo_dir = os.path.abspath(os.path.dirname(tabpy.__file__))
    path = os.path.join(repo_dir, 'models', 'deploy_models.py')
    with open(self.tmp_dir + '/deploy_models_output.txt', 'w') as outfile:
        outfile.write(f'--<< Running {self.py} {path} '
                      f'{self._get_config_file_name()} >>--\n')
        input_string = f'{username}\n{password}\n'
        outfile.write(f'--<< Input = {input_string} >>--')
        coverage.process_startup()
        p = subprocess.run(
            [self.py, path, self._get_config_file_name()],
            input=input_string.encode('utf-8'),
            stdout=outfile,
            stderr=outfile)
def create_app(self):
    """Create the test server application instance."""
    coverage.process_startup()
    app = create_test_app()
    selenium_server_url = "http://{}:{}/wd/hub".format(
        os.environ.get("{{ cookiecutter.project_slug|upper }}_SELENIUM_HOST", "chrome"),
        os.environ.get("{{ cookiecutter.project_slug|upper }}_SELENIUM_PORT", "4444"),
    )
    self.browser = Remote(
        command_executor=selenium_server_url,
        desired_capabilities=DesiredCapabilities.CHROME.copy(),
    )
    self.browser.implicitly_wait(3)
    return app
def _ipc_recv(logfilename, queue, state, level, retries=10):
    """The only way for bots to send messages back is through the log file;
    we have standardized our log lines so we know which bot is sending us a message.
    """
    try:
        import coverage
        coverage.process_startup()
    except ImportError:
        pass
    logging.basicConfig(level=level)
    recv = IPCRecv(logfilename, queue, state)
    recv.connect()
    recv.run()
def deploy_models(self, username: str, password: str):
    repo_dir = os.path.abspath(os.path.dirname(tabpy.__file__))
    path = os.path.join(repo_dir, "models", "deploy_models.py")
    with open(self.tmp_dir + "/deploy_models_output.txt", "w") as outfile:
        outfile.write(f"--<< Running {self.py} {path} "
                      f"{self._get_config_file_name()} >>--\n")
        input_string = f"{username}\n{password}\n"
        outfile.write(f"--<< Input = {input_string} >>--")
        coverage.process_startup()
        p = subprocess.run(
            [self.py, path, self._get_config_file_name()],
            input=input_string.encode("utf-8"),
            stdout=outfile,
            stderr=outfile,
        )
def run_with_coverage():  # pragma: no cover
    """
    Invoked when `-c|--coverage` is used on the command line
    """
    try:
        import coverage
    except ImportError:
        warnings.warn(
            'Coverage data will not be generated because coverage is not installed. '
            'Please run `pip install coverage` and try again.')
        return

    coverage.process_startup()

    # need to register a shutdown handler for SIGTERM since it won't run the
    # atexit functions required by coverage
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))
def main() -> int:
    # Remove old run data
    out_dir = root_path / '.coverage'
    mesonlib.windows_proof_rmtree(out_dir.as_posix())
    out_dir.mkdir(parents=True, exist_ok=True)

    # Setup coverage
    python_path = (root_path / 'ci').as_posix()
    os.environ['PYTHONPATH'] = os.pathsep.join([python_path, os.environ.get('PYTHONPATH', '')])
    os.environ['COVERAGE_PROCESS_START'] = generate_coveragerc().as_posix()
    coverage.process_startup()

    # Run the actual command
    cmd = mesonlib.python_command + sys.argv[1:]
    return subprocess.run(cmd, env=os.environ.copy()).returncode
class TestingConfig(Config):
    TESTING = True
    DEBUG = True
    coverage.process_startup()
    MONGO_DBNAME = 'recipe_app_testing'
    MONGO_URI = 'mongodb://*****:*****@127.0.0.1:27017/recipe_app_testing'
    SECRET_KEY = '<replace>'
def main():
    covstart = os.environ.get('COVERAGE_PROCESS_START')
    if covstart is not None:
        sys.path.extend(os.environ['PYTHONPATH'].split(os.path.sep))
        import coverage
        coverage.process_startup()

    # Get everything after '--' as those are arguments to our script
    args = sys.argv[sys.argv.index('--') + 1:]

    logging.basicConfig(level=logging.INFO)

    action = args[0]
    if action == 'render':
        render()
def start_coverage(self):
    if self.cfg.coverage:
        if not coverage:
            self.logger.error('Coverage module not installed. '
                              'Cannot start coverage.')
            return
        if self.is_arbiter():
            if not self.coverage:
                self.logger.warning('Start coverage')
                p = current_process()
                p._coverage = coverage.Coverage(data_suffix=True)
                coverage.process_startup()
                p._coverage.start()
            config_file = self.coverage.config_file
            os.environ['COVERAGE_PROCESS_START'] = config_file
        elif self.cfg.concurrency == 'process':
            coverage.process_startup()
def start_coverage(self):
    if self.cfg.coverage:
        if not coverage:
            self.logger.error('Coverage module not installed. '
                              'Cannot start coverage.')
            return
        if self.is_arbiter():
            if not self.coverage:
                self.logger.warning('Start coverage')
                p = current_process()
                p._coverage = coverage.Coverage(data_suffix=True)
                coverage.process_startup()
                p._coverage.start()
            config_file = self.coverage.config_file
            os.environ['COVERAGE_PROCESS_START'] = config_file
        elif self.cfg.concurrency == 'subprocess':
            coverage.process_startup()
def main():
    if sys.argv[1:] == ['subprocess']:
        print('subprocess')
        cov = coverage.process_startup()
        subprocess_main()
    elif sys.argv[1:] == []:
        print('process')
        subprocess.check_call((sys.executable, __file__, 'subprocess'))
def run_with_coverage():  # pragma: no cover
    """
    Invoked when `-c|--coverage` is used on the command line
    """
    try:
        import coverage
    except ImportError:
        warnings.warn(
            'Coverage data will not be generated because coverage is not '
            'installed. Please run `pip install coverage` and try again.'
        )
        return

    coverage.process_startup()

    # need to register a shutdown handler for SIGTERM since it won't run the
    # atexit functions required by coverage
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))
def define_jobs_context(self, context):
    CompTests.global_output_dir = self.get_options().output
    self.info('Setting output dir to %s' % CompTests.global_output_dir)
    CompTests.output_dir_for_current_test = None

    GlobalConfig.global_load_dir('default')

    modules = self.get_modules()

    if self.options.circle:
        env = os.environ
        v_index, v_total = 'CIRCLE_NODE_INDEX', 'CIRCLE_NODE_TOTAL'
        if v_index in env and v_total in env:
            index = int(os.environ[v_index])
            total = int(os.environ[v_total])
            msg = 'Detected I am worker #%s of %d in CircleCI.' % (index, total)
            self.info(msg)
            mine = []
            for i in range(len(modules)):
                if i % total == index:
                    mine.append(modules[i])

            msg = 'I am only doing these modules: %s, instead of %s' % (mine, modules)
            self.info(msg)

            modules = mine

    if not modules:
        raise Exception('No modules found.')  # XXX: what's the nicer way?

    options = self.get_options()
    do_coverage = options.coverage
    if do_coverage:
        import coverage
        coverage.process_startup()

    if not options.nonose:
        self.instance_nosetests_jobs(context, modules, do_coverage)
        # self.instance_nosesingle_jobs(context, modules)

    if not options.nocomp:
        self.instance_comptests_jobs(context, modules,
                                     create_reports=options.reports,
                                     do_coverage=do_coverage)
def cov():
    import coverage
    cov = None
    try:
        cov = coverage.process_startup()
        yield
    finally:
        if cov:
            cov.save()
def main():
    # Configure the global tracing variable from the argument
    global TRACING
    global WORKER_CONF
    global CACHE_IDS
    global CACHE_QUEUE
    TRACING = (int(sys.argv[4]) > 0)

    # Enable coverage if performed
    if "COVERAGE_PROCESS_START" in os.environ:
        import coverage
        coverage.process_startup()

    # Configure the piper worker with the arguments
    WORKER_CONF = PiperWorkerConfiguration()
    WORKER_CONF.update_params(sys.argv)

    if is_worker():
        # Setup cache
        if is_cache_enabled(WORKER_CONF.cache):
            # Deploy the necessary processes
            cache = True
            cache_params = start_cache(None, WORKER_CONF.cache)
        else:
            # No cache
            cache = False
            cache_params = (None, None, None, None)
    else:
        # Otherwise it is an executor
        cache = False  # to stop only the cache from the main process
        cache_params = (None, None, None, None)
    smm, cache_process, CACHE_QUEUE, CACHE_IDS = cache_params

    if is_worker():
        with trace_mpi_worker() if TRACING else dummy_context():
            compss_persistent_worker(WORKER_CONF)
    else:
        with trace_mpi_executor() if TRACING else dummy_context():
            compss_persistent_executor(WORKER_CONF)

    if cache and is_worker():
        stop_cache(smm, CACHE_QUEUE, cache_process)  # noqa
def startup(cls):
    """Startup function which is invoked by every(!) python process during coverage measurement.
    If the process is relevant we start measuring coverage."""
    argv = open('/proc/%s/cmdline' % os.getpid()).read().split('\x00')
    if os.getuid() != 0 or not any('univention' in arg or 'udm' in arg or 'ucs' in arg or 'ucr' in arg for arg in argv):
        if argv != ['/usr/bin/python2.7', '']:
            cls.debug_message('skip non-ucs process', argv)
            return  # don't change non UCS-python scripts
    if any('listener' in arg or 'notifier' in arg for arg in argv[2:]):
        cls.debug_message('skip listener', argv)
        return  # we don't need to cover the listener currently. some tests failed, maybe because of measuring the listener?

    cls.debug_message('START', argv)
    atexit.register(lambda: cls.debug_message('STOP'))

    if not os.environ.get('COVERAGE_PROCESS_START'):
        os.environ["COVERAGE_PROCESS_START"] = os.path.abspath(os.path.expanduser('~/.coveragerc'))
        cls.debug_message('ENVIRON WAS CLEARED BY PARENT PROCESS', argv)

    import coverage
    cls.coverage = coverage.process_startup()
    if not cls.coverage:
        cls.debug_message('no coverage startup (already started?, environ cleared?)')
        return

    # FIXME: univention-cli-server calls os.fork() which causes the coverage measurement not to start in the forked process
    # https://bitbucket.org/ned/coveragepy/issues/310/coverage-fails-with-osfork-and-os_exit
    osfork = getattr(os, 'fork')

    def fork(*args, **kwargs):
        pid = osfork(*args, **kwargs)
        if pid == 0:
            cls.debug_message('FORK CHILD')
            cls.startup()
        else:
            cls.debug_message('FORK PARENT')
            cls.stop_measurement(True)
        return pid
    os.fork = fork

    # https://bitbucket.org/ned/coveragepy/issues/43/coverage-measurement-fails-on-code
    # if the process calls one of the process-replacement functions the coverage must be started in the new process
    for method in ['execl', 'execle', 'execlp', 'execlpe', 'execv', 'execve', 'execvp', 'execvpe', '_exit']:
        if isinstance(getattr(os, method), StopCoverageDecorator):
            continue  # restarted in the same process (e.g. os.fork())
        setattr(os, method, StopCoverageDecorator(getattr(os, method)))

    # There are test cases which e.g. kill the univention-cli-server.
    # The atexit-handler of coverage will not be called for SIGTERM, so we need to stop coverage manually
    def sigterm(sig, frame):
        cls.debug_message('signal handler', sig, argv)
        cls.stop_measurement()
        signal.signal(signal.SIGTERM, previous)
        os.kill(os.getpid(), sig)
    previous = signal.signal(signal.SIGTERM, sigterm)
def __init__(self):
    try:
        import coverage
    except ImportError:
        raise ImportError('coverage is not installed')
    if coverage.__version__ < '4':
        raise ImportError('coverage>=4 required')

    coverage_config_file = None
    for argv in sys.argv:
        if argv.startswith('--coverage-rcfile='):
            _, coverage_config_file = argv.split('=')

    self.coverage = coverage.coverage(
        branch=True,
        config_file=coverage_config_file or self.default_coverage_config())

    # TODO: only if tests are parallel
    coverage.process_startup()
    self.coverage.start()
def setup_coverage(path_to_coveragerc):
    if 'RUN_COVERAGE' not in os.environ:
        return
    if not os.path.exists(path_to_coveragerc):
        raise ValueError('coveragerc file %s does not exist.' % path_to_coveragerc)

    os.environ['COVERAGE_PROCESS_START'] = path_to_coveragerc
    rootdir = os.path.dirname(path_to_coveragerc)

    def combine_report():
        subprocess.call(
            [sys.executable, '-m', 'coverage', 'combine'],
            cwd=rootdir,
        )
        subprocess.call(
            [sys.executable, '-m', 'coverage', 'report'],
            cwd=rootdir,
        )

    if path_to_coveragerc:
        try:
            import coverage
            print("Coverage configured with %s" % path_to_coveragerc)
            if 'COVERAGE_REPORT' in os.environ:
                import atexit
                atexit.register(combine_report)
            coverage.process_startup()
        except ImportError:
            print("You try to run coverage "
                  "but coverage is not installed in your environment.")
            sys.exit(1)
PNUM = 70

import saltunittest
from integration import TestDaemon

try:
    import xmlrunner
except ImportError:
    xmlrunner = None

TEST_DIR = os.path.dirname(os.path.normpath(os.path.abspath(__file__)))

try:
    import coverage
    # Cover any subprocess
    coverage.process_startup()
    # Setup coverage
    code_coverage = coverage.coverage(
        branch=True,
        source=[os.path.join(os.path.dirname(TEST_DIR), 'salt')],
    )
except ImportError:
    code_coverage = None

REQUIRED_OPEN_FILES = 2048

TEST_RESULTS = []


def print_header(header, sep='~', top=True, bottom=True, inline=False, centered=False):
def main():
    coverage.process_startup()
    sys.path.insert(0, os.path.abspath('src'))
    return pytest.main()
def run_with_coverage():
    if os.path.exists('.coverage'):
        os.remove('.coverage')
    coverage.process_startup()
    runtests()
def start_coverage():
    COV.start()
    coverage.process_startup()
def _run_command_exec_worker(options, reactor=None, personality=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that a
    worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # https://coverage.readthedocs.io/en/coverage-4.4.2/subprocess.html#measuring-sub-processes
    MEASURING_COVERAGE = False
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
        except ImportError:
            pass
        else:
            # The following will read the environment variable COVERAGE_PROCESS_START,
            # and that should be set to the .coveragerc file:
            #
            #   export COVERAGE_PROCESS_START=${PWD}/.coveragerc
            #
            coverage.process_startup()
            MEASURING_COVERAGE = True

    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # make sure logging to something else than stdio is setup _first_
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # get personality klass, eg "crossbar.personality.Personality"
    l = options.personality.split('.')
    personality_module, personality_klass = '.'.join(l[:-1]), l[-1]

    # now load the personality module and class
    _mod = importlib.import_module(personality_module)
    Personality = getattr(_mod, personality_klass)

    # get worker klass, eg "crossbar.worker.container.ContainerController"
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Starting worker "{worker_id}" for node "{node_id}" with personality "{personality}" {worker_class}',
        worker_id=options.worker,
        node_id=options.node,
        personality=Personality.NAME,
        worker_class=hltype(klass),
    )
    log.info(
        'Running as PID {pid} on {python}-{reactor}',
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )

    if MEASURING_COVERAGE:
        log.info(hl('Code coverage measurements enabled (coverage={coverage_version}).',
                    color='green', bold=True),
                 coverage_version=coverage.__version__)

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle(
                'crossbar-worker [{}]'.format(options.klass)
            )

    from twisted.internet.error import ConnectionDone
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly whether we're shutting down orderly
            # or shutting down because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}", reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is "orphaned", and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, exit_code)

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # define a WAMP application session factory
        #
        from autobahn.wamp.types import ComponentConfig

        def make_session():
            session_config = ComponentConfig(realm=options.realm, extra=options)
            session = klass(config=session_config, reactor=reactor, personality=Personality)
            return session

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(make_session, u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

            vmprof.disable()
        else:
            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
def main():
    coverage.process_startup()
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.testing")
    sys.path.insert(0, os.path.abspath('src'))
    sys.path.insert(0, "examples/simple")
    return pytest.main()
def run(self, args):
    select_meta = None
    self.reporter_manager = ReporterPluginManager()
    self.reporter_manager.add_to_arguments(self.arg_parser)

    self.arguments = self.arg_parser.parse_args(args)

    # Let each reporter parse cli arguments
    self.reporter_manager.process_arguments(self.arguments)

    if self.arguments.parallel:
        coverage.process_startup()
        self.parallel_manager = ParallelManager(
            num_processes=self.arguments.num_processes,
            track_coverage=self.arguments.coverage,
            coverage_omit=self.get_coverage_omit_list())

    if self.arguments.select_meta:
        metas = [meta.split('=') for meta in self.arguments.select_meta]
        select_meta = {meta[0]: meta[1].strip('"\'') for meta in metas}

    if not self.arguments.no_art:
        print(self.generate_ascii_art())

    if self.arguments.coverage:
        print(_(' - Running with coverage enabled - '))
        self.coverage = coverage.coverage(
            omit=self.get_coverage_omit_list(),
            data_suffix=self.arguments.parallel)
        self.coverage._warn_no_data = False
        self.coverage.start()

    self.suite_scanner = SuiteScanner(self.arguments.search or 'spec')
    self.suite_types = self.suite_scanner.scan(self.arguments.select_module)

    # Serial: Add and Execute | Parallel: Collect all with the add process
    for suite_type in self.suite_types:
        suite = suite_type()
        self.suites.append(suite)
        self.reporter_manager.subscribe_all_to_spec(suite)
        suite.execute(select_metadata=select_meta,
                      parallel_manager=self.parallel_manager,
                      select_tests=self.arguments.select_tests)

    # Actually execute the tests for parallel now
    if self.arguments.parallel:
        self.parallel_manager.execute_all()

    # Save coverage data if enabled
    if self.coverage:
        self.coverage.stop()
        self.coverage.save()
        if self.arguments.parallel:
            self.combine_coverage_reports(
                self.get_coverage_omit_list(), self.arguments.parallel)

    # Print all console summaries
    for reporter in self.reporter_manager.get_console_reporters():
        reporter.print_summary()

    self.reporter_manager.finish_all()
    self.suite_scanner.destroy()
def trySetupCoverage():  # pragma: no cover - can hardly measure coverage here :)
    try:
        import coverage
        coverage.process_startup()  # doesn't do anything unless COVERAGE_PROCESS_START is set
    except Exception:
        pass
def setUp(self):
    try:
        import coverage
        coverage.process_startup()
    except (ImportError, EnvironmentError):
        pass
def main():
    print_system_info()
    parser = argparse.ArgumentParser()
    parser.add_argument('--cov', action='store_true')
    parser.add_argument('--backend', default=None, dest='backend',
                        choices=backendlist)
    parser.add_argument('--cross', default=False, dest='cross', action='store_true')
    parser.add_argument('--failfast', action='store_true')
    (options, _) = parser.parse_known_args()
    # Enable coverage early...
    enable_coverage = options.cov
    if enable_coverage:
        os.makedirs('.coverage', exist_ok=True)
        sys.argv.remove('--cov')
        import coverage
        coverage.process_startup()
    returncode = 0
    cross = options.cross
    backend, _ = guess_backend(options.backend, shutil.which('msbuild'))
    # Running on a developer machine? Be nice!
    if not mesonlib.is_windows() and not mesonlib.is_haiku() and 'CI' not in os.environ:
        os.nice(20)
    # Appveyor sets the `platform` environment variable which completely messes
    # up building with the vs2010 and vs2015 backends.
    #
    # Specifically, MSBuild reads the `platform` environment variable to set
    # the configured value for the platform (Win32/x64/arm), which breaks x86
    # builds.
    #
    # Appveyor setting this also breaks our 'native build arch' detection for
    # Windows in environment.py:detect_windows_arch() by overwriting the value
    # of `platform` set by vcvarsall.bat.
    #
    # While building for x86, `platform` should be unset.
    if 'APPVEYOR' in os.environ and os.environ['arch'] == 'x86':
        os.environ.pop('platform')
    # Run tests
    print(mlog.bold('Running unittests.').get_text(mlog.colorize_console))
    print(flush=True)
    # Can't pass arguments to unit tests, so set the backend to use in the environment
    env = os.environ.copy()
    env['MESON_UNIT_TEST_BACKEND'] = backend.name
    with tempfile.TemporaryDirectory() as temp_dir:
        # Enable coverage on all subsequent processes.
        if enable_coverage:
            Path(temp_dir, 'usercustomize.py').open('w').write(
                'import coverage\n'
                'coverage.process_startup()\n')
            env['COVERAGE_PROCESS_START'] = '.coveragerc'
            if 'PYTHONPATH' in env:
                env['PYTHONPATH'] = os.pathsep.join([temp_dir, env.get('PYTHONPATH')])
            else:
                env['PYTHONPATH'] = temp_dir
        if not cross:
            cmd = mesonlib.python_command + ['run_meson_command_tests.py', '-v']
            if options.failfast:
                cmd += ['--failfast']
            returncode += subprocess.call(cmd, env=env)
            if options.failfast and returncode != 0:
                return returncode
            cmd = mesonlib.python_command + ['run_unittests.py', '-v']
            if options.failfast:
                cmd += ['--failfast']
            returncode += subprocess.call(cmd, env=env)
            if options.failfast and returncode != 0:
                return returncode
            cmd = mesonlib.python_command + ['run_project_tests.py'] + sys.argv[1:]
            returncode += subprocess.call(cmd, env=env)
        else:
            cross_test_args = mesonlib.python_command + ['run_cross_test.py']
            print(mlog.bold('Running armhf cross tests.').get_text(mlog.colorize_console))
            print(flush=True)
            cmd = cross_test_args + ['cross/ubuntu-armhf.txt']
            if options.failfast:
                cmd += ['--failfast']
            returncode += subprocess.call(cmd, env=env)
            if options.failfast and returncode != 0:
                return returncode
            print(mlog.bold('Running mingw-w64 64-bit cross tests.')
                  .get_text(mlog.colorize_console))
            print(flush=True)
            cmd = cross_test_args + ['cross/linux-mingw-w64-64bit.txt']
            if options.failfast:
                cmd += ['--failfast']
            returncode += subprocess.call(cmd, env=env)
    return returncode
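# The previous example writes a usercustomize.py into a temporary directory so every
# subsequent interpreter calls coverage.process_startup() automatically. A related,
# hedged sketch of the other standard hook documented by coverage.py for subprocess
# measurement: dropping a one-line .pth file onto sys.path instead of editing each
# entry point. The file name "coverage_subprocess.pth" and the ".coveragerc" path are
# assumptions for illustration, not taken from any example above.
import os
import sysconfig

site_packages = sysconfig.get_paths()["purelib"]
pth_file = os.path.join(site_packages, "coverage_subprocess.pth")  # hypothetical file name

with open(pth_file, "w") as fh:
    # a .pth line starting with "import" is executed at interpreter startup
    fh.write("import coverage; coverage.process_startup()\n")

# measurement still only happens in processes where COVERAGE_PROCESS_START is set
os.environ["COVERAGE_PROCESS_START"] = os.path.abspath(".coveragerc")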