Example #1
def setup_kill_signals():
    global _signals_already_setup
    if _signals_already_setup:
        return
    gevent.signal_handler(signal.SIGTERM, kill_all_workers)
    gevent.signal_handler(signal.SIGINT, kill_all_workers)
    _signals_already_setup = True
Example #2
def install_signal_handler(signum, name, f, *args, **kwargs):
    def handler():
        gevent.getcurrent().name = name
        logger.info(f"Received {signal.Signals(signum).name} signal.")
        f(*args, **kwargs)

    gevent.signal_handler(signum, handler)
Example #3
    def __init__(self, appConfig: ApplicationConfiguration):
        log.info("Start the application")
        self.appConfig = appConfig
        self.migrationList = {}
        self.daoList = {}
        self.preparationList = {}
        self.telegramBotList = {}
        if appConfig.telegramToken and not appConfig.telegramTokens:
            self.prepareBotForStart(appConfig, _SINGLE_MODE_CONST)
        elif not appConfig.telegramToken and appConfig.telegramTokens:
            for botName in appConfig.telegramTokens.keys():
                self.prepareBotForStart(appConfig, botName)
        else:
            raise RuntimeError(
                "Can't start helper, wrong parameters, please set either telegramToken on telegramTokens!")

        self.server = server.BottleServer(appConfig, self.daoList, self.preparationList, self.telegramBotList)
        self.server.start()

        if appConfig.server.engine == "gevent":
            for sig in (SIGINT, SIGTERM, SIGABRT):
                signal_handler(sig, self.gevent_signal_stop_handler)
            # process reload signal
            signal_handler(SIGHUP, self.gevent_signal_reload_handler)
        else:
            for sig in (SIGINT, SIGTERM, SIGABRT):
                signal(sig, self.signal_stop_handler)
            # process reload signal
            signal(SIGHUP, self.signal_reload_handler)
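
The branch above exists because the two registration APIs expect different handler signatures: gevent.signal_handler calls its callback with no arguments (plus any extras passed at registration), while the standard-library signal.signal always passes (signum, frame). A minimal sketch of the two forms; the handler names are illustrative:

import signal

import gevent


def gevent_stop_handler():
    # gevent-style handler: called with no arguments
    print("stopping")


def stdlib_stop_handler(signum, frame):
    # stdlib-style handler: always receives (signal number, stack frame)
    print("stopping on signal", signum)


gevent.signal_handler(signal.SIGTERM, gevent_stop_handler)
signal.signal(signal.SIGINT, stdlib_stop_handler)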
Example #4
 def __init__(self, **kwargs):
     self.kwargs = kwargs
     self.producer = _Producer(**self.get_producer_settings())
     self.serializer = self.get_message_serializer()
     self.topic_name = self.get_topic_name()
     self.key_schema = self.get_key_schema()
     self.value_schema = self.get_value_schema()
     gevent.signal_handler(signal.SIGTERM, self._flush)
Example #5
def main():
    rebuild_index(False)
    server = get_server()

    gevent.signal_handler(signal.SIGTERM, server.stop)
    gevent.signal_handler(signal.SIGINT, server.stop)

    server.serve_forever()
    gevent.get_hub().join()
Example #6
    def main(self, args=None):
        logger.info("Pachi Counter")

        parser = argparse.ArgumentParser()
        parser.add_argument("-r", "--reset", dest="reset", action="store_true")
        parser.add_argument("machine")
        args = parser.parse_args(args)

        # Create the hardware receiver object
        try:
            hardware = hwReceiverFactory("usbio")
        except HwReceiverError as e:
            logger.critical(e)
            return 1
        logger.info("Hardware: " + hardware.receiver_name)

        # Import the module for the machine specified by the argument
        machine = args.machine
        loader = PluginLoader()
        machine_plugin = loader.getInstance(machine)
        logger.info("Machine: " + machine_plugin.machine_name)

        # Create the data directory and build the path to the data file
        os.makedirs(self.resourcedir, exist_ok=True)
        datafilepath = os.path.join(self.resourcedir, machine)
        logger.info("Datafile: " + datafilepath)

        counter_data = machine_plugin.createCountData()
        if not args.reset:
            counter_data.load(datafilepath)

        # Create the PCounter object
        pc = PCounter(hardware, machine_plugin, counter_data)

        # Create the main loop object
        def mainloop():
            while True:
                gevent.sleep(self.pollingInterval)
                pc.loop()

        greenlet = gevent.spawn(mainloop)

        # Set up the signal handler
        if sys.platform != "win32":
            gevent.signal_handler(signal.SIGTERM, lambda: greenlet.kill())

        # Main loop
        try:
            greenlet.join()
        except KeyboardInterrupt:
            pass
        finally:
            counter_data.save(datafilepath)
            logger.info("Counter data saved to {}".format(datafilepath))

        return 0
Example #7
 def run_comp(self):
     """
     Starts running all the component's routines.
     """
     self.logger.info("Running component")
     self.stop_event.clear()
     if self.use_memory and sys.version_info.minor < 8:
         self.generator.create_memories()
     self._start()
     gevent.signal_handler(signal.SIGTERM, self.stop_run)
Example #8
def main(
    dbconnect,
    creds_file,
    playlists,
    upload_location_allowlist="youtube",
    interval=600,
    metrics_port=8007,
    backdoor_port=0,
):
    """
	dbconnect should be a postgres connection string

	creds_file should contain youtube api creds

	upload_location_allowlist is a comma-separated list of database upload locations to
	consider as eligible to being added to playlists. For these locations, the database video id
	must be a youtube video id.

	interval is how often to check for new videos, default every 10min.
	"""
    common.PromLogCountsHandler.install()
    common.install_stacksampler()
    prom.start_http_server(metrics_port)

    if backdoor_port:
        gevent.backdoor.BackdoorServer(('127.0.0.1', backdoor_port),
                                       locals=locals()).start()

    upload_locations = upload_location_allowlist.split(
        ",") if upload_location_allowlist else []
    playlists = dict(playlists)

    stop = gevent.event.Event()
    gevent.signal_handler(signal.SIGTERM, stop.set)  # shut down on sigterm

    logging.info("Starting up")

    with open(creds_file) as f:
        creds = json.load(f)
    client = GoogleAPIClient(creds['client_id'], creds['client_secret'],
                             creds['refresh_token'])

    dbmanager = DBManager(dsn=dbconnect)
    manager = PlaylistManager(dbmanager, client, upload_locations, playlists)

    while not stop.is_set():
        try:
            manager.run_once()
        except Exception:
            logging.exception("Failed to run playlist manager")
            manager.reset()
        stop.wait(interval)  # wait for interval, or until stopping

    logging.info("Stopped")
Example #9
def main():
    args = sys.argv[1:]
    if len(args) != 2:
        sys.exit('Usage: %s source-address destination-address' % __file__)
    source = args[0]
    dest = parse_address(args[1])
    server = PortForwarder(source, dest)
    log('Starting port forwarder %s:%s -> %s:%s', *(server.address[:2] + dest))
    gevent.signal_handler(signal.SIGTERM, server.close)
    gevent.signal_handler(signal.SIGINT, server.close)
    server.start()
    gevent.wait()
Example #10
def run_server():
    server = WSGIServer(('127.0.0.1', 8002), get_wsgi_application())
    gevent.signal_handler(signal.SIGTERM, server.stop)

    print('Starting WSGIServer...', flush=True)
    server.serve_forever()

    print('Stopping WSGIServer. Waiting for all requests to complete...',
          flush=True)
    gevent.get_hub().join()

    print('Requests completed. Exiting gracefully.', flush=True)
Example #11
    def run(self):
        # register shutdown handler
        gevent.signal_handler(signal.SIGINT, self.close)
        gevent.signal_handler(signal.SIGTERM, self.close)

        queue = self._service.start()
        self._publish_task = gevent.spawn(self._publish, queue)
        self._server_task = gevent.spawn(self.s.run)

        self._logger.info('Server started')
        while not self._pill2kill.is_set():
            asyncio.get_event_loop().run_until_complete(asyncio.sleep(0))
            gevent.sleep(0)
Example #12
    def start(self, wait: bool = False, cert: bool = True):
        # start default servers in the rack
        # handle signals (SIGKILL cannot be caught, so only SIGTERM and SIGINT)
        for signal_type in (signal.SIGTERM, signal.SIGINT):
            gevent.signal_handler(signal_type, self.stop)

        # mark app as started
        if self.is_running():
            return

        self.check_dependencies()

        self.redis.start()
        self.nginx.start()
        j.sals.nginx.get(self.nginx.server_name).cert = cert
        self.mainapp = j.servers.appserver.make_main_app()

        self.rack.start()
        j.logger.register(f"threebot_{self.instance_name}")
        if j.config.get("SEND_REMOTE_ALERTS", False):
            j.tools.alerthandler.register_handler(send_alert)

        # add default packages
        for package_name in DEFAULT_PACKAGES:
            j.logger.info(f"Configuring package {package_name}")
            try:
                package = self.packages.get(package_name)
                self.packages.install(package)
            except Exception as e:
                self.stop()
                raise j.core.exceptions.Runtime(
                    f"Error happened during getting or installing {package_name} package, the detailed error is {str(e)}"
                ) from e

        # install all packages

        j.logger.info("Adding packages")
        self.packages._install_all()
        j.logger.info("jsappserver")
        self.jsappserver = WSGIServer(("localhost", 31000), apply_main_middlewares(self.mainapp))
        j.logger.info("rack add")
        self.rack.add(f"appserver", self.jsappserver)

        j.logger.info("Reloading nginx")
        self.nginx.reload()

        # mark server as started
        self._started = True
        j.logger.info(f"routes: {self.mainapp.routes}")
        j.logger.info(f"Threebot is running at http://localhost:{PORTS.HTTP} and https://localhost:{PORTS.HTTPS}")
        self.rack.start(wait=wait)  # to keep the server running
Example #13
def _run_locust(locust_classes, master):
    config_options(master=master)
    print('*** Starting locust: {}:{} ***'.format(options.web_host,
                                                  options.port))
    if options.master:
        runners.locust_runner = MasterLocustRunner(locust_classes, options)
    else:
        runners.locust_runner = LocalLocustRunner(locust_classes, options)
    logging.info("Starting web monitor at http://%s:%s" %
                 (options.web_host or "0.0.0.0", options.port))
    main_greenlet = gevent.spawn(locust_web.start, locust_classes, options)
    stats_printer_greenlet = None

    def shutdown(code=0):
        """
        Shut down locust by firing quitting event, printing/writing stats and exiting
        """
        logging.info("Shutting down (exit code %s), bye." % code)
        if stats_printer_greenlet is not None:
            stats_printer_greenlet.kill(block=False)
        logging.info("Cleaning up runner...")
        if runners.locust_runner is not None:
            runners.locust_runner.quit()
        logging.info("Running teardowns...")
        events.quitting.fire(reverse=True)
        print_stats(runners.locust_runner.stats, current=False)
        print_percentile_stats(runners.locust_runner.stats)
        if options.csvfilebase:
            write_stat_csvs(options.csvfilebase, options.stats_history_enabled)
        print_error_report()
        sys.exit(code)

    # install SIGTERM handler
    def sig_term_handler():
        logging.info("Got SIGTERM signal")
        shutdown(0)

    gevent.signal_handler(signal.SIGTERM, sig_term_handler)

    try:
        logging.info("Starting Locust...")
        main_greenlet.join()
        code = 0
        lr = runners.locust_runner
        if len(lr.errors) or len(lr.exceptions) or lr.cpu_log_warning():
            code = options.exit_code_on_error
        shutdown(code=code)
    except KeyboardInterrupt:
        shutdown(0)
Example #14
        def request_stop():
            if not self._stop_requested:
                gevent.signal_handler(signal.SIGINT, request_force_stop)
                gevent.signal_handler(signal.SIGTERM, request_force_stop)

                self.log.warning("Warm shut down requested.")
                self.log.warning(
                    "Stopping after all greenlets are finished. "
                    "Press Ctrl+C again for a cold shutdown."
                )

                self._stop_requested = True
                self.gevent_pool.join()
                if self.gevent_worker is not None:
                    self.gevent_worker.kill(StopRequested)
Example #15
 def disable_exit_signals(self):
     gevent.signal_handler(signal.SIGINT, lambda: signal.SIG_IGN)
     gevent.signal_handler(signal.SIGTERM, lambda: signal.SIG_IGN)
     try:
         yield
     finally:
         gevent.signal_handler(signal.SIGINT, self.stop)
         gevent.signal_handler(signal.SIGTERM, self.stop)
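
The bare yield suggests disable_exit_signals is decorated with contextlib.contextmanager in the original class; a minimal usage sketch under that assumption (the Worker class and its stop method are illustrative):

import contextlib
import signal

import gevent


class Worker:
    def stop(self):
        # illustrative shutdown hook; the real class defines its own
        print("stopping")

    @contextlib.contextmanager
    def disable_exit_signals(self):
        # register no-op handlers while the critical section runs
        gevent.signal_handler(signal.SIGINT, lambda: signal.SIG_IGN)
        gevent.signal_handler(signal.SIGTERM, lambda: signal.SIG_IGN)
        try:
            yield
        finally:
            # re-register the normal stop handlers on exit
            gevent.signal_handler(signal.SIGINT, self.stop)
            gevent.signal_handler(signal.SIGTERM, self.stop)


worker = Worker()
with worker.disable_exit_signals():
    gevent.sleep(0.1)  # critical section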
Example #16
 def gevent_set_signal_handlers(signal_handler_map):
     signals = {}
     for (signal_number, handler) in signal_handler_map.items():
         # gevent.signal_handler calls the handler with no arguments, while a stdlib-style handler expects (signal_number, stack_frame); pass them as extra args here
         if callable(handler):
             signals[signal_number] = signal_handler(
                 signal_number, handler, signal_number, None)
     app.signals = signals
Example #17
    def cleanup():
        if hasattr(cleanup, 'started'):
            return
        cleanup.started = True
        logging.info('Process %s exiting normally', os.getpid())
        gevent.signal_handler(signal.SIGINT, lambda: None)
        gevent.signal_handler(signal.SIGTERM, lambda: None)
        if aj.master:
            gateway.destroy()

        p = psutil.Process(os.getpid())
        for c in p.children(recursive=True):
            try:
                os.killpg(c.pid, signal.SIGTERM)
                os.killpg(c.pid, signal.SIGKILL)
            except OSError:
                pass
Example #18
def main():
    logbook.concurrency.enable_gevent()

    global log
    StderrHandler().push_application()
    log = Logger('xbbs.worker')
    inst = XbbsWorker()

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "worker.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))

    job_request = msgs.JobRequest(capabilities=cfg["capabilities"]).pack()

    gevent.signal_handler(signal.SIGUSR1, gevent.util.print_run_info)

    log.info(cfg)
    while True:
        with inst.zmq.socket(zmq.REQ) as jobs:
            jobs.connect(cfg["job_endpoint"])

            while True:
                jobs.send(job_request)
                log.debug("waiting for job...")
                # the coordinator sends a heartbeat each minute, so 1.5 minutes
                # should be a sane duration to assume coordinator death on
                if jobs.poll(90000) == 0:
                    # breaking the inner loop will cause a reconnect
                    # since the coordinator is presumed dead, drop requests yet
                    # unsent to it
                    jobs.set(zmq.LINGER, 0)
                    log.debug("dropping socket after a heartbeat timeout")
                    break
                try:
                    msg = jobs.recv()
                    if len(msg) == 0:
                        # drop null msgs
                        continue
                    process_job_msg(inst, msg)
                except KeyboardInterrupt:
                    log.exception("interrupted")
                    return
                except Exception as e:
                    log.exception("job error", e)
Example #19
def main(channels, base_dir=".", qualities="source", metrics_port=8001, backdoor_port=0):
	qualities = qualities.split(",") if qualities else []

	managers = [
		StreamsManager(channel.rstrip('!'), base_dir, qualities, important=channel.endswith('!'))
		for channel in channels
	]

	def stop():
		for manager in managers:
			manager.stop()

	gevent.signal_handler(signal.SIGTERM, stop) # shut down on sigterm

	common.PromLogCountsHandler.install()
	common.install_stacksampler()
	prom.start_http_server(metrics_port)

	logging.info("Starting up")

	workers = [gevent.spawn(manager.run) for manager in managers]

	if backdoor_port:
		gevent.backdoor.BackdoorServer(('127.0.0.1', backdoor_port), locals=locals()).start()

	# Wait for any to die
	gevent.wait(workers, count=1)
	# If one has stopped, either:
	# 1. stop() was called and all are stopping
	# 2. one errored and we should stop all remaining and report the error
	# Our behaviour in both cases is the same:
	# 1. Tell all managers to gracefully stop
	stop()
	# 2. Wait (with timeout) until they've stopped
	gevent.wait(workers)
	# 3. Check if any of them failed. If they did, report it. If multiple failed, we report
	#    one arbitrarily.
	for worker in workers:
		worker.get() # re-raise error if failed

	logging.info("Gracefully stopped")
Example #20
    def install_signal_handlers(self):
        """ Handle events like Ctrl-C from the command line. """

        self.graceful_stop = False

        def request_shutdown_now():
            self.shutdown_now()

        def request_shutdown_graceful():

            # Second time CTRL-C, shutdown now
            if self.graceful_stop:
                self.shutdown_now()
            else:
                self.graceful_stop = True
                self.shutdown_graceful()

        # First time CTRL-C, try to shutdown gracefully
        gevent.signal_handler(signal.SIGINT, request_shutdown_graceful)

        # User (or Heroku) requests a stop now, just mark tasks as interrupted.
        gevent.signal_handler(signal.SIGTERM, request_shutdown_now)
Example #21
    def start(self):
        '''Call this to run the API without blocking'''
        if not self.running:
            gevent.signal_handler(signal.SIGINT, self.sig_handler)
            gevent.signal_handler(signal.SIGTERM, self.sig_handler)

        self.running = True

        self.httpServer.start()

        while not self.httpServer.started.is_set():
            self.logger.writeDebug('Waiting for httpserver to start...')
            self.httpServer.started.wait()

        if self.httpServer.failed is not None:
            raise self.httpServer.failed

        self.logger.writeDebug("Running on port: {}"
                               .format(self.httpServer.port))

        self.facade.register_service("http://127.0.0.1:{}".format(self.httpServer.port),
                                     "{}{}/".format(CONN_ROOT[1:], CONN_APIVERSIONS[-1]))
        try:
            from nmosconnectiondriver.httpIpstudioDriver import httpIpstudioDriver
            self.logger.writeInfo("Using ipstudio driver")
            # Start the IPStudio driver
            self.driver = httpIpstudioDriver(
                self.httpServer.api,
                self.logger,
                self.facade
            )
        except ImportError:
            # Start the mock driver
            self.driver = NmosDriver(
                self.httpServer.api,
                self.logger,
                self.facade
            )
Example #22
def main():
    config = Config()
    parser = argparse.ArgumentParser(description='Garden lights')
    on_off = parser.add_mutually_exclusive_group(required=True)
    on_off.add_argument('--off',
                        nargs='*',
                        type=int,
                        help='Turn off all the lights')
    on_off.add_argument('--on',
                        nargs='*',
                        type=int,
                        help='Turn on all the lights')
    on_off.add_argument('--status',
                        nargs='*',
                        type=int,
                        help='Check the status of each port')
    on_off.add_argument('--random', type=int, help='Random sequence')
    on_off.add_argument('--light-show', action="store_true", help='Light show')
    on_off.add_argument('--cron', action="store_true", help='Automatic mode')
    pargs = parser.parse_args()
    gevent.signal_handler(signal.SIGHUP, sig_dump)

    lights = Lights(config.ports)

    if pargs.on is not None:
        lights.on(pargs.on)
    elif pargs.off is not None:
        lights.off(pargs.off)
    elif pargs.status is not None:
        check_status(lights, pargs.status)
    elif pargs.light_show:
        light_show(lights)
    elif pargs.cron:
        automation(lights)
    elif pargs.random:
        lights.random(count=pargs.random)
Example #23
def serve(config_file, cookie_secret):
    from gevent import pywsgi, signal_handler
    import signal

    app = make_app(
        config=config_file, cookie_secret=cookie_secret, database_writable=True
    )

    host = app.config.get("HOSTAPI")
    port = app.config.get("PORTAPI")

    print(u"serving on {host}:{port}".format(**locals()))
    server = pywsgi.WSGIServer((host, port), app)

    def shutdown():
        app.logger.info("api is being shutdown")

        server.stop(timeout=10)

        exit(signal.SIGTERM)

    signal_handler(signal.SIGTERM, shutdown)
    signal_handler(signal.SIGINT, shutdown)
    server.serve_forever(stop_timeout=10)
Example #24
    def __init__(self,
                 size=500,
                 name="default",
                 generate_blockdiag=False,
                 blockdiag_dir="./build/blockdiag"):
        signal_handler(signal.SIGINT, self.stop)
        signal_handler(signal.SIGTERM, self.stop)

        self.name = name
        self.actors = {}
        self.size = size

        self.log_actor = self.__create_actor(STDOUT, "default_stdout")
        self.error_actor = self.__create_actor(EventLogger,
                                               "default_error_logger")

        self.__running = False
        self.__block = self._async_class()
        self.__block.clear()

        self.blockdiag_dir = blockdiag_dir
        self.generate_blockdiag = generate_blockdiag
        if generate_blockdiag:
            self.blockdiag_out = """diagram admin {\n"""
Example #25
def main():
    global log
    StderrHandler().push_application()
    log = Logger("xbbs.coordinator")

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "coordinator.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))

    inst = Xbbs.create(cfg)

    for name, elem in cfg["projects"].items():
        project = Project(name,
                          **elem,
                          base=path.join(inst.project_base, name))
        inst.projects[name] = project
        os.makedirs(project.base, exist_ok=True)
        log.debug("got project {}", inst.projects[name])

    with inst.zmq.socket(zmq.REP) as sock_cmd, \
         inst.zmq.socket(zmq.PULL) as inst.intake, \
         inst.zmq.socket(zmq.ROUTER) as inst.worker_endpoint:
        # XXX: potentially make perms overridable? is that useful in any
        #      capacity?
        inst.intake.bind(cfg["intake"]["bind"])
        _ipc_chmod(cfg["intake"]["bind"], 0o664)

        inst.worker_endpoint.bind(cfg["worker_endpoint"])
        inst.worker_endpoint.set(zmq.ROUTER_MANDATORY, 1)
        _ipc_chmod(cfg["worker_endpoint"], 0o664)

        sock_cmd.bind(cfg["command_endpoint"]["bind"])
        _ipc_chmod(cfg["command_endpoint"]["bind"], 0o664)

        dumper = gevent.signal_handler(signal.SIGUSR1, dump_projects, inst)
        log.info("startup")
        intake = gevent.spawn(intake_loop, inst)
        job_pull = gevent.spawn(job_pull_loop, inst)
        try:
            command_loop(inst, sock_cmd)
        finally:
            # XXX: This may not be the greatest way to handle this
            gevent.killall(inst.project_greenlets[:])
            gevent.kill(intake)
            gevent.kill(job_pull)
            dumper.cancel()
Example #26
    def run(self) -> None:
        """
        Starts all poller instances.
        After that, polling is performed in regular intervals specified by the polling_delay_ms property.
        By default this function blocks the current thread.
        """
        self._validation_service.validate_all_addresses()

        self._logger.info("Gateway Application started")

        task_group = gevent.pool.Group()

        gevent.signal_handler(signal.SIGINT,
                              self._coin_transaction_polling_service.cancel)
        gevent.signal_handler(signal.SIGINT,
                              self._waves_transaction_polling_service.cancel)

        task_group.start(self._coin_transaction_polling_service)
        task_group.start(self._waves_transaction_polling_service)

        attempt_list_workers = self._create_attempt_list_workers()

        for worker in attempt_list_workers:
            gevent.signal_handler(signal.SIGINT, worker.cancel)
            task_group.start(worker)

        http = gevent.pywsgi.WSGIServer((self._host, self._port),
                                        self._flask,
                                        log=gevent.pywsgi.LoggingLogAdapter(
                                            self._logger.getChild('pywsgi')))

        gevent.signal_handler(signal.SIGINT, http.close)

        self._logger.info('Listening on %s:%s', self._host, str(self._port))

        http.serve_forever()

        task_group.join(raise_error=True)
Example #27
    def test_alarm(self):
        sig = gevent.signal_handler(signal.SIGALRM, raise_Expected)
        self.assertFalse(sig.ref)
        sig.ref = True
        self.assertTrue(sig.ref)
        sig.ref = False

        def test():
            signal.alarm(1)
            with self.assertRaises(Expected) as exc:
                gevent.sleep(2)

            ex = exc.exception
            self.assertEqual(str(ex), 'TestSignal')

        try:
            test()
            # also let's check that the handler stays installed.
            test()
        finally:
            sig.cancel()
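
As the test above relies on, the handle returned by gevent.signal_handler can be deregistered later with cancel(); a minimal sketch, with an illustrative POSIX-only signal and callback:

import signal

import gevent


def on_usr1():
    # illustrative callback
    print("got SIGUSR1")


handle = gevent.signal_handler(signal.SIGUSR1, on_usr1)
# ... later, stop listening for the signal
handle.cancel()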
Example #28
    def run_forever(self):
        # Start threads
        for pool in self.pools:
            pool.start()

        gevent.signal_handler(gevent.signal.SIGINT, self.stop)
        gevent.signal_handler(gevent.signal.SIGTERM, self.stop)

        for pool in self.pools:
            # Either pools stop by themself
            # Or they will get stopped when input queue is empty
            pool.wait_until_nothing_to_process()

        if not self.exit_event.is_set():
            gevent.signal_handler(gevent.signal.SIGINT, lambda: signal.SIG_IGN)
            gevent.signal_handler(gevent.signal.SIGTERM,
                                  lambda: signal.SIG_IGN)
            # Makes sure threads finish properly so that
            # we can make sure the workflow is not used and can be closed
            for pool in self.pools:
                pool.join()

        self.cleanup()
        return self.stop_asked
Example #29
 def test_orphaned_signal_watcher(self):
     # Install libev-based signal watcher.
     try:
         s = gevent.signal(signal.SIGTERM, signals_test_sigterm_handler)
     except AttributeError:
         # This function got renamed in gevent 1.5
         s = gevent.signal_handler(signal.SIGTERM,
                                   signals_test_sigterm_handler)
     # Normal behavior: signal handlers become inherited by children.
     # Bogus behavior of libev-based signal watchers in child process:
     # They should not be active anymore when 'orphaned' (when their
     # corresponding event loop has been destroyed). What happens, however:
     # The old handler stays active and registering a new handler does not
     # 'overwrite' the old one -- both are active.
     # Since this test is about testing the behavior of 'orphaned' libev
     # signal watchers, the signal must be transmitted *after* event loop
     # recreation, so wait here for the child process to go through
     # the hub & event loop destruction (and recreation) process before
     # sending the signal. Waiting is realized with sync through pipe.
     # Without cleanup code in gipc, the inherited but orphaned libev signal
     # watcher would be active in the fresh event loop and trigger the
     # handler. This is a problem. With cleanup code, this handler must
     # never be called. Child exitcode 20 means that the inherited handler
     # has been called, -15 (-signal.SIGTERM) means that the child was
     # actually killed by SIGTERM within a certain short time interval.
     # Returncode 0 would mean that the child finished normally after that
     # short time interval.
     with pipe() as (r, w):
         p = start_process(signals_test_child_a, (w, ))
         assert r.get() == p.pid
         os.kill(p.pid, signal.SIGTERM)
         p.join()
         if not WINDOWS:
             assert p.exitcode == -signal.SIGTERM
         else:
             assert p.exitcode == signal.SIGTERM
     s.cancel()
Example #30
def main():
    # find specified locustfile and make sure it exists, using a very simplified
    # command line parser that is only used to parse the -f option
    locustfile = parse_locustfile_option()

    # import the locustfile
    docstring, user_classes, shape_class = load_locustfile(locustfile)

    # parse all command line options
    options = parse_options()

    if options.slave or options.expect_slaves:
        sys.stderr.write(
            "The --slave/--expect-slaves parameters have been renamed --worker/--expect-workers\n"
        )
        sys.exit(1)

    if options.step_time or options.step_load or options.step_users or options.step_clients:
        sys.stderr.write(
            "The step load feature was removed in Locust 1.3. You can achieve similar results using a LoadTestShape class. See https://docs.locust.io/en/stable/generating-custom-load-shape.html\n"
        )
        sys.exit(1)

    if options.hatch_rate:
        sys.stderr.write(
            "[DEPRECATED] The --hatch-rate parameter has been renamed --spawn-rate\n"
        )
        options.spawn_rate = options.hatch_rate

    # setup logging
    if not options.skip_log_setup:
        if options.loglevel.upper() in [
                "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
        ]:
            setup_logging(options.loglevel, options.logfile)
        else:
            sys.stderr.write(
                "Invalid --loglevel. Valid values are: DEBUG/INFO/WARNING/ERROR/CRITICAL\n"
            )
            sys.exit(1)

    logger = logging.getLogger(__name__)
    greenlet_exception_handler = greenlet_exception_logger(logger)

    if options.list_commands:
        print("Available Users:")
        for name in user_classes:
            print("    " + name)
        sys.exit(0)

    if not user_classes:
        logger.error("No User class found!")
        sys.exit(1)

    # make sure specified User exists
    if options.user_classes:
        missing = set(options.user_classes) - set(user_classes.keys())
        if missing:
            logger.error("Unknown User(s): %s\n" % (", ".join(missing)))
            sys.exit(1)
        else:
            names = set(options.user_classes) & set(user_classes.keys())
            user_classes = [user_classes[n] for n in names]
    else:
        # list() call is needed to consume the dict_view object in Python 3
        user_classes = list(user_classes.values())

    if os.name != "nt" and not options.master:

        try:
            import resource

            minimum_open_file_limit = 10000
            current_open_file_limit = resource.getrlimit(
                resource.RLIMIT_NOFILE)[0]

            if current_open_file_limit < minimum_open_file_limit:
                # Increasing the limit to 10000 within a running process should work on at least MacOS.
                # It does not work on all OSes, but we should be no worse off for trying.
                resource.setrlimit(
                    resource.RLIMIT_NOFILE,
                    [minimum_open_file_limit, resource.RLIM_INFINITY])
        except BaseException:
            logger.warning((
                f"System open file limit '{current_open_file_limit}' is below minimum setting '{minimum_open_file_limit}'. "
                "It's not high enough for load testing, and the OS didn't allow locust to increase it by itself. "
                "See https://github.com/locustio/locust/wiki/Installation#increasing-maximum-number-of-open-files-limit for more info."
            ))

    # create locust Environment
    environment = create_environment(user_classes,
                                     options,
                                     events=locust.events,
                                     shape_class=shape_class)

    if shape_class and (options.num_users or options.spawn_rate):
        logger.warning(
            "The specified locustfile contains a shape class but a conflicting argument was specified: users or spawn-rate. Ignoring arguments"
        )

    if options.show_task_ratio:
        print("\n Task ratio per User class")
        print("-" * 80)
        print_task_ratio(user_classes)
        print("\n Total task ratio")
        print("-" * 80)
        print_task_ratio(user_classes, total=True)
        sys.exit(0)
    if options.show_task_ratio_json:
        from json import dumps

        task_data = {
            "per_class": get_task_ratio_dict(user_classes),
            "total": get_task_ratio_dict(user_classes, total=True),
        }
        print(dumps(task_data))
        sys.exit(0)

    if options.master:
        runner = environment.create_master_runner(
            master_bind_host=options.master_bind_host,
            master_bind_port=options.master_bind_port,
        )
    elif options.worker:
        try:
            runner = environment.create_worker_runner(options.master_host,
                                                      options.master_port)
            logger.debug("Connected to locust master: %s:%s",
                         options.master_host, options.master_port)
        except socket.error as e:
            logger.error("Failed to connect to the Locust master: %s", e)
            sys.exit(-1)
    else:
        runner = environment.create_local_runner()

    # main_greenlet is pointing to runners.greenlet by default, it will point the web greenlet later if in web mode
    main_greenlet = runner.greenlet

    if options.run_time:
        if not options.headless:
            logger.error(
                "The --run-time argument can only be used together with --headless"
            )
            sys.exit(1)
        if options.worker:
            logger.error(
                "--run-time should be specified on the master node, and not on worker nodes"
            )
            sys.exit(1)
        try:
            options.run_time = parse_timespan(options.run_time)
        except ValueError:
            logger.error(
                "Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc."
            )
            sys.exit(1)

        def spawn_run_time_limit_greenlet():
            logger.info("Run time limit set to %s seconds" % options.run_time)

            def timelimit_stop():
                logger.info("Time limit reached. Stopping Locust.")
                runner.quit()

            gevent.spawn_later(
                options.run_time,
                timelimit_stop).link_exception(greenlet_exception_handler)

    if options.csv_prefix:
        stats_csv_writer = StatsCSVFileWriter(environment,
                                              stats.PERCENTILES_TO_REPORT,
                                              options.csv_prefix,
                                              options.stats_history_enabled)
    else:
        stats_csv_writer = StatsCSV(environment, stats.PERCENTILES_TO_REPORT)

    # start Web UI
    if not options.headless and not options.worker:
        # spawn web greenlet
        protocol = "https" if options.tls_cert and options.tls_key else "http"
        try:
            if options.web_host == "*":
                # special check for "*" so that we're consistent with --master-bind-host
                web_host = ""
            else:
                web_host = options.web_host
            if web_host:
                logger.info("Starting web interface at %s://%s:%s" %
                            (protocol, web_host, options.web_port))
            else:
                logger.info(
                    "Starting web interface at %s://0.0.0.0:%s (accepting connections from all network interfaces)"
                    % (protocol, options.web_port))
            web_ui = environment.create_web_ui(
                host=web_host,
                port=options.web_port,
                auth_credentials=options.web_auth,
                tls_cert=options.tls_cert,
                tls_key=options.tls_key,
                stats_csv_writer=stats_csv_writer,
                delayed_start=True,
            )
        except AuthCredentialsError:
            logger.error(
                "Credentials supplied with --web-auth should have the format: username:password"
            )
            sys.exit(1)
    else:
        web_ui = None

    # Fire locust init event which can be used by end-users' code to run setup code that
    # need access to the Environment, Runner or WebUI.
    environment.events.init.fire(environment=environment,
                                 runner=runner,
                                 web_ui=web_ui)

    if web_ui:
        web_ui.start()
        main_greenlet = web_ui.greenlet

    if options.headless:
        # headless mode
        if options.master:
            # wait for worker nodes to connect
            while len(runner.clients.ready) < options.expect_workers:
                logging.info(
                    "Waiting for workers to be ready, %s of %s connected",
                    len(runner.clients.ready),
                    options.expect_workers,
                )
                time.sleep(1)
        if not options.worker:
            # apply headless mode defaults
            if options.num_users is None:
                options.num_users = 1
            if options.spawn_rate is None:
                options.spawn_rate = 1

            # start the test
            if environment.shape_class:
                environment.runner.start_shape()
            else:
                runner.start(options.num_users, options.spawn_rate)

    if options.run_time:
        spawn_run_time_limit_greenlet()

    stats_printer_greenlet = None
    if not options.only_summary and (options.print_stats or
                                     (options.headless
                                      and not options.worker)):
        # spawn stats printing greenlet
        stats_printer_greenlet = gevent.spawn(stats_printer(runner.stats))
        stats_printer_greenlet.link_exception(greenlet_exception_handler)

    if options.csv_prefix:
        gevent.spawn(stats_csv_writer.stats_writer).link_exception(
            greenlet_exception_handler)

    gevent.spawn(stats_history, runner)

    def shutdown():
        """
        Shut down locust by firing quitting event, printing/writing stats and exiting
        """
        logger.info("Running teardowns...")
        environment.events.quitting.fire(environment=environment, reverse=True)

        # determine the process exit code
        if log.unhandled_greenlet_exception:
            code = 2
        elif environment.process_exit_code is not None:
            code = environment.process_exit_code
        elif len(runner.errors) or len(runner.exceptions):
            code = options.exit_code_on_error
        else:
            code = 0

        logger.info("Shutting down (exit code %s), bye." % code)
        if stats_printer_greenlet is not None:
            stats_printer_greenlet.kill(block=False)
        logger.info("Cleaning up runner...")
        if runner is not None:
            runner.quit()

        print_stats(runner.stats, current=False)
        print_percentile_stats(runner.stats)

        print_error_report(runner.stats)

        sys.exit(code)

    # install SIGTERM handler
    def sig_term_handler():
        logger.info("Got SIGTERM signal")
        shutdown()

    gevent.signal_handler(signal.SIGTERM, sig_term_handler)

    try:
        logger.info("Starting Locust %s" % version)
        main_greenlet.join()
        shutdown()
    except KeyboardInterrupt:
        shutdown()
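
Most of the examples above share one pattern: register a no-argument callback with gevent.signal_handler before blocking in serve_forever() or a greenlet join, so that SIGTERM/SIGINT trigger a graceful stop (compare Examples #10 and #23). A minimal self-contained sketch of that pattern; the WSGI app is illustrative:

import signal

import gevent
from gevent.pywsgi import WSGIServer


def app(environ, start_response):
    # illustrative WSGI application
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"ok\n"]


def main():
    server = WSGIServer(("127.0.0.1", 8000), app)

    # gevent calls these with no arguments, unlike signal.signal handlers
    gevent.signal_handler(signal.SIGTERM, server.stop)
    gevent.signal_handler(signal.SIGINT, server.stop)

    server.serve_forever()   # returns once server.stop() has been called
    gevent.get_hub().join()  # let any in-flight greenlets finish


if __name__ == "__main__":
    main()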