def __init__(self, scheduler):
    """Work-stealing scheduler extension.

    Registers itself as a scheduler plugin and as the 'stealing'
    extension, and schedules ``balance()`` to run every 100 ms on the
    scheduler's IO loop.

    :param scheduler: the distributed Scheduler this extension attaches to
    """
    self.scheduler = scheduler
    # 15 buckets of stealable tasks, indexed by cost level.
    self.stealable_all = [set() for i in range(15)]
    self.stealable = dict()
    self.key_stealable = dict()
    self.stealable_unknown_durations = defaultdict(set)
    # Cost multiplier per bucket: 1 + 2**(i - 6); bucket 0 is forced to 1.
    self.cost_multipliers = [1 + 2 ** (i - 6) for i in range(15)]
    self.cost_multipliers[0] = 1
    for worker in scheduler.workers:
        self.add_worker(worker=worker)
    # Re-balance every 100 ms; started from the loop itself so the
    # callback binds to the scheduler's running loop.
    self._pc = PeriodicCallback(callback=self.balance,
                                callback_time=100,
                                io_loop=self.scheduler.loop)
    self.scheduler.loop.add_callback(self._pc.start)
    self.scheduler.plugins.append(self)
    self.scheduler.extensions['stealing'] = self
    # Bounded event log of steal decisions.
    self.log = deque(maxlen=100000)
    self.count = 0
    # Workers report 'long-running' tasks straight to this extension.
    scheduler.worker_handlers['long-running'] = self.transition_long_running
def main() -> None:
    """Parse command-line options, wire up the ARI client, and run the loop."""
    parse_command_line()
    loop = IOLoop.instance()

    # Auto-restart on source changes while debugging.
    if options.debug:
        tornado.autoreload.start()

    # Log the effective configuration, sorted for readability.
    for option, value in sorted(options.as_dict().items()):
        gen_log.debug(f"Option: {option}: {value}")

    client = AriClient()
    loop.add_callback(client.run)

    # Optional keep-alive pings; interval is configured in seconds.
    if options.ping_interval:
        pinger = PeriodicCallback(do_ping, options.ping_interval * 1000)
        pinger.start()

    loop.start()
def __init__(self, ctrl):
    """Set up LCD display state and start a 5-second redraw timer.

    :param ctrl: controller supplying ``args.lcd_addr`` (candidate I2C
        addresses) and ``ioloop`` used for the periodic redraw.
    """
    self.ctrl = ctrl
    # Candidate LCD addresses; start with the first one.
    self.addrs = self.ctrl.args.lcd_addr
    self.addr = self.addrs[0]
    self.addr_num = 0
    # Character-cell dimensions of the display.
    self.width = 20
    self.height = 4
    self.lcd = None
    self.timeout = None
    self.reset = False
    # Page bookkeeping for the multi-page UI.
    self.page = None
    self.pages = []
    self.current_page = 0
    self.screen = self.new_screen()
    self.set_message('Loading...')
    # Redraw screen every 5 seconds
    self.redraw_timer = PeriodicCallback(self._redraw, 5000, ctrl.ioloop)
    self.redraw_timer.start()
    # Show the goodbye screen when the process exits.
    atexit.register(self.goodbye)
def initialize_culler(self):
    """Start idle culler if 'cull_idle_timeout' is greater than zero.

    Regardless of that value, set flag that we've been here so the
    initialization is attempted only once.
    """
    if not self._initialized_culler and self.cull_idle_timeout > 0:
        if self._culler_callback is None:
            # NOTE(review): 'loop' is captured but not used below — confirm
            # whether it is needed (e.g. to pass to PeriodicCallback).
            loop = IOLoop.current()
            if self.cull_interval <= 0:  # handle case where user set invalid value
                self.log.warning("Invalid value for 'cull_interval' detected (%s) - using default value (%s).",
                                 self.cull_interval, self.cull_interval_default)
                self.cull_interval = self.cull_interval_default
            # cull_interval is in seconds; PeriodicCallback wants milliseconds.
            self._culler_callback = PeriodicCallback(
                self.cull_kernels, 1000 * self.cull_interval)
            self.log.info("Culling kernels with idle durations > %s seconds at %s second intervals ...",
                          self.cull_idle_timeout, self.cull_interval)
            if self.cull_busy:
                self.log.info("Culling kernels even if busy")
            if self.cull_connected:
                self.log.info("Culling kernels even with connected clients")
            self._culler_callback.start()
    self._initialized_culler = True
def __init__(self, keys, options=None):
    """Create the MQTT gateway web application.

    :param keys: key material kept on the application
    :param options: parsed command-line options; effectively mandatory
        despite the None default (asserted below)
    """
    assert options
    if options.debug:
        logger.setLevel(logging.DEBUG)
    self.keys = keys
    handlers = []
    settings = {'debug': True}
    # Starts MQTT controller
    self._mqtt_controller = MQTTController(
        on_message_cb=self.send_to_broker,
        port=options.mqtt_port,
        max_time=options.max_time)
    # Sweep for dead nodes once per second.
    PeriodicCallback(self._mqtt_controller.check_dead_nodes, 1000).start()
    # Create connection to broker
    self.create_broker_connection(
        "ws://{}:{}/gw".format(options.broker_host, options.broker_port))
    super().__init__(handlers, **settings)
    logger.info('MQTT gateway application started')
def _periodic_task_iter(self):
    """
    Iterates through all the periodic tasks:

    - the service registry pinging
    - default dummy task if on Windows
    - user defined periodic tasks

    :return: yields one unstarted tornado ``PeriodicCallback`` per task
    """
    for url in self.service_registry_urls:
        registry = RemoteService(url)
        self.registries.append(registry)
        periodic_servreg_ping = functools.partial(
            self._ping_to_service_registry, registry)
        periodic_servreg_ping()  # initial ping
        self.default_periodic_tasks.append(
            (periodic_servreg_ping, self.service_registry_ping_interval))
    all_periodic_tasks = self.default_periodic_tasks + self.periodic_tasks
    for func, timer_in_seconds in all_periodic_tasks:
        # PeriodicCallback takes its period in milliseconds.
        timer_milisec = timer_in_seconds * 1000
        yield PeriodicCallback(func, timer_milisec, io_loop=self.io_loop)
def broadcast_sys_info():
    """Publish CPU and network-I/O statistics, rescheduling itself every 500 ms.

    On the first call a module-global ``PeriodicCallback`` is created and
    started so the broadcast repeats; subsequent calls just publish fresh
    readings via ``publish_data``.
    """
    global pcb
    if pcb is None:
        # First invocation: schedule ourselves to run twice a second.
        pcb = PeriodicCallback(broadcast_sys_info, 500)
        pcb.start()
    cpu = psutil.cpu_percent()
    net = psutil.net_io_counters()
    # Fix (naming only): the original bound net.bytes_recv to a local called
    # "bytes_sent" and net.bytes_sent to "bytes_rcvd".  The published
    # payloads were already correct, so the wire format is unchanged here;
    # only the misleading local names are corrected.
    kb_received = '{0:.2f} kb'.format(net.bytes_recv / 1024)
    kb_sent = '{0:.2f} kb'.format(net.bytes_sent / 1024)
    publish_data('sysinfo_sent', {
        'kb_received': kb_received,
    })
    publish_data('sysinfo_rec', {
        'kb_sent': kb_sent,
    })
    publish_data('sysinfo_cpu', {
        'cpu': cpu,
    })
def __init__(self, enter_code, man_code, close_callback):
    """Create a quiz room in the 'waiting to start' state.

    :param enter_code: code players use to enter the room
    :param man_code: code identifying the room manager
    :param close_callback: invoked when the room closes
    """
    self.enter_code = enter_code
    self.man_code = man_code
    self.owner = None
    self.created_time = time.time()
    self.questions = []
    self.reward = 0  # reward for success
    # Room state: waiting to start (0) / start countdown (1) /
    # asking question + collecting answers (2) / waiting to reveal result (3) /
    # answer-result display (4) / prize display (5) / finished (6)
    self.status = 0
    self.status_conf = [1, 5, 10, 20, 10, 120, 5]  # duration (seconds) per state
    # self.status_conf = [1, 1, 1, 1, 1, 1, 10]  # state durations (for testing)
    self.question_idx = -1  # index of the current question
    self._snapshot = {'st': 0, 'duration': 0}  # current snapshot info
    self.counter = 0
    self.user_count = 0  # number of users currently in the room
    self.failed_users = set()  # users who failed
    self.passed_users = set()  # users who passed
    self.user_answers = defaultdict(list)
    self.close_handler = close_callback
    self.event_handler = None
    # Drive the room state machine once per second.
    self.timer = PeriodicCallback(self.ticker, 1000)
def __init__(self, service, host, port, timeout=30, name=None,
             raise_empty=False, max_conn=30, connection_class=ThriftClient,
             keepalive=None, tracking=False, tracker_factory=None,
             use_limit=None, check_interval=10):
    """Thrift client pool that keeps its connections alive with a heartbeat.

    All parameters except ``check_interval`` are forwarded unchanged to
    the parent pool.

    :param check_interval: heartbeat period in seconds (stored on the
        instance; see NOTE below)
    """
    super(TornadoHeartbeatClientPool, self).__init__(
        service=service,
        host=host,
        port=port,
        timeout=timeout,
        name=name,
        raise_empty=raise_empty,
        max_conn=max_conn,
        connection_class=connection_class,
        keepalive=keepalive,
        tracking=tracking,
        tracker_factory=tracker_factory,
        use_limit=use_limit,
    )
    self.check_interval = check_interval
    # NOTE(review): the timer period is derived from `timeout` (at least
    # 1 second), not from `check_interval`, which is stored but unused
    # here — confirm whether that is intentional.
    self._heartbeat_timer = PeriodicCallback(
        self.maintain_connections, max(1, self.timeout - 5) * 1000)
    self._heartbeat_timer.start()
def run_scheduler(q, center_port=None, **kwargs):
    """Run a distributed Scheduler in this process, reporting its port on ``q``.

    Builds a fresh IOLoop for the process, starts the scheduler on an
    ephemeral port, puts that port on the queue for the parent, then
    blocks in the loop until stopped.

    :param q: multiprocessing-style queue used to report the bound port
    :param center_port: optional port of a Center to sync with
    :param kwargs: forwarded to the Scheduler constructor
    """
    from distributed import Scheduler
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    # No-op periodic callback — presumably to keep the loop ticking
    # regularly in tests; confirm with callers.
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)
    center = ('127.0.0.1', center_port) if center_port else None
    scheduler = Scheduler(center=center, **kwargs)
    scheduler.listen(0)  # bind an ephemeral port
    if center_port:
        loop.run_sync(scheduler.sync_center)
    done = scheduler.start(0)
    q.put(scheduler.port)  # tell the parent where we are listening
    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
def handle(self, *args, **options):
    """Django management-command entry point: run the Tornado chat server.

    Binds the HTTP(S) server to the given host/port, optionally flushes
    online state, starts the online pinger on the main process only, and
    blocks in the IOLoop.
    """
    application = Application([
        (r'/api/.*', HttpHandler),
        (r'/photo/(.*)', PychatStaticFileHandler, {'path': settings.MEDIA_ROOT}),
        (r'/ws', TornadoHandler),
    ], debug=settings.DEBUG, default_host=options['host'])
    self.http_server = HTTPServer(application, ssl_options=TORNADO_SSL_OPTIONS,
                                  max_buffer_size=1000000000)  # 1GB, limit in nginx if u need
    self.http_server.bind(options['port'], options['host'])
    print('tornado server started at {}:{}'.format(options['host'], options['port']))
    # uncomment me for multiple process
    self.http_server.start(1)
    # Init signals handler
    if not options['keep_online']:
        call_command('flush_online')
    # Only the designated main tornado process runs the pinger (or any
    # process if MAIN_TORNADO_PROCESS_PORT is not configured).
    if not hasattr(settings, 'MAIN_TORNADO_PROCESS_PORT') \
            or settings.MAIN_TORNADO_PROCESS_PORT == options['port']:
        logger.info("Starting pinger")
        PeriodicCallback(ping_online, settings.PING_INTERVAL).start()
    else:
        logger.info("Skipping pinger for this instance")
    signal.signal(signal.SIGTERM, self.sig_handler)  # This will also catch KeyboardInterrupt exception
    IOLoop.instance().start()
def __init__(self, config): self.server = config.get_server() # Temperature Store Tracking self.last_temps = {} self.gcode_queue = deque(maxlen=MAX_GCODE_LINES) self.temperature_store = {} self.temp_update_cb = PeriodicCallback(self._update_temperature_store, TEMPERATURE_UPDATE_MS) # Register status update event self.server.register_event_handler("server:status_update", self._set_current_temps) self.server.register_event_handler("server:gcode_response", self._update_gcode_store) self.server.register_event_handler("server:klippy_ready", self._init_sensors) # Register endpoints self.server.register_endpoint("/server/temperature_store", ['GET'], self._handle_temp_store_request) self.server.register_endpoint("/server/gcode_store", ['GET'], self._handle_gcode_store_request)
def run_center(q):
    """Run a distributed Center in this process, reporting its port on ``q``.

    Builds a fresh IOLoop, binds the Center to an ephemeral port
    (retrying on failure), reports the port to the parent, then blocks
    in the loop until stopped.
    """
    from distributed import Center
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    IOLoop.clear_instance()
    loop = IOLoop(); loop.make_current()
    # No-op periodic callback — purpose not shown here; presumably keeps
    # the loop ticking in tests.
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)
    center = Center('127.0.0.1')
    # Retry until an ephemeral port can be bound.
    while True:
        try:
            center.listen(0)
            break
        except Exception as e:
            logging.info("Could not start center on port. Retrying",
                         exc_info=True)
    q.put(center.port)  # tell the parent where we are listening
    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
def setup_sensors(self):
    """Register the BeeGFS fill-level sensor and poll it once a minute.

    Extends the parent's sensors with a float sensor reporting the
    percentage usage of the filesystem mounted at /DATA/.
    """
    super(ApsMasterController, self).setup_sensors()
    self._disk_fill_level_sensor = Sensor.float(
        "beegfs-fill-level",
        description="The percentage fill level of the BeeGFS cluster",
        default=0.0,
        unit="percentage",
        initial_status=Sensor.UNKNOWN)
    self.add_sensor(self._disk_fill_level_sensor)

    def check_disk_fill_level():
        # Parse `df /DATA/`: take columns 2:4 (used / available) of the
        # data row and convert to a percentage.  Any failure is logged,
        # never raised (best-effort polling).
        try:
            used, avail = map(float, check_output(["df", "/DATA/"]
                                                  ).splitlines()[1].split()[2:4])
            percent_used = 100.0 * used / (used + avail)
            self._disk_fill_level_sensor.set_value(percent_used)
        except Exception as error:
            log.warning("Failed to check disk usage level: {}".format(
                str(error)))

    check_disk_fill_level()  # prime the sensor immediately
    self._disk_fill_callback = PeriodicCallback(
        check_disk_fill_level, 60 * 1000)
    self._disk_fill_callback.start()
def main():
    """Load the JSON config, start the car HTTP server, and run the IO loop.

    Exits immediately if the config file parses to an empty object.
    Stops the server cleanly on SystemExit/KeyboardInterrupt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='config file')
    args = parser.parse_args()
    logger.warn('Reading config from {}'.format(args.config))
    config = {}
    with open(args.config, 'r') as infile:
        config = json.load(infile)
    if config == {}:
        # An empty config is unusable; bail out.
        sys.exit()
    serve_config = config.get('car_serve', {})
    logger.warn(serve_config)
    app = CarServer(config)
    try:
        logger.info('Opening HTTP server.')
        http_server = HTTPServer(app)
        http_server.listen(serve_config.get('port', 9001),
                           address=serve_config.get('ip_address', '127.0.0.1'))
        # Physics state update period, milliseconds.
        update_ms = serve_config.get('update_ms', 100)
        logger.debug(
            'Registering periodic callback. Every {} ms'.format(update_ms))
        i = PeriodicCallback(app.car_state.update_physical_state, update_ms)
        i.start()
        IOLoop.current().start()
    except (SystemExit, KeyboardInterrupt):
        pass
    logger.info('Stopping server.')
    http_server.stop()
    IOLoop.current().stop()
    sys.exit(0)
def tornado_bidder_run(): """runs httpapi bidder agent""" # bind tcp port to launch processes on requests sockets = netutil.bind_sockets(CONFIG_OBJ["Bidder"]["Port"]) # fork working processes process.fork_processes(0) # Tornado app implementation app = Application([url(r"/", TornadoFixPriceBidAgentRequestHandler)]) # start http servers and attach the web app to it server = httpserver.HTTPServer(app) server.add_sockets(sockets) # perform following actions only in the parent process process_counter = process.task_id() if (process_counter == 0): # run dummy ad server adserver_win = Application([url(r"/", TornadoDummyRequestHandler)]) winport = CONFIG_OBJ["Bidder"]["Win"] adserver_win.listen(winport) adserver_evt = Application([url(r"/", TornadoDummyRequestHandler)]) evtport = CONFIG_OBJ["Bidder"]["Event"] adserver_evt.listen(evtport) # --instantiate budget pacer pacer = BudgetControl() pacer.start(CONFIG_OBJ) # add periodic event to call pacer PeriodicCallback(pacer.http_request, CONFIG_OBJ["Banker"]["Period"]).start() # main io loop. it will loop waiting for requests IOLoop.instance().start()
def execute_subprocess(self):
    """Launch the scrapy crawl subprocess and return a Future for its exit.

    Builds the ``python -m scrapyd.runner crawl`` command line with the
    task's spider parameters, exports SCRAPY_* environment variables
    (including a file:// feed URI for the items file), starts the
    process, and polls it once per second.  The returned future resolves
    with ``self.complete(exit_code)`` when the process exits; a failure
    to start short-circuits via ``self.complete_with_error``.
    """
    future = Future()
    # init items file
    workspace = ProjectWorkspace(self.task.project_name)
    self.items_file = os.path.join(self.workspace_dir,
                                   '%s.%s' % (self.task.id, 'jl'))
    python = workspace.python
    runner = 'scrapyd.runner'
    pargs = [python, '-m', runner, 'crawl', self.task.spider_name]
    # Spider parameters are passed as scrapy settings overrides.
    for spider_parameter_key, spider_parameter_value in self.task.spider_parameters.items():
        pargs += [
            '-s',
            '%s=%s' % (spider_parameter_key, spider_parameter_value)
        ]
    env = os.environ.copy()
    env['SCRAPY_PROJECT'] = str(self.task.project_name)
    env['SCRAPY_JOB'] = str(self.task.id)
    env['SCRAPY_FEED_URI'] = str(path_to_file_uri(self.items_file))
    try:
        self.p = subprocess.Popen(pargs, env=env, stdout=self._f_output,
                                  cwd=self.workspace_dir,
                                  stderr=self._f_output)
        if self.on_subprocess_start:
            self.on_subprocess_start(self.task, self.p.pid)
    except Exception as e:
        return self.complete_with_error('Error when starting crawl subprocess : %s' % e)
    logger.info('job %s started on pid: %d' % (self.task.id, self.p.pid))

    def check_process():
        execute_result = self.p.poll()
        logger.debug('check process')
        if execute_result is not None:
            logger.info('task complete')
            # BUGFIX: stop polling before resolving the future.  The
            # original kept the PeriodicCallback running, so the next
            # tick would call future.set_result() a second time and
            # raise on an already-resolved future.
            self.check_process_callback.stop()
            future.set_result(self.complete(execute_result))

    self.check_process_callback = PeriodicCallback(check_process, 1 * 1000)
    self.check_process_callback.start()
    return future
def start(self):
    """
    Create all required components and start the application:
    message bus, auto-start plugins, registered items, the 1-second
    scheduling tick backed by a thread pool, and the web server.
    """
    self.log.info('starting alfred %s' % '.'.join(map(str, __version__)))
    self.startTime = time.asctime()
    if self.bus:
        self.bus.init(**self.config.get('broker'))
        self.bus.on('commands/#', self.on_commands)
        self.bus.on('items/#', self.on_items)
        # get over MQTT log transfer (to rework)
        for h in logging.getLogger().handlers:
            if isinstance(h, utils.MqttHandler):
                h.bus = self.bus
                h.host = self.host
    # self.plugins = self.load_plugins(self.find_plugins())
    # Start installed plugins that have to be started (autoStart)
    for plugin in self.plugins:
        if plugin in self.config.get('plugins', []):
            self.start_plugin(plugin)
    # Register the items enabled in the config.
    for item in self.items:
        if item.name in self.config.get('items', []):
            self.register(item)
    signal.signal(signal.SIGINT, self.signal_handler)
    # Create thread pool to handle all actions and starts a scheduling loop
    self.scheduler = PeriodicCallback(self.tick, 1000)
    self.pool = ThreadPoolExecutor(max_workers=20)
    self.scheduler.start()
    self.http = WebServer(self, self.clientPath)
    self.http.start()
def __init__(self, aioloop=None, ioloop=None):
    """Set up the websocket client service and start the wsServer.

    :param aioloop: asyncio loop reference kept on the instance
    :param ioloop: tornado IOLoop used to schedule the startup callbacks
    """
    self.aioloop = aioloop
    self.ioloop = ioloop
    # Set of live websocket client objects.
    self.clientObjectSet = set()
    self.sync_channel_dict = {}
    self.sync_delay_dict = {}
    self.macro_back_dict = {}
    self.serviceObject = None
    self.mutex_group_list = []
    # Periodic keep-alive ping.  NOTE(review): the original comment said
    # "send a ping every 20 seconds" but the interval here is 1000 ms —
    # confirm which is intended.
    PeriodicCallback(self.wsClientKeep_alive, 1000).start()
    self.heartDelayCount = 0
    self.ioloop.add_timeout(self.ioloop.time() + 15,
                            self.checkClientConnectionStatus)
    self.ioloop.add_timeout(self.ioloop.time(), self.wsClientConnect)
    self.ioloop.add_timeout(self.ioloop.time(), self.getMacroSyncChannel)
    self.ioloop.add_timeout(self.ioloop.time(), self.getChannelSyncDelay)
    # self.ioloop.add_timeout(self.ioloop.time(), self.getMacroBackMessage)
    self.ioloop.add_timeout(self.ioloop.time(), self.getMutexChannel)
    # Finally — TODO: start the service.  TLS is used except in the
    # local environments.
    ssl_options = {
        'certfile': '/ssl_file/websocket.crt',
        'keyfile': '/ssl_file/websocket.key'
    }
    wsApp = web.Application([(r'/', WebsocketClient, {'server': self})])
    if ENVIRONMENT == 'local' or ENVIRONMENT == 'local_test':
        wsServer = httpserver.HTTPServer(wsApp)
    else:
        wsServer = httpserver.HTTPServer(wsApp, ssl_options=ssl_options)
    wsServer.listen(8010)
    logClient.debugLog(
        '**********************启动wsServer××××××××××××××××××××')
def open_spider(self, spider):
    """Register the crawl job in MongoDB and start periodic stats dumping.

    Ensures the required indexes exist, upserts a 'running' job document
    keyed by the spider's crawl_id, and stores the resulting Mongo _id on
    both self and the spider.  On any failure ``self.job_id`` is cleared
    and items will not be stored.
    """
    try:
        yield self.items_col.ensure_index(self.job_id_key)
        yield self.jobs_col.ensure_index('id', unique=True)
        job = yield self.jobs_col.find_and_modify(
            {
                'id': spider.crawl_id,
            },
            {
                'id': spider.crawl_id,
                'started_at': datetime.datetime.utcnow(),
                'status': 'running',
                'spider': spider.name,
                "urls": self.get_spider_urls(spider),
                'options': getattr(spider.crawler, 'start_options', {}),
            },
            upsert=True, new=True)
        self.job_id = str(job['_id'])
        spider.motor_job_id = str(self.job_id)
        logger.info("Crawl job generated id: %s", self.job_id,
                    extra={'crawler': self.crawler})
        if self.dump_period:
            # dump_period is in seconds; PeriodicCallback wants ms.
            self._dump_pc = PeriodicCallback(self.dump_stats,
                                             self.dump_period * 1000)
            self._dump_pc.start()
    except Exception:
        self.job_id = None
        logger.error(
            "Can't connect to %s. Items won't be stored.",
            self.items_uri, exc_info=True,
            extra={'crawler': self.crawler},
        )
def watch_liveloading(self, handler=None):
    """Watch and handle packets from the Beam liveloading websocket.

    Reads the initial handshake packet (which carries the server's ping
    interval), starts a periodic '2' ping at that interval, then loops
    forever handling follow and subscription notifications.  Raises
    ConnectionError whenever the socket yields None (closed connection).
    """
    response = yield self.liveloading_websocket.read_message()
    if response is None:
        raise ConnectionError
    packet = self.parse_liveloading_message(response)
    # The server dictates the ping period via the handshake payload.
    PeriodicCallback(
        partial(self.liveloading_websocket.write_message, '2'),
        packet["data"]["pingInterval"]).start()
    while True:
        message = yield self.liveloading_websocket.read_message()
        if message is None:
            raise ConnectionError
        packet = self.parse_liveloading_message(message)
        if packet.get("data") is not None:
            self.logger.debug("LIVE: {}".format(packet))
            if isinstance(packet["data"], list):
                if isinstance(packet["data"][0], str):
                    if packet["data"][1].get("following"):
                        self.logger.info("- {} followed.".format(
                            packet["data"][1]["user"]["username"]))
                        self.send_message("Thanks for the follow, @{}!".format(
                            packet["data"][1]["user"]["username"]))
                    elif packet["data"][1].get("subscribed"):
                        self.logger.info("- {} subscribed.".format(
                            packet["data"][1]["user"]["username"]))
                        self.send_message(
                            "Thanks for the subscription, @{}! <3".format(
                                packet["data"][1]["user"]["username"]))
def __init__(
    self,
    minimum: int = 0,
    maximum: int = math.inf,
    wait_count: int = 3,
    interval: str = "1s",
):
    """Adaptive-scaling state: bounds, hysteresis count, and poll interval.

    :param minimum: lower bound on the scaling target
    :param maximum: upper bound on the scaling target
    :param wait_count: consecutive observations required before acting
    :param interval: poll period as a timedelta string (e.g. "1s");
        a falsy value disables the periodic callback entirely
    """
    self.minimum = minimum
    self.maximum = maximum
    self.wait_count = wait_count
    # A falsy interval is kept as-is, which skips the callback below.
    self.interval = parse_timedelta(interval, "seconds") if interval else interval
    self.periodic_callback = None

    def f():
        # Runs on the event loop; tolerate the callback having been
        # torn down (None) in the meantime.
        try:
            self.periodic_callback.start()
        except AttributeError:
            pass

    if self.interval:
        self.periodic_callback = PeriodicCallback(self.adapt, self.interval * 1000)
        try:
            # Prefer the owner's loop if one is attached...
            self.loop.add_callback(f)
        except AttributeError:
            # ...otherwise fall back to the current IOLoop.
            IOLoop.current().add_callback(f)

    try:
        # These may be read-only properties on subclasses — presumably
        # why assignment failures are ignored; confirm with subclasses.
        self.plan = set()
        self.requested = set()
        self.observed = set()
    except Exception:
        pass

    # internal state
    self.close_counts = collections.defaultdict(int)
    self._adapting = False
    self.log = collections.deque(maxlen=10000)
def main():
    """Define server options, build the game web application, and serve it.

    Registers the websocket and web admin handlers, starts the session
    heartbeat, and blocks in the IOLoop.
    """
    define("host", "0.0.0.0", type=str)
    define("server_port", 8999, type=int)
    # define("server_port", 8045, type=int)
    define("logger_port", 8459, type=int)
    # define("redis_host", "192.168.36.77", type=str)
    define("redis_host", "192.168.1.3", type=str)
    define("redis_port", 6379, type=int)
    define("redis_password", None, type=str)
    define("redis_db", 10, type=int)
    options.parse_command_line()
    from logic.table_manager import TableMgr
    TableMgr().reload()
    from web.handler import CreateRoomHandler, MaintenanceHandler, DismissRoomHandler, LoadBalanceHandler, \
        ExistRoomHandler, RunningHandler, BroadcastHandler
    app = Application(
        handlers=[
            (r"/ws", WSServer),
            (r"/web/create_room", CreateRoomHandler),
            (r"/web/maintenance", MaintenanceHandler),
            (r"/web/dismiss", DismissRoomHandler),
            (r"/web/load_balance", LoadBalanceHandler),
            (r"/web/exist_room", ExistRoomHandler),
            (r"/web/running", RunningHandler),
            (r"/web/broadcast", BroadcastHandler),
        ],
        # debug=True,
    )
    # app.settings["debug"] = True
    app.listen(options.server_port)
    from logic.session_manager import SessionMgr, heartbeat
    # heartbeat is in seconds; PeriodicCallback wants milliseconds.
    PeriodicCallback(SessionMgr().heartbeat, 1000 * heartbeat).start()
    IOLoop.instance().start()
def __init__(self, *args, **kwargs):
    """Redis engine: resolve connection settings and create the clients.

    Connection settings come either from the discrete ``redis_*`` options
    or from a single ``redis_url``.
    """
    super(Engine, self).__init__(*args, **kwargs)
    if not self.options.redis_url:
        self.host = self.options.redis_host
        self.port = self.options.redis_port
        self.password = self.options.redis_password
        self.db = self.options.redis_db
    else:
        # according to https://devcenter.heroku.com/articles/redistogo
        parsed_url = urlparse.urlparse(self.options.redis_url)
        self.host = parsed_url.hostname
        self.port = int(parsed_url.port)
        self.db = 0  # URL form carries no db index; default to 0
        self.password = parsed_url.password
    # 1-second connection check; created here, not started here.
    self.connection_check = PeriodicCallback(self.check_connection, 1000)
    self._need_reconnect = False
    # Separate clients for subscribe, publish and worker traffic.
    self.subscriber = toredis.Client(io_loop=self.io_loop)
    self.publisher = toredis.Client(io_loop=self.io_loop)
    self.worker = toredis.Client(io_loop=self.io_loop)
    self.subscriptions = {}
def run(self):
    """Spawn the subprocess attached to a pty and watch it from the IOLoop.

    The child's stdout/stderr go to the pty slave; the master side is
    made non-blocking and registered for READ events.  A 250 ms
    periodic callback polls for process exit.
    """
    self.ioloop = IOLoop.instance()
    (master_fd, slave_fd) = pty.openpty()
    # make stdout, stderr non-blocking
    fcntl.fcntl(master_fd, fcntl.F_SETFL,
                fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
    self.master_fd = master_fd
    self.master = os.fdopen(master_fd)
    # listen to stdout, stderr
    self.ioloop.add_handler(master_fd, self._handle_subprocess_stdout,
                            self.ioloop.READ)
    slave = os.fdopen(slave_fd)
    self.kwargs["stdout"] = slave
    self.kwargs["stderr"] = slave
    self.kwargs["close_fds"] = True
    self.pipe = subprocess.Popen(*self.args, **self.kwargs)
    self.stdin = self.pipe.stdin
    # check for process exit
    self.wait_callback = PeriodicCallback(self._wait_for_end, 250)
    self.wait_callback.start()
def __init__(self, client, loop=None):
    """Create new adapter for given client instance

    :param mqtt.Client client: MQTT client instance
    :param tornado.ioloop.IOLoop loop: Tonardo IOLoop instance,
        None to use default loop
    """
    self._client = client
    self._fd = self._client_fd()
    self._loop = loop
    # Watch readability and errors by default; WRITE is part of the
    # full event mask used when registering the handler.
    self._read_events = IOLoop.READ | IOLoop.ERROR
    self._all_events = self._read_events | IOLoop.WRITE
    if not self._loop:
        self._loop = IOLoop.instance()
    LOG.debug('setup handlers')
    self._loop.add_handler(self._client_fd(), self._io_clbk,
                           self._all_events)
    # Run the client's periodic housekeeping every MQTT_MISC_PERIOD ms.
    self._periodic = PeriodicCallback(self._periodic_clbk,
                                      MQTT_MISC_PERIOD,
                                      io_loop=self._loop)
    self._periodic.start()
def __init__(self, freq='100ms', interval='500ms', dask=False):
    """Streaming source that emits randomly generated records.

    :param freq: pandas timedelta string controlling record frequency,
        threaded through the accumulator's state
    :param interval: how often the trigger fires (pandas timedelta string)
    :param dask: when True, back the stream with a DaskStream
    """
    if dask:
        from streamz.dask import DaskStream
        source = DaskStream()
    else:
        source = Source()
    self.source = source
    start = {'last': time(), 'freq': pd.Timedelta(freq)}
    stream = self.source.accumulate(_random_accumulator,
                                    returns_state=True,
                                    start=start)
    from tornado.ioloop import PeriodicCallback
    # Convert the interval to milliseconds for PeriodicCallback.
    self.interval = pd.Timedelta(interval).total_seconds() * 1000

    def trigger():
        # Each tick pushes a dummy event through the accumulator.
        source._emit(None)

    self.pc = PeriodicCallback(trigger, self.interval)
    self.pc.start()
    # Example element produced by one accumulator step, handed to the
    # parent constructor alongside the stream.
    _, example = _random_accumulator(start, None)
    super(Random, self).__init__(stream, example)
def start(self):
    """
    Starts running the periodic callback.

    Raises RuntimeError if the callback has already been started.
    """
    if self._cb is not None:
        raise RuntimeError('Periodic callback has already started.')
    if not self.running:
        # _updating guards the running flag while it is being flipped.
        try:
            self._updating = True
            self.running = True
        finally:
            self._updating = False
    self._start_time = time.time()
    if state.curdoc:
        # A live document is available: let it drive the timer.
        self._doc = state.curdoc
        self._cb = self._doc.add_periodic_callback(self._periodic_callback,
                                                   self.period)
    else:
        # Stand-alone: fall back to a tornado PeriodicCallback.
        from tornado.ioloop import PeriodicCallback
        self._cb = PeriodicCallback(self._periodic_callback, self.period)
        self._cb.start()
    try:
        state.on_session_destroyed(self._cleanup)
    except Exception:
        pass  # best-effort registration of session cleanup
def __init__(self, capp, db=None, persistent=False,
             enable_events=True, io_loop=None, **kwargs):
    """Daemon thread that collects celery events into an EventsState.

    :param capp: Celery application instance
    :param db: shelve database path used when persistent
    :param persistent: reload previously saved state from ``db``
    :param enable_events: whether events are periodically (re)enabled
    :param io_loop: tornado IOLoop; defaults to IOLoop.instance()
    :param kwargs: forwarded to EventsState when no saved state exists
    """
    threading.Thread.__init__(self)
    self.daemon = True
    self.io_loop = io_loop or IOLoop.instance()
    self.capp = capp
    self.db = db
    self.persistent = persistent
    self.enable_events = enable_events
    self.state = None
    # Persistent mode requires Celery >= 3.0.15; silently downgrade
    # (with a warning) on older versions.
    if self.persistent and tuple(map(
            int, celery.__version__.split('.'))) < (3, 0, 15):
        logger.warning('Persistent mode is available with '
                       'Celery 3.0.15 and later')
        self.persistent = False
    if self.persistent:
        logger.debug("Loading state from '%s'...", self.db)
        state = shelve.open(self.db)
        if state:
            self.state = state['events']
        state.close()
    if not self.state:
        self.state = EventsState(**kwargs)
    # Timer that periodically re-enables event emission; created here,
    # not started here.
    self.timer = PeriodicCallback(self.on_enable_events,
                                  self.events_enable_interval)
def __init__(self, **kwargs):
    """Construct the DummyAdapter, optionally starting its background task.

    The ``background_task_enable`` option controls whether a periodic
    background task runs; ``background_task_interval`` gives its period
    in seconds (default 1.0).

    :param kwargs: keyword arguments specifying options
    """
    super(DummyAdapter, self).__init__(**kwargs)

    # The background task increments this counter; start from zero.
    self.background_task_counter = 0

    opts = self.options
    if opts.get('background_task_enable', False):
        interval = float(opts.get('background_task_interval', 1.0))
        logging.debug("Launching background task with interval %.2f secs",
                      interval)
        # PeriodicCallback expects its period in milliseconds.
        self.background_task = PeriodicCallback(
            self.background_task_callback, interval * 1000)
        self.background_task.start()

    logging.debug('DummyAdapter loaded')