async def livestream_snapshot(self, filename):
    """Download a still frame from the camera's livestream."""
    # Render filename from template.
    filename.hass = self.hass
    snapshot_file = filename.async_render(
        variables={ATTR_ENTITY_ID: self.entity_id})

    # Respect configured path whitelist.
    if not self.hass.config.is_allowed_path(snapshot_file):
        _LOGGER.error(
            "Can't write %s, no access to path!", snapshot_file)
        return

    asyncio.shield(self._camera.get_livestream_image(
        snapshot_file), loop=self.hass.loop)
async def download_livestream(self, filename, duration):
    """Download a recording from the camera's livestream."""
    # Render filename from template.
    filename.hass = self.hass
    stream_file = filename.async_render(
        variables={ATTR_ENTITY_ID: self.entity_id})

    # Respect configured path whitelist.
    if not self.hass.config.is_allowed_path(stream_file):
        _LOGGER.error(
            "Can't write %s, no access to path!", stream_file)
        return

    asyncio.shield(self._camera.record_livestream(
        stream_file, timedelta(seconds=duration)), loop=self.hass.loop)
def test_shield_cancel(self):
    inner = asyncio.Future(loop=self.loop)
    outer = asyncio.shield(inner)
    test_utils.run_briefly(self.loop)
    inner.cancel()
    test_utils.run_briefly(self.loop)
    self.assertTrue(outer.cancelled())
def test_gather_shield(self):
    child1 = asyncio.Future(loop=self.loop)
    child2 = asyncio.Future(loop=self.loop)
    inner1 = asyncio.shield(child1, loop=self.loop)
    inner2 = asyncio.shield(child2, loop=self.loop)
    parent = asyncio.gather(inner1, inner2, loop=self.loop)
    test_utils.run_briefly(self.loop)
    parent.cancel()
    # This should cancel inner1 and inner2 but not child1 and child2.
    test_utils.run_briefly(self.loop)
    self.assertIsInstance(parent.exception(), asyncio.CancelledError)
    self.assertTrue(inner1.cancelled())
    self.assertTrue(inner2.cancelled())
    child1.set_result(1)
    child2.set_result(2)
    test_utils.run_briefly(self.loop)
async def async_setup(self, platform, platform_config, discovery_info=None,
                      tries=0):
    """Set up the platform."""
    logger = self.logger
    hass = self.hass
    full_name = '{}.{}'.format(self.domain, self.platform_name)

    logger.info("Setting up %s", full_name)
    warn_task = hass.loop.call_later(
        SLOW_SETUP_WARNING, logger.warning,
        "Setup of platform %s is taking over %s seconds.",
        self.platform_name, SLOW_SETUP_WARNING)

    try:
        if getattr(platform, 'async_setup_platform', None):
            task = platform.async_setup_platform(
                hass, platform_config,
                self._async_schedule_add_entities, discovery_info
            )
        else:
            # This should not be replaced with hass.async_add_job because
            # we don't want to track this task in case it blocks startup.
            task = hass.loop.run_in_executor(
                None, platform.setup_platform, hass, platform_config,
                self._schedule_add_entities, discovery_info
            )
        await asyncio.wait_for(
            asyncio.shield(task, loop=hass.loop),
            SLOW_SETUP_MAX_WAIT, loop=hass.loop)

        # Block till all entities are done
        if self._tasks:
            pending = [task for task in self._tasks if not task.done()]
            self._tasks.clear()

            if pending:
                await asyncio.wait(
                    pending, loop=self.hass.loop)

        hass.config.components.add(full_name)
    except PlatformNotReady:
        tries += 1
        wait_time = min(tries, 6) * 30
        logger.warning(
            'Platform %s not ready yet. Retrying in %d seconds.',
            self.platform_name, wait_time)
        async_track_point_in_time(
            hass, self.async_setup(
                platform, platform_config, discovery_info, tries),
            dt_util.utcnow() + timedelta(seconds=wait_time))
    except asyncio.TimeoutError:
        logger.error(
            "Setup of platform %s is taking longer than %s seconds."
            " Startup will proceed without waiting any longer.",
            self.platform_name, SLOW_SETUP_MAX_WAIT)
    except Exception:  # pylint: disable=broad-except
        logger.exception(
            "Error while setting up platform %s", self.platform_name)
    finally:
        warn_task.cancel()
def communicate(self, cmd, size=None):
    try:
        if not size:
            self._ser.write(cmd)
            self._ser.flush()
            return
        w = asyncio.Future(loop=self._loop)
        self._waitings.put_nowait((w, size))
        retry = 0
        res = None
        while not w.done() and retry <= 10:
            self._ser.write(cmd)
            self._ser.flush()
            try:
                res = yield from asyncio.wait_for(asyncio.shield(w), 3.5)
            except asyncio.TimeoutError:
                retry += 1
                logger.debug('Retry {}'.format(retry))
            else:
                break
    except serial.serialutil.SerialException:
        logger.warning('arduino was not connected.')
        return
    if res is None:
        self.state = 'FAILED'
        logger.critical('The aircraft dropped its connection... so sad... QQQ')
        w.cancel()
    return res
def wrapper(*args, **kwargs):
    job = asyncweb.new_job()
    response = {'jobid': job.id, 'interval': interval, 'pending': False}
    coro = corofunc(job, *args, **kwargs)
    task = asyncio.Task(coro)
    try:
        result = yield from asyncio.wait_for(asyncio.shield(task), blockfor)
    except asyncio.TimeoutError:
        # We waited for the task as long as we were willing to block
        # the client request. The task will now finish asynchronously
        # and the client will have to pick up the result by querying
        # /api/jobs
        log.debug("webcoroutine didn't finish in %.1f seconds, "
                  "result will come asynchronously", blockfor)
    except Exception as e:
        if not task.done():
            task.set_exception(e)
    if task.done():
        job.finish(task)
    else:
        asyncweb.watch_job(task, job)
        response['pending'] = True
    # Get job results for all supplied job ids plus this new one.
    jobs = web.request.query.jobs + ',%s' % job.id
    response.update(asyncweb.pop_finished_jobs(job.session, jobs))
    log.debug('webcoroutine response: %s', response)
    return response
def _async_setup_platform(self, platform_type, platform_config,
                          discovery_info=None):
    """Set up a platform for this component.

    This method must be run in the event loop.
    """
    platform = yield from async_prepare_setup_platform(
        self.hass, self.config, self.domain, platform_type)

    if platform is None:
        return

    # Config > Platform > Component
    scan_interval = (platform_config.get(CONF_SCAN_INTERVAL) or
                     getattr(platform, 'SCAN_INTERVAL', None) or
                     self.scan_interval)
    entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)

    key = (platform_type, scan_interval, entity_namespace)

    if key not in self._platforms:
        self._platforms[key] = EntityPlatform(
            self, platform_type, scan_interval, entity_namespace)

    entity_platform = self._platforms[key]

    self.logger.info("Setting up %s.%s", self.domain, platform_type)
    warn_task = self.hass.loop.call_later(
        SLOW_SETUP_WARNING, self.logger.warning,
        "Setup of platform %s is taking over %s seconds.", platform_type,
        SLOW_SETUP_WARNING)

    try:
        if getattr(platform, 'async_setup_platform', None):
            task = platform.async_setup_platform(
                self.hass, platform_config,
                entity_platform.async_schedule_add_entities, discovery_info
            )
        else:
            # This should not be replaced with hass.async_add_job because
            # we don't want to track this task in case it blocks startup.
            task = self.hass.loop.run_in_executor(
                None, platform.setup_platform, self.hass, platform_config,
                entity_platform.schedule_add_entities, discovery_info
            )
        yield from asyncio.wait_for(
            asyncio.shield(task, loop=self.hass.loop),
            SLOW_SETUP_MAX_WAIT, loop=self.hass.loop)
        yield from entity_platform.async_block_entities_done()
        self.hass.config.components.add(
            '{}.{}'.format(self.domain, platform_type))
    except asyncio.TimeoutError:
        self.logger.error(
            "Setup of platform %s is taking longer than %s seconds."
            " Startup will proceed without waiting any longer.",
            platform_type, SLOW_SETUP_MAX_WAIT)
    except Exception:  # pylint: disable=broad-except
        self.logger.exception(
            "Error while setting up platform %s", platform_type)
    finally:
        warn_task.cancel()
def _send_then_recv(self, send, recv):
    fut_recv = Task(recv())
    result = None
    for i in range(self.max_tries):
        try:
            yield from send()
        except ConnectionError as e:
            logging.warn("Failed to send RADIUS request: %s" % e)
            yield from sleep(TIMEOUT, loop=self.loop)
            continue
        try:
            result = yield from wait_for(shield(fut_recv), self.timeout)
            break
        except TimeoutError:
            # No need to restart task, since it is protected by shield().
            logging.warning("Timeout, re-send RADIUS request.")
        except ValueError as e:
            logging.warning("Malformed RADIUS packet received: %s" % e)
            logging.info("Please check the shared secret.")
            fut_recv = Task(self._recv_response())
        except ConnectionError as e:
            logging.warn("Failed to receive RADIUS response: %s" % e)
            yield from sleep(TIMEOUT, loop=self.loop)
            fut_recv = Task(self._recv_response())
    if result is None:
        logging.warning("Timeout. No valid RADIUS response.")
        fut_recv.cancel()
    return result
async def execute_call(self, client_call):
    # remember what we sent
    self.call_registry[client_call.correlation_id] = client_call

    self.subscribe_exclusive(auto_delete=True, on_declared=self._on_declared)
    self.queue_name = await self.get_queue_name()

    routing_key = 'rpc.{}.{}'.format(self.service_name, client_call.function)
    logging.info('Calling {}({})'.format(routing_key, client_call.kwargs))
    self.publish(routing_key=routing_key, payload=client_call.kwargs,
                 correlation_id=client_call.correlation_id,
                 reply_to=self.queue_name)

    self.loop.call_soon(self.drain_events)
    if not self.loop.is_running():
        self.loop.call_soon(self.loop.run_until_complete(client_call.future))

    try:
        logging.info('Waiting for response from server')
        await asyncio.wait_for(asyncio.shield(client_call.future),
                               self.timeout)
        return client_call.future.result()
    except asyncio.TimeoutError:
        raise Exception('Timeout!')
def next_record(self, partitions):
    """Return one fetched record.

    This method will contain a little overhead as we will do more work
    this way:
        * Notify prefetch routine per every consumed partition
        * Assure message marked for autocommit
    """
    for tp in list(self._records.keys()):
        if partitions and tp not in partitions:
            continue
        res_or_error = self._records[tp]
        if type(res_or_error) == FetchResult:
            message = res_or_error.getone()
            if message is None:
                # We already processed all messages, request new ones
                del self._records[tp]
                self._notify(self._wait_consume_future)
            else:
                return message
        else:
            # Remove error, so we can fetch on partition again
            del self._records[tp]
            self._notify(self._wait_consume_future)
            res_or_error.check_raise()

    # No messages ready. Wait for some to arrive
    if self._wait_empty_future is None or self._wait_empty_future.done():
        self._wait_empty_future = asyncio.Future(loop=self._loop)
    yield from asyncio.shield(self._wait_empty_future, loop=self._loop)
    return (yield from self.next_record(partitions))
def connection_closed_future(self) -> 'asyncio.Future[Disconnected]':
    """
    Resolves once the connection has been closed.

    Return the close code.
    """
    return asyncio.shield(self._connection_closed_future, loop=self._loop)
async def wrapper(cls, connection, rest, *args):
    futures = {connection[name]: msg for name, msg in self.fields}
    aggregate = asyncio.gather(*futures, loop=connection.loop)
    if self.wait:
        timeout = connection.wait_future_timeout
    else:
        timeout = 0
    try:
        await asyncio.wait_for(
            asyncio.shield(aggregate, loop=connection.loop),
            timeout,
            loop=connection.loop
        )
    except asyncio.TimeoutError:
        for future, message in futures.items():
            if not future.done():
                if self.fail_info is None:
                    template = "bad sequence of commands ({})"
                    info = template.format(message)
                else:
                    info = self.fail_info
                connection.response(self.fail_code, info)
                return True
    return await f(cls, connection, rest, *args)
def ensure_open(self):
    """
    Check that the WebSocket connection is open.

    Raise :exc:`~websockets.exceptions.ConnectionClosed` if it isn't.
    """
    # Handle cases from most common to least common for performance.
    if self.state is State.OPEN:
        return

    if self.state is State.CLOSED:
        raise ConnectionClosed(self.close_code, self.close_reason)

    if self.state is State.CLOSING:
        # If we started the closing handshake, wait for its completion to
        # get the proper close code and status. self.close_connection_task
        # will complete within 4 or 5 * timeout after calling close().
        # The CLOSING state also occurs when failing the connection. In
        # that case self.close_connection_task will complete even faster.
        if self.close_code is None:
            yield from asyncio.shield(self.close_connection_task)
        raise ConnectionClosed(self.close_code, self.close_reason)

    # Control may only reach this point in buggy third-party subclasses.
    assert self.state is State.CONNECTING
    raise InvalidState("WebSocket connection isn't established yet")
def _do_execute(self, *, return_exceptions=False):
    self._waiters = waiters = []
    conn = yield from self._conn.get_atomic_connection()
    multi = conn.execute('MULTI')
    coros = list(self._send_pipeline(conn))
    exec_ = conn.execute('EXEC')
    gather = asyncio.gather(multi, *coros, loop=self._loop,
                            return_exceptions=True)
    try:
        yield from asyncio.shield(gather, loop=self._loop)
    except asyncio.CancelledError:
        yield from gather
    finally:
        if conn.closed:
            for fut in waiters:
                fut.cancel()
            for fut in self._results:
                if not fut.done():
                    fut.cancel()
        else:
            try:
                results = yield from exec_
            except RedisError as err:
                for fut in waiters:
                    fut.set_exception(err)
            else:
                assert len(results) == len(waiters), (
                    "Results does not match waiters", results, waiters)
                self._resolve_waiters(results, return_exceptions)
    return (yield from self._gather_result(return_exceptions))
def send_request(self):
    """
    Send a request (which should be a CON message) to a CASAN slave
    and wait for an answer or a timeout. The request may be
    retransmitted if needed.
    In order to know if an answer has been received, the caller may
    test that m.req_rep is not None.
    :param m: message to send
    :type m: class msg
    :return: reply message or None
    :rtype: class Msg
    """
    if self.peer is None:
        raise RuntimeError('Unknown destination address')

    self.req_rep = None
    self.send()
    if self.msgtype == Msg.Types.CON:
        t = self._initial_timeout()
        self._event = asyncio.Event()
        while t is not None:
            try:
                yield from asyncio.shield(
                    asyncio.wait_for(self._wait_for_reply(), t))
                t = None
            except asyncio.TimeoutError:
                self.send()
                t = self._next_timeout()
                g.d.m('msg', 'Resend message {}, next={}'.format(self, t))

    return self.req_rep
def _poll(self, waiter, timeout):
    assert waiter is self._waiter, (waiter, self._waiter)
    self._ready(self._weakref)

    @asyncio.coroutine
    def cancel():
        self._waiter = create_future(self._loop)
        self._cancelling = True
        self._cancellation_waiter = self._waiter
        self._conn.cancel()
        if not self._conn.isexecuting():
            return
        try:
            yield from asyncio.wait_for(self._waiter, timeout,
                                        loop=self._loop)
        except psycopg2.extensions.QueryCanceledError:
            pass
        except asyncio.TimeoutError:
            self._close()

    try:
        yield from asyncio.wait_for(self._waiter, timeout, loop=self._loop)
    except (asyncio.CancelledError, asyncio.TimeoutError) as exc:
        yield from asyncio.shield(cancel(), loop=self._loop)
        raise exc
    except psycopg2.extensions.QueryCanceledError:
        raise asyncio.CancelledError
    finally:
        if self._cancelling:
            self._cancelling = False
            if self._waiter is self._cancellation_waiter:
                self._waiter = None
            self._cancellation_waiter = None
        else:
            self._waiter = None
async def assertTimeout(
        fut: asyncio.Future, timeout: float, shield: bool = False) -> Any:
    """
    Checks that the given coroutine or future is not fulfilled before a
    specified amount of time runs out.
    """
    if shield:
        fut = asyncio.shield(fut)
    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(fut, timeout)
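# A hypothetical usage sketch for the assertTimeout helper above, assuming a
# pytest-asyncio test environment; the test name and future are invented for
# illustration only.
import asyncio

import pytest


@pytest.mark.asyncio
async def test_pending_future_times_out():
    fut = asyncio.Future()
    # The future is never resolved, so the shielded wait_for inside
    # assertTimeout should raise asyncio.TimeoutError, which the helper
    # expects via pytest.raises.
    await assertTimeout(fut, timeout=0.05, shield=True)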
def test_shield_exception(self):
    inner = asyncio.Future(loop=self.loop)
    outer = asyncio.shield(inner)
    test_utils.run_briefly(self.loop)
    exc = RuntimeError('expected')
    inner.set_exception(exc)
    test_utils.run_briefly(self.loop)
    self.assertIs(outer.exception(), exc)
def check_program_end(self):
    try:
        yield from asyncio.shield(
            asyncio.wait_for(self.exit_future, self.PROGRAM_TIMEOUT))
    except asyncio.TimeoutError:
        pass
    yield from self.emit_terminate_message()
    yield from self.rm_tmp_dir()
def await_result(self) -> 'asyncio.Future[Result]':
    """
    Wait for a result.

    Once the :class:`asyncio.Future` is done, all tasks are guaranteed to
    be done as well.
    """
    return asyncio.shield(self._result_future, loop=self._loop)
def _get_next(self, timeout):
    waiter = reusable_waiter(self.conn._io_loop, timeout)
    while len(self.items) == 0:
        self._maybe_fetch_batch()
        if self.error is not None:
            raise self.error
        with translate_timeout_errors():
            yield from waiter(asyncio.shield(self.new_response))
    return self.items.popleft()
async def test_mempipe_async_eof(event_loop):
    reader, writer = mempipe(loop=event_loop)
    writer.write(b'FUBAR')
    read = event_loop.create_task(reader.read())
    with pytest.raises(asyncio.TimeoutError):
        print(await asyncio.wait_for(asyncio.shield(read), timeout=0.05))
    writer.write_eof()
    message = await asyncio.wait_for(read, timeout=5.0)
    assert message == b'FUBAR'
def disconnected(self):
    """
    Future that resolves when the connection to Telegram
    ends, either by user action or in the background.

    Note that it may resolve in either a ``ConnectionError``
    or any other unexpected error that could not be handled.
    """
    return asyncio.shield(self._disconnected, loop=self._loop)
def fetch_next(self, wait=True):
    timeout = Cursor._wait_to_timeout(wait)
    waiter = reusable_waiter(self.conn._io_loop, timeout)
    while len(self.items) == 0 and self.error is None:
        self._maybe_fetch_batch()
        with translate_timeout_errors():
            yield from waiter(asyncio.shield(self.new_response))
    # If there is a (non-empty) error to be received, we return True, so the
    # user will receive it on the next `next` call.
    return len(self.items) != 0 or not isinstance(self.error, RqlCursorEmpty)
def abort_transaction(self):
    self._ensure_transactional()
    log.debug(
        "Aborting transaction for id %s",
        self._txn_manager.transactional_id)
    self._txn_manager.aborting_transaction()
    yield from asyncio.shield(
        self._txn_manager.wait_for_transaction_end(),
        loop=self._loop
    )
def begin_transaction(self):
    self._ensure_transactional()
    log.debug(
        "Beginning a new transaction for id %s",
        self._txn_manager.transactional_id)
    yield from asyncio.shield(
        self._txn_manager.wait_for_pid(),
        loop=self._loop
    )
    self._txn_manager.begin_transaction()
async def async_run_in_loop(co, loop, cancel_connect=True):
    our_loop = get_running_loop()
    if loop == our_loop:
        # shortcut when already running in the same loop
        fu = asyncio.ensure_future(co)
    else:
        fu = asyncio.wrap_future(
            asyncio.run_coroutine_threadsafe(co, loop=loop))
    if not cancel_connect:
        fu = asyncio.shield(fu)
    return await fu
def async_camera_image(self):
    """Return a still image response from the camera."""
    from haffmpeg import ImageFrame, IMAGE_JPEG
    ffmpeg = ImageFrame(
        self.hass.data[DATA_FFMPEG].binary, loop=self.hass.loop)

    image = yield from asyncio.shield(ffmpeg.get_image(
        self._input, output_format=IMAGE_JPEG,
        extra_cmd=self._ffmpeg_arguments), loop=self.hass.loop)
    return image
def async_camera_image(self):
    """Return a still image response from the camera."""
    self.renew_live_stream_session()

    from haffmpeg import ImageFrame, IMAGE_JPEG
    ffmpeg = ImageFrame(self._ffmpeg.binary, loop=self.hass.loop)

    image = yield from asyncio.shield(ffmpeg.get_image(
        self._live_stream_session.live_stream_url,
        output_format=IMAGE_JPEG,
        extra_cmd=self._ffmpeg_arguments), loop=self.hass.loop)
    return image
def restart(self, request: web.Request) -> Awaitable[None]:
    """Restart add-on."""
    addon = self._extract_addon(request)
    return asyncio.shield(addon.restart())
def wrapped(*args, **kwargs):
    return asyncio.shield(func(*args, **kwargs))
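# A minimal sketch of the decorator that a wrapper like the one above usually
# belongs to; the decorator name `shielded` and the sample coroutine are
# assumptions for illustration, not taken from the snippet itself.
import asyncio
import functools


def shielded(func):
    """Wrap a coroutine function so that awaiting callers cannot cancel it."""
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        # shield() protects the inner task: cancelling the returned future
        # does not cancel the task created from func.
        return asyncio.shield(func(*args, **kwargs))
    return wrapped


@shielded
async def flush_to_disk():
    # If the awaiting caller is cancelled, this still runs to completion.
    await asyncio.sleep(1)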
def wait_for_ball_unknown_ball(self):
    """Return true if the device has unknown balls which are neither clearly new nor returned."""
    return asyncio.shield(self._unknown_balls, loop=self.machine.clock.loop)
def rebuild(self, request: web.Request) -> Coroutine:
    """Rebuild Home Assistant."""
    return asyncio.shield(self.sys_homeassistant.rebuild())
def wait_for_ball_left(self):
    """Wait until a ball left."""
    if self._already_left:
        raise AssertionError("Invalid wait. Ball left before eject.")
    return asyncio.shield(self._ball_left, loop=self.machine.clock.loop)
def rebuild(self, request: web.Request) -> Awaitable[None]:
    """Rebuild local build add-on."""
    addon = self._extract_addon_installed(request)
    return asyncio.shield(addon.rebuild())
def start(self, request: web.Request) -> Awaitable[None]:
    """Start add-on."""
    addon = self._extract_addon_installed(request)
    return asyncio.shield(addon.start())
async def __run(self):
    result = None

    try:
        while True:
            try:
                result = await asyncio.wait_for(
                    self.__commandqueue.get(),
                    timeout=self.IMMEDIATE_COMMAND_TIMEOUT,
                    loop=self.__loop,
                )
            except asyncio.TimeoutError:
                # The cancellation of the __commandqueue.get() that happens
                # in this case is intended, and is just what asyncio.Queue
                # suggests for "get with timeout".

                subsystems = self._get_idle_interests()
                if subsystems is None:
                    # The presumably most quiet subsystem -- in this case,
                    # idle is only used to keep the connection alive.
                    subsystems = ["database"]

                result = CommandResult("idle", subsystems, self._parse_list)
                self.__idle_results.put_nowait(result)

                self.__command_enqueued = asyncio.Future()

            self._write_command(result._command, result._args)
            while True:
                try:
                    if self.__command_enqueued is not None:
                        # We're in idle mode.
                        line_future = asyncio.shield(
                            self.__read_output_line())
                        await asyncio.wait(
                            [line_future, self.__command_enqueued],
                            return_when=asyncio.FIRST_COMPLETED)
                        if self.__command_enqueued.done():
                            self._write_command("noidle")
                            self.__command_enqueued = None
                        l = await line_future
                    else:
                        l = await self.__read_output_line()
                except CommandError as e:
                    result._feed_error(e)
                    break
                result._feed_line(l)
                if l is None:
                    break

            result = None
    except Exception as e:
        # Prevent the destruction of the pending task in the shutdown
        # function -- it's just shutting down by itself.
        self.__run_task = None
        self.disconnect()

        if result is not None:
            result._feed_error(e)
            return
        else:
            # Typically this is a bug in mpd.asyncio.
            raise
def wrap(*args, **kwargs):
    return wraps(func)(awaiter)(asyncio.shield(func(*args, **kwargs)))
def repair(self, request: web.Request) -> Awaitable[None]:
    """Try to repair the local setup / overlayfs."""
    return asyncio.shield(self.sys_core.repair())
def wait_for_ball_return(self):
    """Wait until a ball returned."""
    return asyncio.shield(self._ball_returned, loop=self.machine.clock.loop)
def restart(self, request: web.Request) -> Awaitable[None]:
    """Soft restart Supervisor."""
    return asyncio.shield(self.sys_supervisor.restart())
def restart(self, request: web.Request) -> Awaitable[None]:
    """Restart Home Assistant."""
    return asyncio.shield(self.sys_homeassistant.restart())
def stop(self, request: web.Request) -> Awaitable[None]:
    """Stop Home Assistant."""
    return asyncio.shield(self.sys_homeassistant.stop())
def async_setup(self, platform, platform_config, discovery_info=None,
                tries=0):
    """Set up the platform."""
    logger = self.logger
    hass = self.hass
    full_name = '{}.{}'.format(self.domain, self.platform_name)

    logger.info("Setting up %s", full_name)
    warn_task = hass.loop.call_later(
        SLOW_SETUP_WARNING, logger.warning,
        "Setup of platform %s is taking over %s seconds.",
        self.platform_name, SLOW_SETUP_WARNING)

    try:
        if getattr(platform, 'async_setup_platform', None):
            task = platform.async_setup_platform(
                hass, platform_config,
                self._async_schedule_add_entities, discovery_info)
        else:
            # This should not be replaced with hass.async_add_job because
            # we don't want to track this task in case it blocks startup.
            task = hass.loop.run_in_executor(
                None, platform.setup_platform, hass, platform_config,
                self._schedule_add_entities, discovery_info)

        yield from asyncio.wait_for(asyncio.shield(task, loop=hass.loop),
                                    SLOW_SETUP_MAX_WAIT, loop=hass.loop)

        # Block till all entities are done
        if self._tasks:
            pending = [task for task in self._tasks if not task.done()]
            self._tasks.clear()

            if pending:
                yield from asyncio.wait(pending, loop=self.hass.loop)

        hass.config.components.add(full_name)
    except PlatformNotReady:
        tries += 1
        wait_time = min(tries, 6) * 30
        logger.warning(
            'Platform %s not ready yet. Retrying in %d seconds.',
            self.platform_name, wait_time)
        async_track_point_in_time(
            hass, self.async_setup(platform, platform_config,
                                   discovery_info, tries),
            dt_util.utcnow() + timedelta(seconds=wait_time))
    except asyncio.TimeoutError:
        logger.error(
            "Setup of platform %s is taking longer than %s seconds."
            " Startup will proceed without waiting any longer.",
            self.platform_name, SLOW_SETUP_MAX_WAIT)
    except Exception:  # pylint: disable=broad-except
        logger.exception("Error while setting up platform %s",
                         self.platform_name)
    finally:
        warn_task.cancel()
def stop(self, request):
    """Stop addon."""
    addon = self._extract_addon(request)
    return asyncio.shield(addon.stop(), loop=self.loop)
                results[entity_id] = {
                    "ids": [entity_id],
                    "status": "ERROR",
                    "errorCode": ERR_DEVICE_OFFLINE,
                }
                continue

            entities[entity_id] = GoogleEntity(hass, data.config, state)
            executions[entity_id] = [execution]

    try:
        execute_results = await asyncio.wait_for(
            asyncio.shield(
                asyncio.gather(
                    *(
                        _entity_execute(entities[entity_id], data, execution)
                        for entity_id, execution in executions.items()
                    )
                )
            ),
            EXECUTE_LIMIT,
        )
        for entity_id, result in zip(executions, execute_results):
            if result is not None:
                results[entity_id] = result
    except asyncio.TimeoutError:
        pass

    final_results = list(results.values())

    for entity in entities.values():
        if entity.entity_id in results:
def stop(self, request: web.Request) -> Awaitable[None]:
    """Stop add-on."""
    addon = self._extract_addon(request)
    return asyncio.shield(addon.stop())
def shield(self, coro):
    return asyncio.shield(coro, loop=self.loop)
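# Note: the explicit ``loop`` argument seen in many of these snippets was
# deprecated for asyncio.shield() in Python 3.8 and removed in Python 3.10.
# On current Python, a helper like the one above would presumably reduce to:
def shield(self, coro):
    return asyncio.shield(coro)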
def uninstall(self, request: web.Request) -> Awaitable[None]:
    """Uninstall add-on."""
    addon = self._extract_addon(request)
    return asyncio.shield(addon.uninstall())
def _async_setup_platform(self, platform_type, platform_config,
                          discovery_info=None, tries=0):
    """Set up a platform for this component.

    This method must be run in the event loop.
    """
    platform = yield from async_prepare_setup_platform(
        self.hass, self.config, self.domain, platform_type)

    if platform is None:
        return

    # Config > Platform > Component
    scan_interval = (
        platform_config.get(CONF_SCAN_INTERVAL) or
        getattr(platform, 'SCAN_INTERVAL', None) or self.scan_interval)
    parallel_updates = getattr(
        platform, 'PARALLEL_UPDATES',
        int(not hasattr(platform, 'async_setup_platform')))

    entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)

    key = (platform_type, scan_interval, entity_namespace)

    if key not in self._platforms:
        entity_platform = self._platforms[key] = EntityPlatform(
            self, platform_type, scan_interval, parallel_updates,
            entity_namespace)
    else:
        entity_platform = self._platforms[key]

    self.logger.info("Setting up %s.%s", self.domain, platform_type)
    warn_task = self.hass.loop.call_later(
        SLOW_SETUP_WARNING, self.logger.warning,
        "Setup of platform %s is taking over %s seconds.", platform_type,
        SLOW_SETUP_WARNING)

    try:
        if getattr(platform, 'async_setup_platform', None):
            task = platform.async_setup_platform(
                self.hass, platform_config,
                entity_platform.async_schedule_add_entities, discovery_info
            )
        else:
            # This should not be replaced with hass.async_add_job because
            # we don't want to track this task in case it blocks startup.
            task = self.hass.loop.run_in_executor(
                None, platform.setup_platform, self.hass, platform_config,
                entity_platform.schedule_add_entities, discovery_info
            )
        yield from asyncio.wait_for(
            asyncio.shield(task, loop=self.hass.loop),
            SLOW_SETUP_MAX_WAIT, loop=self.hass.loop)
        yield from entity_platform.async_block_entities_done()
        self.hass.config.components.add(
            '{}.{}'.format(self.domain, platform_type))
    except PlatformNotReady:
        tries += 1
        wait_time = min(tries, 6) * 30
        self.logger.warning(
            'Platform %s not ready yet. Retrying in %d seconds.',
            platform_type, wait_time)
        async_track_point_in_time(
            self.hass, self._async_setup_platform(
                platform_type, platform_config, discovery_info, tries),
            dt_util.utcnow() + timedelta(seconds=wait_time))
    except asyncio.TimeoutError:
        self.logger.error(
            "Setup of platform %s is taking longer than %s seconds."
            " Startup will proceed without waiting any longer.",
            platform_type, SLOW_SETUP_MAX_WAIT)
    except Exception:  # pylint: disable=broad-except
        self.logger.exception(
            "Error while setting up platform %s", platform_type)
    finally:
        warn_task.cancel()
async def _async_setup_platform(self, async_create_setup_task, tries=0):
    """Set up a platform via config file or config entry.

    async_create_setup_task creates a coroutine that sets up platform.
    """
    current_platform.set(self)
    logger = self.logger
    hass = self.hass
    full_name = f"{self.domain}.{self.platform_name}"

    logger.info("Setting up %s", full_name)
    warn_task = hass.loop.call_later(
        SLOW_SETUP_WARNING,
        logger.warning,
        "Setup of %s platform %s is taking over %s seconds.",
        self.domain,
        self.platform_name,
        SLOW_SETUP_WARNING,
    )

    try:
        task = async_create_setup_task()

        await asyncio.wait_for(asyncio.shield(task), SLOW_SETUP_MAX_WAIT)

        # Block till all entities are done
        if self._tasks:
            pending = [task for task in self._tasks if not task.done()]
            self._tasks.clear()

            if pending:
                await asyncio.wait(pending)

        hass.config.components.add(full_name)
        return True
    except PlatformNotReady:
        tries += 1
        wait_time = min(tries, 6) * 30
        logger.warning(
            "Platform %s not ready yet. Retrying in %d seconds.",
            self.platform_name,
            wait_time,
        )

        async def setup_again(now):
            """Run setup again."""
            self._async_cancel_retry_setup = None
            await self._async_setup_platform(async_create_setup_task, tries)

        self._async_cancel_retry_setup = async_call_later(
            hass, wait_time, setup_again
        )
        return False
    except asyncio.TimeoutError:
        logger.error(
            "Setup of platform %s is taking longer than %s seconds."
            " Startup will proceed without waiting any longer.",
            self.platform_name,
            SLOW_SETUP_MAX_WAIT,
        )
        return False
    except Exception:  # pylint: disable=broad-except
        logger.exception(
            "Error while setting up %s platform for %s",
            self.platform_name,
            self.domain,
        )
        return False
    finally:
        warn_task.cancel()
def update(self, request: web.Request) -> Awaitable[None]:
    """Update add-on."""
    addon: Addon = self._extract_addon_installed(request)
    return asyncio.shield(addon.update())
def wait_for_timeout(self):
    """Wait for timeout."""
    return asyncio.shield(self._timeout_future)
def wait_for_can_skip(self):
    """Wait until this future can skip."""
    return asyncio.shield(self._can_skip_future)
def start(self, request: web.Request) -> Coroutine:
    """Start Home Assistant."""
    return asyncio.shield(self.sys_homeassistant.start())
def restart(self, request):
    """Restart addon."""
    addon = self._extract_addon(request)
    return asyncio.shield(addon.restart(), loop=self.loop)
def install(self, request: web.Request) -> Awaitable[None]:
    """Install add-on."""
    addon = self._extract_addon(request, check_installed=False)
    return asyncio.shield(addon.install())
def wait_for_ready(self):
    """Wait until the device is ready."""
    return asyncio.shield(self._ready, loop=self.machine.clock.loop)
def wait_for_confirm(self):
    """Wait for confirm."""
    return asyncio.shield(self._confirm_future)