Example #1
 def table_cleanup_interval(self, value: Seconds) -> None:
     self._table_cleanup_interval = want_seconds(value)
Example #2
 def broker_commit_interval(self, value: Seconds) -> None:
     self._broker_commit_interval = want_seconds(value)
Example #3
 def broker_commit_livelock_soft_timeout(self, value: Seconds) -> None:
     self._broker_commit_livelock_soft_timeout = want_seconds(value)
Example #4
 def broker_session_timeout(self, value: Seconds) -> None:
     self._broker_session_timeout = want_seconds(value)
Example #5
 def broker_heartbeat_interval(self, value: Seconds) -> None:
     self._broker_heartbeat_interval = want_seconds(value)
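All of the setters in Examples #1–#5 funnel through want_seconds, which normalizes a Seconds value (an int, float, or datetime.timedelta) into a plain float of seconds. Below is a minimal, self-contained sketch of that pattern; the simplified want_seconds here is a stand-in for the real helper in mode.utils.times, and the Config class is illustrative only.

from datetime import timedelta
from typing import Union

# Simplified stand-in for faust.types.Seconds / mode.utils.times.want_seconds.
Seconds = Union[int, float, timedelta]

def want_seconds(value: Seconds) -> float:
    """Normalize int/float/timedelta into float seconds (sketch)."""
    if isinstance(value, timedelta):
        return value.total_seconds()
    return float(value)

class Config:
    _table_cleanup_interval: float = 30.0

    @property
    def table_cleanup_interval(self) -> float:
        return self._table_cleanup_interval

    @table_cleanup_interval.setter
    def table_cleanup_interval(self, value: Seconds) -> None:
        # Store everything as float seconds so internal timers never have
        # to care whether the caller passed a timedelta or a plain number.
        self._table_cleanup_interval = want_seconds(value)

conf = Config()
conf.table_cleanup_interval = timedelta(minutes=1)
assert conf.table_cleanup_interval == 60.0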
Example #6
#: Used as the default value for :setting:`broker_commit_every`.
BROKER_COMMIT_EVERY = 10_000

#: How often we commit acknowledged messages on a timer.
#: Used as the default value for :setting:`broker_commit_interval`.
BROKER_COMMIT_INTERVAL = 2.8

#: Kafka consumer session timeout (``session_timeout_ms``).
BROKER_SESSION_TIMEOUT = 30.0

#: Kafka consumer heartbeat (``heartbeat_interval_ms``).
BROKER_HEARTBEAT_INTERVAL = 3.0

#: How long to wait before we warn that the commit offset has
#: not advanced.
BROKER_LIVELOCK_SOFT = want_seconds(timedelta(minutes=5))

#: How often we clean up expired items in windowed tables.
#: Used as the default value for :setting:`table_cleanup_interval`.
TABLE_CLEANUP_INTERVAL = 30.0

#: Prefix used for reply topics.
REPLY_TO_PREFIX = 'f-reply-'

#: Default expiry time for replies, in seconds (float).
REPLY_EXPIRES = want_seconds(timedelta(days=1))

#: Max number of messages channels/streams/topics can "prefetch".
STREAM_BUFFER_MAXSIZE = 4096

#: We buffer up sending messages until the
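Since these defaults are normalized with want_seconds, constants defined from a timedelta end up as plain floats at import time. A quick check of the values shown above, assuming want_seconds is imported from mode.utils.times as Faust does:

from datetime import timedelta
from mode.utils.times import want_seconds

assert want_seconds(timedelta(minutes=5)) == 300.0   # BROKER_LIVELOCK_SOFT
assert want_seconds(timedelta(days=1)) == 86400.0    # REPLY_EXPIRES
assert want_seconds(30.0) == 30.0                     # floats pass through unchanged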
Example #7
    def __init__(  # noqa: C901
            self,
            id: str,
            *,
            version: int = None,
            broker: Union[str, URL] = None,
            broker_client_id: str = None,
            broker_commit_every: int = None,
            broker_commit_interval: Seconds = None,
            broker_commit_livelock_soft_timeout: Seconds = None,
            broker_session_timeout: Seconds = None,
            broker_heartbeat_interval: Seconds = None,
            broker_check_crcs: bool = None,
            agent_supervisor: SymbolArg[Type[SupervisorStrategyT]] = None,
            store: Union[str, URL] = None,
            autodiscover: AutodiscoverArg = None,
            origin: str = None,
            canonical_url: Union[str, URL] = None,
            datadir: Union[Path, str] = None,
            tabledir: Union[Path, str] = None,
            key_serializer: CodecArg = None,
            value_serializer: CodecArg = None,
            loghandlers: List[logging.StreamHandler] = None,
            table_cleanup_interval: Seconds = None,
            table_standby_replicas: int = None,
            topic_replication_factor: int = None,
            topic_partitions: int = None,
            id_format: str = None,
            reply_to: str = None,
            reply_to_prefix: str = None,
            reply_create_topic: bool = None,
            reply_expires: Seconds = None,
            ssl_context: ssl.SSLContext = None,
            stream_buffer_maxsize: int = None,
            stream_wait_empty: bool = None,
            stream_ack_cancelled_tasks: bool = None,
            stream_ack_exceptions: bool = None,
            stream_publish_on_commit: bool = None,
            producer_linger_ms: int = None,
            producer_max_batch_size: int = None,
            producer_acks: int = None,
            producer_max_request_size: int = None,
            producer_compression_type: str = None,
            worker_redirect_stdouts: bool = None,
            worker_redirect_stdouts_level: Severity = None,
            Agent: SymbolArg[Type[AgentT]] = None,
            Stream: SymbolArg[Type[StreamT]] = None,
            Table: SymbolArg[Type[TableT]] = None,
            Set: SymbolArg[Type[SetT]] = None,
            TableManager: SymbolArg[Type[TableManagerT]] = None,
            Serializers: SymbolArg[Type[RegistryT]] = None,
            Worker: SymbolArg[Type[WorkerT]] = None,
            PartitionAssignor: SymbolArg[Type[PartitionAssignorT]] = None,
            LeaderAssignor: SymbolArg[Type[LeaderAssignorT]] = None,
            Router: SymbolArg[Type[RouterT]] = None,
            Topic: SymbolArg[Type[TopicT]] = None,
            HttpClient: SymbolArg[Type[HttpClientT]] = None,
            Monitor: SymbolArg[Type[SensorT]] = None,
            # XXX backward compat (remove for Faust 1.0)
            url: Union[str, URL] = None,
            **kwargs: Any) -> None:
        self.version = version if version is not None else self._version
        self.id_format = id_format if id_format is not None else self.id_format
        self.origin = origin if origin is not None else self.origin
        self.id = id
        self.broker = url or broker or BROKER_URL
        self.ssl_context = ssl_context
        self.store = store or STORE_URL
        if autodiscover is not None:
            self.autodiscover = autodiscover
        if broker_client_id is not None:
            self.broker_client_id = broker_client_id
        self.canonical_url = canonical_url or ''
        # datadir is a format string that can contain e.g. {conf.id}
        self.datadir = datadir or DATADIR
        self.tabledir = tabledir or TABLEDIR
        self.broker_commit_interval = (broker_commit_interval
                                       or self._broker_commit_interval)
        self.broker_commit_livelock_soft_timeout = (
            broker_commit_livelock_soft_timeout
            or self._broker_commit_livelock_soft_timeout)
        if broker_session_timeout is not None:
            self.broker_session_timeout = want_seconds(broker_session_timeout)
        if broker_heartbeat_interval is not None:
            self.broker_heartbeat_interval = want_seconds(
                broker_heartbeat_interval)
        self.table_cleanup_interval = (table_cleanup_interval
                                       or self._table_cleanup_interval)

        if broker_commit_every is not None:
            self.broker_commit_every = broker_commit_every
        if broker_check_crcs is not None:
            self.broker_check_crcs = broker_check_crcs
        if key_serializer is not None:
            self.key_serializer = key_serializer
        if value_serializer is not None:
            self.value_serializer = value_serializer
        if table_standby_replicas is not None:
            self.table_standby_replicas = table_standby_replicas
        if topic_replication_factor is not None:
            self.topic_replication_factor = topic_replication_factor
        if topic_partitions is not None:
            self.topic_partitions = topic_partitions
        if reply_create_topic is not None:
            self.reply_create_topic = reply_create_topic
        self.loghandlers = loghandlers if loghandlers is not None else []
        if stream_buffer_maxsize is not None:
            self.stream_buffer_maxsize = stream_buffer_maxsize
        if stream_wait_empty is not None:
            self.stream_wait_empty = stream_wait_empty
        if stream_ack_cancelled_tasks is not None:
            self.stream_ack_cancelled_tasks = stream_ack_cancelled_tasks
        if stream_ack_exceptions is not None:
            self.stream_ack_exceptions = stream_ack_exceptions
        if stream_publish_on_commit is not None:
            self.stream_publish_on_commit = stream_publish_on_commit
        if producer_linger_ms is not None:
            self.producer_linger_ms = producer_linger_ms
        if producer_max_batch_size is not None:
            self.producer_max_batch_size = producer_max_batch_size
        if producer_acks is not None:
            self.producer_acks = producer_acks
        if producer_max_request_size is not None:
            self.producer_max_request_size = producer_max_request_size
        if producer_compression_type is not None:
            self.producer_compression_type = producer_compression_type
        if worker_redirect_stdouts is not None:
            self.worker_redirect_stdouts = worker_redirect_stdouts
        if worker_redirect_stdouts_level is not None:
            self.worker_redirect_stdouts_level = worker_redirect_stdouts_level

        if reply_to_prefix is not None:
            self.reply_to_prefix = reply_to_prefix
        if reply_to is not None:
            self.reply_to = reply_to
        else:
            self.reply_to = f'{self.reply_to_prefix}{uuid4()}'
        if reply_expires is not None:
            self.reply_expires = reply_expires

        self.agent_supervisor = agent_supervisor or AGENT_SUPERVISOR_TYPE

        self.Agent = Agent or AGENT_TYPE
        self.Stream = Stream or STREAM_TYPE
        self.Table = Table or TABLE_TYPE
        self.Set = Set or SET_TYPE
        self.TableManager = TableManager or TABLE_MANAGER_TYPE
        self.Serializers = Serializers or REGISTRY_TYPE
        self.Worker = Worker or WORKER_TYPE
        self.PartitionAssignor = PartitionAssignor or PARTITION_ASSIGNOR_TYPE
        self.LeaderAssignor = LeaderAssignor or LEADER_ASSIGNOR_TYPE
        self.Router = Router or ROUTER_TYPE
        self.Topic = Topic or TOPIC_TYPE
        self.HttpClient = HttpClient or HTTP_CLIENT_TYPE
        self.Monitor = Monitor or MONITOR_TYPE
        self.__dict__.update(kwargs)  # arbitrary configuration
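Because __init__ routes every Seconds-typed argument through the setters shown earlier (or through want_seconds directly), callers can mix floats and timedelta objects freely. A hedged usage sketch; the Settings name, broker URL and values are illustrative:

from datetime import timedelta

settings = Settings(
    'example-app',
    broker='kafka://localhost:9092',
    broker_commit_interval=timedelta(seconds=5),   # normalized to 5.0
    broker_session_timeout=45.0,                   # already float seconds
    table_cleanup_interval=timedelta(minutes=1),   # normalized to 60.0
)
assert settings.broker_commit_interval == 5.0
assert settings.table_cleanup_interval == 60.0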
Example #8
 def to_python(self, conf: _Settings, value: _Seconds) -> float:
     return want_seconds(value)
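The to_python hook above converts a Seconds setting value to a float when it is applied. Below is a minimal, illustrative descriptor built around the same hook; the SecondsParam class, its attribute handling, and the simplified want_seconds are assumptions for the sketch, not the library's API:

from datetime import timedelta
from typing import Union

Seconds = Union[int, float, timedelta]

def want_seconds(value: Seconds) -> float:
    return value.total_seconds() if isinstance(value, timedelta) else float(value)

class SecondsParam:
    """Illustrative descriptor: stores any Seconds value as float seconds."""

    def __set_name__(self, owner: type, name: str) -> None:
        self._name = '_' + name

    def to_python(self, conf: object, value: Seconds) -> float:
        return want_seconds(value)

    def __set__(self, conf: object, value: Seconds) -> None:
        setattr(conf, self._name, self.to_python(conf, value))

    def __get__(self, conf: object, owner: type = None):
        if conf is None:
            return self
        return getattr(conf, self._name)

class Conf:
    reply_expires = SecondsParam()

c = Conf()
c.reply_expires = timedelta(hours=1)
assert c.reply_expires == 3600.0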
Example #9
    async def take(self, max_: int,
                   within: Seconds) -> AsyncIterable[Sequence[T_co]]:
        """Buffer n values at a time and yield a list of buffered values.

        Arguments:
            within: Timeout for when we give up waiting for another value,
                and process the values we have.
                Warning: If there's no timeout (i.e. ``within=None``),
                the agent is likely to stall and block buffered events
                for an unreasonable length of time.
        """
        buffer: List[T_co] = []
        events: List[EventT] = []
        buffer_add = buffer.append
        event_add = events.append
        buffer_size = buffer.__len__
        buffer_full = asyncio.Event(loop=self.loop)
        buffer_consumed = asyncio.Event(loop=self.loop)
        timeout = want_seconds(within) if within else None
        stream_enable_acks: bool = self.enable_acks

        buffer_consuming: Optional[asyncio.Future] = None

        channel_it = aiter(self.channel)

        # We add this processor to populate the buffer, and the stream
        # is passively consumed in the background (enable_passive below).
        async def add_to_buffer(value: T) -> T:
            try:
                # buffer_consuming is set when consuming buffer after timeout.
                nonlocal buffer_consuming
                if buffer_consuming is not None:
                    try:
                        await buffer_consuming
                    finally:
                        buffer_consuming = None
                buffer_add(cast(T_co, value))
                event = self.current_event
                if event is not None:
                    event_add(event)
                if buffer_size() >= max_:
                    # signal that the buffer is full and should be emptied.
                    buffer_full.set()
                    # strict wait for buffer to be consumed after buffer full.
                    # If max is 1000, we are not allowed to return 1001 values.
                    buffer_consumed.clear()
                    await self.wait(buffer_consumed)
            except Exception as exc:
                self.log.exception('Error adding to take buffer: %r', exc)
                await self.crash(exc)
            return value

        # Disable acks so that this method can ack events manually,
        # only after they have been consumed by the user.
        self.enable_acks = False

        self.add_processor(add_to_buffer)
        self._enable_passive(cast(ChannelT, channel_it))
        try:
            while not self.should_stop:
                # wait until buffer full, or timeout
                await self.wait_for_stopped(buffer_full, timeout=timeout)
                if buffer:
                    # make sure the background task does not add new items
                    # to the buffer while we read.
                    buffer_consuming = self.loop.create_future()
                    try:
                        yield list(buffer)
                    finally:
                        buffer.clear()
                        for event in events:
                            await self.ack(event)
                        events.clear()
                        # allow writing to buffer again
                        notify(buffer_consuming)
                        buffer_full.clear()
                        buffer_consumed.set()

        finally:
            # Restore the previous value of "enable_acks".
            self.enable_acks = stream_enable_acks
            self._processors.remove(add_to_buffer)
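In application code, take is normally driven from inside an agent: it yields lists of up to max_ values, or whatever has accumulated once within expires. A hedged usage sketch; the app, topic name and processing are illustrative:

import faust

app = faust.App('batcher', broker='kafka://localhost:9092')
orders = app.topic('orders', value_type=str)

@app.agent(orders)
async def process_batches(stream):
    # Up to 100 values per batch, or whatever arrived within 5 seconds.
    async for batch in stream.take(100, within=5.0):
        print(f'processing batch of {len(batch)} orders')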
Example #10
 async def get(self, *, timeout: Seconds = None) -> Any:
     timeout_: float = want_seconds(timeout)
     if timeout_:
         return await asyncio.wait_for(self.queue.get(), timeout=timeout_)
     return await self.queue.get()
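When no timeout is given the call waits indefinitely (the falsy check on timeout_ skips wait_for); with a timeout, asyncio.wait_for raises asyncio.TimeoutError once it expires. A hedged usage sketch; the channel argument is illustrative:

import asyncio
from datetime import timedelta

async def consume(channel) -> None:
    # Wait at most 30 seconds for the next item; a timedelta works too,
    # because want_seconds normalizes it to a float first.
    try:
        value = await channel.get(timeout=timedelta(seconds=30))
    except asyncio.TimeoutError:
        value = None  # nothing arrived within the timeout
    print(value)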
Example #11
 def stream_recovery_delay(self, delay: Seconds) -> None:
     self._stream_recovery_delay = want_seconds(delay)
Example #12
 def reply_expires(self, reply_expires: Seconds) -> None:
     self._reply_expires = want_seconds(reply_expires)