Example 1
        def delete_stream(name: str):
            async def wrapper():
                async with LiftBridgeClient() as client:
                    # Remove the companion offset stream first, then the stream itself
                    await client.delete_stream(client.get_offset_stream(name))
                    await client.delete_stream(name)

            run_sync(wrapper)
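Every example on this page follows the same pattern: define an async factory and hand it to run_sync, which blocks until the coroutine completes. Below is a minimal sketch of what such a helper can look like, assuming it simply drives the coroutine on a fresh event loop; the project's actual run_sync may pool loops and treat its close_all flag differently:

    import asyncio
    from typing import Awaitable, Callable, TypeVar

    T = TypeVar("T")

    def run_sync(factory: Callable[[], Awaitable[T]], close_all: bool = True) -> T:
        # Obtain the coroutine from the factory and block until it finishes.
        # close_all is accepted only for signature compatibility in this sketch.
        return asyncio.run(factory())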
Example 2
    def handle_benchmark_subscriber(self,
                                    name: str,
                                    cursor: Optional[str] = None,
                                    *args,
                                    **kwargs):
        async def subscriber():
            async with LiftBridgeClient() as client:
                report_interval = 1.0
                t0 = perf_counter()
                total_msg = last_msg = 0
                total_size = last_size = 0
                async for msg in client.subscribe(name):
                    total_msg += 1
                    total_size += len(msg.value)
                    t = perf_counter()
                    dt = t - t0
                    if dt >= report_interval:
                        self.print("%d msg/sec, %d bytes/sec" %
                                   ((total_msg - last_msg) / dt,
                                    (total_size - last_size) / dt))
                        t0 = t
                        last_msg = total_msg
                        last_size = total_size
                    if cursor:
                        await client.set_cursor(stream=name,
                                                partition=0,
                                                cursor_id=cursor,
                                                offset=msg.offset)

        run_sync(subscriber)
Example 3
    def handle_subscribe(
        self,
        name: str,
        partition: int = 0,
        cursor: str = "",
        start_offset: int = 0,
        start_ts: Optional[int] = None,
        *args,
        **kwargs,
    ):
        async def subscribe():
            async with LiftBridgeClient() as client:
                async for msg in client.subscribe(
                        stream=name,
                        partition=partition,
                        start_offset=start_offset,
                        cursor_id=cursor or None,
                        start_timestamp=start_ts,
                ):
                    print(
                        "# Subject: %s Partition: %s Offset: %s Timestamp: %s Key: %s Headers: %s"
                        % (
                            msg.subject,
                            msg.partition,
                            msg.offset,
                            msg.timestamp,
                            msg.key,
                            msg.headers,
                        ))
                    print(msg.value)

        if start_ts:
            # A timestamp takes precedence over start_offset;
            # convert seconds to nanoseconds.
            start_offset = None
            start_ts *= 1_000_000_000
        run_sync(subscribe)
Example 4
    def handle(self, input, addresses, jobs, *args, **options):
        async def runner():
            nonlocal lock
            # Create the lock inside the running loop
            # so it is bound to the correct event loop.
            lock = asyncio.Lock()
            tasks = [
                asyncio.create_task(ping_worker(), name=f"ping-{i}")
                for i in range(min(jobs, len(addr_list)))
            ]
            await asyncio.gather(*tasks)

        async def ping_worker():
            while True:
                # Hold the lock only while taking the next address,
                # so pings from different workers run concurrently.
                async with lock:
                    if not addr_list:
                        break  # Done
                    addr = addr_list.pop(0)
                rtt, attempts = await ping.ping_check_rtt(addr, count=1, timeout=1000)
                if rtt:
                    self.stdout.write(f"{addr} {rtt * 1000:.2f}ms\n")
                else:
                    self.stdout.write(f"{addr} FAIL\n")

        # Run ping
        addr_list = self.get_addresses(addresses, input)
        lock: Optional[asyncio.Lock] = None
        ping = Ping()
        setup_asyncio()
        run_sync(runner)
Example 5
    def handle_load(self, fields, input, chunk, rm, *args, **kwargs):
        async def upload(table: str, data: List[bytes]):
            CHUNK = 1000
            n_parts = len(config.clickhouse.cluster_topology.split(","))
            async with LiftBridgeClient() as client:
                while data:
                    chunk, data = data[:CHUNK], data[CHUNK:]
                    await client.publish(
                        b"\n".join(chunk),
                        stream=f"ch.{table}",
                        partition=random.randint(0, n_parts - 1),
                    )

        for fn in input:
            # Read data
            self.print("Reading file %s" % fn)
            if fn.endswith(".gz"):
                with gzip.GzipFile(fn) as f:
                    records = f.read().replace("\r", "").splitlines()
            else:
                with open(fn) as f:
                    records = f.read().replace("\r", "").splitlines()
            table = fn.split("-", 1)[0]
            run_sync(partial(upload, table, records))
            if rm:
                os.unlink(fn)
Example 6
    def handle_show_metadata(self, *args, **options):
        async def get_meta() -> Metadata:
            async with LiftBridgeClient() as client:
                return await client.fetch_metadata()

        async def get_partition_meta(stream, partition) -> PartitionMetadata:
            async with LiftBridgeClient() as client:
                return await client.fetch_partition_metadata(stream, partition)

        meta: Metadata = run_sync(get_meta)
        self.print("# Brokers (%d)" % len(meta.brokers))
        self.print("%-20s | %s" % ("ID", "HOST:PORT"))
        for broker in meta.brokers:
            self.print("%-20s | %s:%s" % (broker.id, broker.host, broker.port))
        self.print("# Streams")
        for stream in meta.metadata:
            print("  ## Name: %s Subject: %s" % (stream.name, stream.subject))
            for p in sorted(stream.partitions):
                print("    ### Partition: %d" % p)
                try:
                    p_meta: PartitionMetadata = run_sync(
                        functools.partial(get_partition_meta, stream.name, p))
                except Exception as e:
                    print("[%s|%s] Failed getting data for partition: %s" %
                          (stream.name, p, e))
                    continue
                print("    Leader        : %s" % p_meta.leader)
                print("    Replicas      : %s" %
                      ", ".join(sorted(p_meta.replicas, key=alnum_key)))
                print("    ISR           : %s" %
                      ", ".join(sorted(p_meta.isr, key=alnum_key)))
                print("    HighWatermark : %s" % p_meta.high_watermark)
                print("    NewestOffset  : %s" % p_meta.newest_offset)
Example 7
    def handle_benchmark_publisher(
        self,
        name: str,
        num_messages: int,
        payload_size: int = 64,
        batch=1,
        wait_for_stream=False,
        *args,
        **kwargs,
    ):
        async def publisher():
            async with LiftBridgeClient() as client:
                payload = b" " * payload_size
                t0 = perf_counter()
                for _ in self.progress(range(num_messages), num_messages):
                    await client.publish(payload,
                                         stream=name,
                                         wait_for_stream=wait_for_stream)
                dt = perf_counter() - t0
            self.print("%d messages sent in %.2fms" %
                       (num_messages, dt * 1000))
            self.print("%d msg/sec, %d bytes/sec" %
                       (num_messages / dt, num_messages * payload_size / dt))

        async def batch_publisher():
            async with LiftBridgeClient() as client:
                payload = b" " * payload_size
                t0 = perf_counter()
                out = []
                n_acks = 0
                for _ in self.progress(range(num_messages), num_messages):
                    out += [client.get_publish_request(payload, stream=name)]
                    if len(out) == batch:
                        async for ack in client.publish_async(out):
                            n_acks += 1
                        out = []
                if out:
                    async for _ in client.publish_async(out):
                        n_acks += 1
                    out = []
                dt = perf_counter() - t0
            self.print("%d messages sent in %.2fms (%d acks)" %
                       (num_messages, dt * 1000, n_acks))
            self.print("%d msg/sec, %d bytes/sec" %
                       (num_messages / dt, num_messages * payload_size / dt))

        if batch == 1:
            run_sync(publisher)
        else:
            run_sync(batch_publisher)
Example 8
    def handle_fetch_cursor(self,
                            name: str,
                            stream: str,
                            partition: int = 0,
                            *args,
                            **kwargs):
        async def fetch_cursor():
            async with LiftBridgeClient() as client:
                cursor = await client.fetch_cursor(stream=stream,
                                                   partition=partition,
                                                   cursor_id=name)
                print(cursor)

        run_sync(fetch_cursor)
Example 9
def publish(
    value: bytes,
    stream: str,
    partition: Optional[int] = None,
    key: Optional[bytes] = None,
    headers: Optional[Dict[str, bytes]] = None,
):
    async def wrap():
        async with LiftBridgeClient() as client:
            await client.publish(
                value=value, stream=stream, partition=partition, key=key, headers=headers
            )

    run_sync(wrap)
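A hypothetical call, assuming a stream named "events" already exists; the payload, key, and header values are illustrative only:

    publish(
        b'{"op": "reload"}',
        stream="events",
        key=b"cfg",
        headers={"To": b"worker-1"},
    )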
Example 10
    def handle_set_cursor(self,
                          name: str,
                          stream: str,
                          partition: int = 0,
                          offset: int = 0,
                          *args,
                          **kwargs):
        async def set_cursor():
            async with LiftBridgeClient() as client:
                await client.set_cursor(stream=stream,
                                        partition=partition,
                                        cursor_id=name,
                                        offset=offset)

        run_sync(set_cursor)
Example 11
File: base.py Project: nbashev/noc
    def count(self, oid, filter=None, version=None):
        """
        Iterate MIB subtree and count matching instances
        :param oid: OID
        :param filter: Callable accepting oid and value and returning boolean
        """
        async def run():
            try:
                r = await snmp_count(
                    address=self.script.credentials["address"],
                    oid=oid,
                    community=str(self.script.credentials["snmp_ro"]),
                    bulk=self.script.has_snmp_bulk(),
                    filter=filter,
                    tos=self.script.tos,
                    udp_socket=self.get_socket(),
                    version=version,
                    rate_limit=self.rate_limit,
                )
                return r
            except SNMPError as e:
                if e.code == TIMED_OUT:
                    raise self.TimeOutError()
                else:
                    raise

        if "snmp_ro" not in self.script.credentials:
            raise SNMPError(code=ERR_SNMP_BAD_COMMUNITY)
        version = self._get_snmp_version(version)
        return run_sync(run, close_all=False)
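A hypothetical use from within a script, assuming the usual self.snmp accessor; the OID and predicate are illustrative, not taken from the source:

    # Count instances under IF-MIB::ifOperStatus (1.3.6.1.2.1.2.2.1.8)
    # whose value equals 1 (up).
    n_up = self.snmp.count(
        "1.3.6.1.2.1.2.2.1.8",
        filter=lambda oid, value: value == 1,
    )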
Example 12
File: base.py Project: nbashev/noc
    def set(self, *args):
        """
        Perform SNMP GET request
        :param oid: string or list of oids
        :returns: eigther result scalar or dict of name -> value
        """
        async def run():
            try:
                r = await snmp_set(
                    address=self.script.credentials["address"],
                    varbinds=varbinds,
                    community=str(self.script.credentials["snmp_rw"]),
                    tos=self.script.tos,
                    udp_socket=self.get_socket(),
                    rate_limit=self.rate_limit,
                )
                return r
            except SNMPError as e:
                if e.code == TIMED_OUT:
                    raise self.TimeOutError()
                else:
                    raise

        if len(args) == 1:
            varbinds = args
        elif len(args) == 2:
            varbinds = [(args[0], args[1])]
        else:
            raise ValueError("Invalid varbinds")
        if "snmp_ro" not in self.script.credentials:
            raise SNMPError(code=ERR_SNMP_BAD_COMMUNITY)
        return run_sync(run, close_all=False)
Example 13
File: stub.py Project: nbashev/noc
    def publish(
        self,
        value: bytes,
        stream: str,
        partition: Optional[int] = None,
        key: Optional[bytes] = None,
        headers: Optional[Dict[str, bytes]] = None,
    ):
        async def wrap():
            async with LiftBridgeClient() as client:
                await client.publish(
                    value=value,
                    stream=stream,
                    partition=partition,
                    key=key,
                    headers=headers,
                    auto_compress=bool(config.liftbridge.compression_method),
                )

        run_sync(wrap)
Example 14
    def handle_create_stream(
        self,
        name: str,
        subject: Optional[str] = None,
        partitions: int = 1,
        rf: int = 1,
        *args,
        **kwargs,
    ):
        async def create():
            async with LiftBridgeClient() as client:
                await client.create_stream(
                    name=name,
                    subject=subject,
                    partitions=partitions,
                    replication_factor=rf,
                )

        subject = subject or name
        run_sync(create)
Example 15
        def create_stream(name: str, n_partitions: int, replication_factor: int):
            base_name = name.split(".")[0]
            minisr = 0
            if base_name == "ch":
                replication_factor = min(
                    config.liftbridge.stream_ch_replication_factor, replication_factor
                )
                minisr = min(2, replication_factor)

            async def wrapper():
                async with LiftBridgeClient() as client:
                    await client.create_stream(
                        subject=name,
                        name=name,
                        partitions=n_partitions,
                        minisr=minisr,
                        replication_factor=replication_factor,
                        retention_max_bytes=getattr(
                            config.liftbridge, f"stream_{base_name}_retention_max_bytes", 0
                        ),
                        retention_max_age=getattr(
                            config.liftbridge, f"stream_{base_name}_retention_max_age", 0
                        ),
                        segment_max_bytes=getattr(
                            config.liftbridge, f"stream_{base_name}_segment_max_bytes", 0
                        ),
                        segment_max_age=getattr(
                            config.liftbridge, f"stream_{base_name}_segment_max_age", 0
                        ),
                        auto_pause_time=getattr(
                            config.liftbridge, f"stream_{base_name}_auto_pause_time", 0
                        ),
                        auto_pause_disable_if_subscribers=getattr(
                            config.liftbridge,
                            f"stream_{base_name}_auto_pause_disable_if_subscribers",
                            False,
                        ),
                    )

            run_sync(wrapper)
Example 16
    def iter_ch_cursors(self, stream, partition):
        # cluster_topology is a comma-separated list with one element
        # per shard; each element is the replica count for that shard.
        cluster = config.clickhouse.cluster_topology.split(",")
        for replica in range(0, int(cluster[partition])):
            cursor = run_sync(
                functools.partial(self.fetch_cursor, stream, partition,
                                  f"chwriter-{replica}"))
            yield (
                "stream_cursor_offset",
                ("name", stream),
                ("partition", partition),
                ("cursor_id", f"chwriter-{replica}"),
            ), cursor
Example 17
File: base.py Project: nbashev/noc
    def getnext(
        self,
        oid,
        community_suffix=None,
        filter=None,
        cached=False,
        only_first=False,
        bulk=None,
        max_repetitions=None,
        version=None,
        max_retries=0,
        timeout=10,
        raw_varbinds=False,
        display_hints=None,
    ):
        async def run():
            try:
                r = await snmp_getnext(
                    address=self.script.credentials["address"],
                    oid=oid,
                    community=str(self.script.credentials["snmp_ro"]),
                    bulk=self.script.has_snmp_bulk() if bulk is None else bulk,
                    max_repetitions=max_repetitions,
                    filter=filter,
                    only_first=only_first,
                    tos=self.script.tos,
                    udp_socket=self.get_socket(),
                    version=version,
                    max_retries=max_retries,
                    timeout=timeout,
                    raw_varbinds=raw_varbinds,
                    display_hints=display_hints,
                    response_parser=self.script.profile.get_snmp_response_parser(self.script),
                    rate_limit=self.rate_limit,
                )
                return r
            except SNMPError as e:
                if e.code == TIMED_OUT:
                    raise self.TimeOutError()
                else:
                    raise

        if "snmp_ro" not in self.script.credentials:
            raise SNMPError(code=ERR_SNMP_BAD_COMMUNITY)
        if display_hints is None:
            display_hints = self._get_display_hints()
        version = self._get_snmp_version(version)
        return run_sync(run, close_all=False)
Example 18
File: base.py Project: nbashev/noc
    def get(self,
            oids,
            cached=False,
            version=None,
            raw_varbinds=False,
            display_hints=None):
        """
        Perform SNMP GET request
        :param oid: string or list of oids
        :param cached: True if get results can be cached during session
        :param raw_varbinds: Return value in BER encoding
        :param display_hints: Dict of  oid -> render_function. See BaseProfile.snmp_display_hints for details
        :returns: eigther result scalar or dict of name -> value
        """
        async def run():
            try:
                r = await snmp_get(
                    address=self.script.credentials["address"],
                    oids=oids,
                    community=str(self.script.credentials["snmp_ro"]),
                    tos=self.script.tos,
                    udp_socket=self.get_socket(),
                    version=version,
                    raw_varbinds=raw_varbinds,
                    display_hints=display_hints,
                    response_parser=self.script.profile.get_snmp_response_parser(self.script),
                    rate_limit=self.rate_limit,
                )
                self.timeouts = self.timeouts_limit
                return r
            except SNMPError as e:
                if e.code == TIMED_OUT:
                    if self.timeouts_limit:
                        self.timeouts -= 1
                        if not self.timeouts:
                            raise self.FatalTimeoutError()
                    raise self.TimeOutError()
                else:
                    raise

        if "snmp_ro" not in self.script.credentials:
            raise SNMPError(code=ERR_SNMP_BAD_COMMUNITY)
        if display_hints is None:
            display_hints = self._get_display_hints()
        version = self._get_snmp_version(version)
        return run_sync(run, close_all=False)
Example 19
    def resolve_sync(self, name, hint=None, wait=True, timeout=None, full_result=False):
        """
        Returns *hint* when service is active or new service
        instance,
        :param name:
        :param hint:
        :param full_result:
        :return:
        """

        async def _resolve():
            r = await self.resolve(
                name, hint=hint, wait=wait, timeout=timeout, full_result=full_result
            )
            return r

        return run_sync(_resolve)
Example 20
def resolve(name,
            hint=None,
            wait=True,
            timeout=None,
            full_result=False,
            near=False,
            critical=False):
    """
    Returns *hint* when service is active or new service
    instance,
    :param name:
    :param hint:
    :param wait:
    :param timeout:
    :param full_result:
    :param near:
    :return:
    """
    async def _resolve():
        dcs = get_dcs()
        try:
            if near:
                r = await dcs.resolve_near(
                    name,
                    hint=hint,
                    wait=wait,
                    timeout=timeout,
                    full_result=full_result,
                    critical=critical,
                )
            else:
                r = await dcs.resolve(
                    name,
                    hint=hint,
                    wait=wait,
                    timeout=timeout,
                    full_result=full_result,
                    critical=critical,
                    track=False,
                )
        finally:
            dcs.stop()
        return r

    return run_sync(_resolve)
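A hypothetical call; the service name is illustrative:

    # Block until a "chwriter" instance is registered in the DCS,
    # giving up after 5 seconds.
    addr = resolve("chwriter", wait=True, timeout=5)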
Example 21
def fetch_sync(
    url: str,
    method: str = "GET",
    headers=None,
    body: Optional[bytes] = None,
    connect_timeout=DEFAULT_CONNECT_TIMEOUT,
    request_timeout=DEFAULT_REQUEST_TIMEOUT,
    resolver=resolve,
    max_buffer_size=DEFAULT_BUFFER_SIZE,
    follow_redirects=False,
    max_redirects=DEFAULT_MAX_REDIRECTS,
    validate_cert=config.http_client.validate_certs,
    allow_proxy: bool = False,
    proxies=None,
    user: Optional[str] = None,
    password: Optional[str] = None,
    content_encoding: Optional[str] = None,
    eof_mark: Optional[bytes] = None,
):
    async def _fetch():
        return await fetch(
            url,
            method=method,
            headers=headers,
            body=body,
            connect_timeout=connect_timeout,
            request_timeout=request_timeout,
            resolver=resolver,
            max_buffer_size=max_buffer_size,
            follow_redirects=follow_redirects,
            max_redirects=max_redirects,
            validate_cert=validate_cert,
            allow_proxy=allow_proxy,
            proxies=proxies,
            user=user,
            password=password,
            content_encoding=content_encoding,
            eof_mark=eof_mark,
        )

    return run_sync(_fetch)
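A hypothetical call, assuming fetch resolves to a (code, headers, body) triple in the Tornado style; the URL is illustrative and the return shape is not confirmed by this excerpt:

    code, headers, body = fetch_sync(
        "https://example.com/api/status",
        request_timeout=10,
        follow_redirects=True,
    )
    if code == 200:
        print(body)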
Example 22
File: mx.py Project: nbashev/noc
def get_mx_partitions() -> int:
    """
    Get number of MX stream partitions
    :return:
    """
    async def wrap():
        async with LiftBridgeClient() as client:
            r = await client.fetch_metadata(MX_STREAM, wait_for_stream=True)
            for m in r.metadata:
                if m.name == MX_STREAM:
                    return len(m.partitions)

    global _mx_partitions

    if _mx_partitions:
        return _mx_partitions
    with _mx_lock:
        if _mx_partitions:
            return _mx_partitions  # Set by concurrent thread
        _mx_partitions = run_sync(wrap)
        return _mx_partitions
Example 23
def save_avatar(user: User = Depends(get_current_user),
                image: UploadFile = File(...)):
    async def read_file() -> bytes:
        return smart_bytes(await image.read(config.ui.max_avatar_size + 1))

    data = run_sync(read_file)
    if len(data) > config.ui.max_avatar_size:
        raise HTTPException(status_code=413)
    content_type = ContentType.from_content_type(image.content_type)
    if content_type is None:
        raise HTTPException(status_code=421)
    avatar = Avatar.objects.filter(user_id=str(user.id)).first()
    if avatar:
        # Update
        avatar.data = data
        avatar.content_type = content_type
    else:
        # Create
        avatar = Avatar(user_id=str(user.id),
                        data=data,
                        content_type=content_type)
    avatar.save()
    return StatusResponse(status=True)
Example 24
    def iter_limits(self) -> Iterable[Tuple[str, int]]:
        async def get_slot_limits():
            nonlocal slot_name
            return await dcs.get_slot_limit(slot_name)

        dcs = get_dcs()
        # Plain streams
        for stream_name in self.STREAMS:
            yield stream_name, 1
        # Slot-based streams
        for slot_name, stream_name in self.iter_slot_streams():
            n_partitions = run_sync(get_slot_limits)
            if n_partitions:
                yield stream_name, n_partitions
        # Metric scopes
        n_ch_shards = len(config.clickhouse.cluster_topology.split(","))
        for scope in MetricScope.objects.all():
            yield f"ch.{scope.table_name}", n_ch_shards
        # BI models
        for name in bi_loader:
            bi_model = bi_loader[name]
            if not bi_model:
                continue
            yield f"ch.{bi_model._meta.db_table}", n_ch_shards
Example 25
    def get_meta(self) -> Metadata:
        async def get_meta() -> Metadata:
            async with LiftBridgeClient() as client:
                return await client.fetch_metadata()

        return run_sync(get_meta)
Example 26
 async def wrapper():
     self.print("Altering stream %s" % name)
     async with LiftBridgeClient() as client:
         # Create temporary stream with same structure, as original one
         tmp_stream = "__tmp-%s" % name
         self.print("Creating temporary stream %s" % tmp_stream)
         await client.create_stream(
             subject=tmp_stream,
             name=tmp_stream,
             partitions=old_partitions,
             replication_factor=replication_factor,
         )
         # Copy all unread data to temporary stream as is
         for partition in range(old_partitions):
             self.print(
                 "Copying partition %s:%s to %s:%s"
                 % (name, partition, tmp_stream, partition)
             )
             n_msg[partition] = 0
             # Get the partition's current newest offset directly from
             # the open client: calling run_sync inside a running event
             # loop would deadlock.
             p_meta = await client.fetch_partition_metadata(name, partition)
             newest_offset = p_meta.newest_offset or 0
             # Fetch the reader cursor position
             current_offset = await client.fetch_cursor(
                 stream=name,
                 partition=partition,
                 cursor_id=self.CURSOR_STREAM[name.split(".")[0]],
             )
             if current_offset > newest_offset:
                 # Fix if cursor not set properly
                 current_offset = newest_offset
             self.print(
                 "Start copying from current_offset: %s to newest offset: %s"
                 % (current_offset, newest_offset)
             )
             if current_offset < newest_offset:
                 async for msg in client.subscribe(
                     stream=name, partition=partition, start_offset=current_offset
                 ):
                     await client.publish(
                         msg.value,
                         stream=tmp_stream,
                         partition=partition,
                     )
                     n_msg[partition] += 1
                     if msg.offset == newest_offset:
                         break
             if n_msg[partition]:
                 self.print("  %d messages has been copied" % n_msg[partition])
             else:
                 self.print("  nothing to copy")
         # Drop original stream
         self.print("Dropping original stream %s" % name)
         await client.delete_stream(name)
         # Create new stream with required structure
         self.print("Creating stream %s" % name)
         await client.create_stream(
             subject=name,
             name=name,
             partitions=new_partitions,
             replication_factor=replication_factor,
         )
         # Copy data from temporary stream to a new one
         for partition in range(old_partitions):
             self.print(
                 "Restoring partition %s:%s to %s"
                 % (tmp_stream, partition, new_partitions)
             )
             # Re-route dropped partitions to partition 0
             dest_partition = partition if partition < new_partitions else 0
             n = n_msg[partition]
             if n > 0:
                 async for msg in client.subscribe(
                     stream=tmp_stream,
                     partition=partition,
                     start_position=StartPosition.EARLIEST,
                 ):
                     await client.publish(
                         msg.value, stream=name, partition=dest_partition
                     )
                     n -= 1
                     if not n:
                         break
                 self.print("  %d messages restored" % n_msg[partition])
             else:
                 self.print("  nothing to restore")
         # Drop temporary stream
         self.print("Dropping temporary stream %s" % tmp_stream)
         await client.delete_stream(tmp_stream)
         # Done
         self.print("Stream %s has been altered" % name)
Example 27
File: whois.py Project: nbashev/noc
def whois(query, fields=None):
    async def _whois():
        return await whois_async(query, fields)

    return run_sync(_whois)
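A hypothetical query; the object and field names are illustrative, and whois_async's exact return shape is not shown in this excerpt:

    # Fetch selected fields for an aut-num object.
    data = whois("AS65000", fields=["as-name", "descr"])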
Example 28
    def load(self):
        run_sync(self.load_async)
Example 29
    def iter_metrics(self):
        meta: Metadata = run_sync(self.get_meta)

        for stream in meta.metadata:
            for p in sorted(stream.partitions):
                if stream.name.startswith("_"):
                    continue
                name, pool, cursor = stream.name, None, -1
                if name.startswith("ch"):
                    # Chwriter streams
                    for r in self.iter_ch_cursors(name, p):
                        yield r
                elif "." in name:
                    name, pool = name.split(".", 1)
                if name in self.CURSOR_STREAM:
                    cursor = run_sync(
                        functools.partial(self.fetch_cursor, stream.name, p,
                                          self.CURSOR_STREAM[name]))
                    if pool:
                        yield (
                            "stream_cursor_offset",
                            ("name", stream.name),
                            ("partition", p),
                            ("cursor_id", self.CURSOR_STREAM[name]),
                            ("pool", pool),
                        ), cursor
                    else:
                        yield (
                            "stream_cursor_offset",
                            ("name", stream.name),
                            ("partition", p),
                            ("cursor_id", self.CURSOR_STREAM[name]),
                        ), cursor

                try:
                    p_meta: PartitionMetadata = run_sync(
                        functools.partial(
                            self.get_partition_meta,
                            stream.name,
                            p,
                        ))
                except Exception as e:
                    self.logger.error(
                        "[%s|%s] Failed getting data for partition: %s",
                        stream.name, p, e)
                    continue
                if pool:
                    yield (
                        "stream_newest_offset",
                        ("name", stream.name),
                        ("partition", p),
                        ("pool", pool),
                    ), p_meta.newest_offset
                    yield (
                        "stream_high_watermark",
                        ("name", stream.name),
                        ("partition", p),
                        ("pool", pool),
                    ), p_meta.high_watermark
                else:
                    yield (
                        "stream_newest_offset",
                        ("name", stream.name),
                        ("partition", p),
                    ), p_meta.newest_offset
                    yield (
                        "stream_high_watermark",
                        ("name", stream.name),
                        ("partition", p),
                    ), p_meta.high_watermark
Example 30
    def handle_delete_stream(self, name: str, *args, **kwargs):
        async def delete():
            async with LiftBridgeClient() as client:
                await client.delete_stream(name)

        run_sync(delete)