Example 1
def create_topics():
    """
    Create any required kafka topics if they don't exist.
    """
    settings = get_settings()
    client = AdminClient(
        {"bootstrap.servers": ",".join(settings.kafka_bootstrap_servers)})
    metadata = client.list_topics()
    if metadata.topics.get(kafka_sync_topic) is None:
        new_topic = NewTopic(
            kafka_sync_topic,
            num_partitions=settings.kafka_admin_new_topic_partitions,
            replication_factor=settings.kafka_admin_new_topic_replication_factor,
        )
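        # NOTE: AdminClient.create_topics returns futures without blocking; topic creation
        # may still be in progress when the debug message below is logged.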
        client.create_topics([new_topic])
        logger.debug(f"create_topics: created topic = {kafka_sync_topic}")
Example 2
def get_kafka_listener() -> Optional[ConfluentAsyncKafkaListener]:
    """
    :return: a new connected ConfluentAsyncKafkaListener instance
    """
    settings = get_settings()
    consumer_conf = {
        'bootstrap.servers': ','.join(settings.kafka_bootstrap_servers),
        'group.id': settings.kafka_consumer_default_group_id,
        'enable.auto.commit': settings.kafka_consumer_default_enable_auto_commit
    }
    listener_params = {
        'poll_timeout': settings.kafka_listener_timeout,
        'poll_yield': settings.kafka_topics_timeout
    }
    return ConfluentAsyncKafkaListener(configs=consumer_conf,
                                       params=listener_params,
                                       loop=get_running_loop())
Example 3
async def retransmitter():
    """
    Process messages in nats_retransmit_queue.
    """
    global nats_retransmit_queue
    settings = get_settings()
    logger.trace("Starting retransmit loop")

    while not nats_retransmit_canceled:
        # iterate through nats_retransmit_queue
        for i in range(len(nats_retransmit_queue)):
            logger.trace(
                f"retransmitter: retransmitting from nats_retransmit_queue position {i}"
            )
            await do_retransmit(nats_retransmit_queue[i], i)
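            # pause briefly between retransmissions when more entries remain in the queue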
            if i + 1 < len(nats_retransmit_queue):
                await asyncio.sleep(2)
        await asyncio.sleep(settings.nats_retransmit_loop_interval_secs)
Example 4
def log_configuration() -> None:
    """
    Logs Connect configuration settings.
    "General" settings are logged at an INFO level.
    "Internal" settings for clients/components are logged at a DEBUG level.
    """
    settings = get_settings()
    header_footer_length = 50

    logger.debug("*" * header_footer_length)
    logger.debug("Connect Configuration Settings")
    logger.debug("=" * header_footer_length)
    logger.debug(f"UVICORN_APP: {settings.uvicorn_app}")
    logger.debug(f"UVICORN_HOST: {settings.uvicorn_host}")
    logger.debug(f"UVICORN_PORT: {settings.uvicorn_port}")
    logger.debug(f"UVICORN_RELOAD: {settings.uvicorn_reload}")
    logger.debug("=" * header_footer_length)

    logger.debug(
        f"CERTIFICATE_AUTHORITY_PATH: {settings.certificate_authority_path}")
    logger.debug(
        f"LOGGING_CONFIG_PATH: {settings.connect_logging_config_path}")
    logger.debug("=" * header_footer_length)

    logger.debug(f"CONNECT_CA_FILE: {settings.connect_ca_file}")
    logger.debug(f"CONNECT_CA_PATH: {settings.connect_ca_path}")
    logger.debug(
        f"CONNECT_CONFIG_DIRECTORY: {settings.connect_config_directory}")
    logger.debug(f"CONNECT_CERT: {settings.connect_cert_name}")
    logger.debug(f"CONNECT_CERT_KEY: {settings.connect_cert_key_name}")
    logger.debug("=" * header_footer_length)

    logger.debug(
        f"KAFKA_BOOTSTRAP_SERVERS: {settings.kafka_bootstrap_servers}")
    logger.debug(f"KAFKA_PRODUCER_ACKS: {settings.kafka_producer_acks}")
    logger.debug("=" * header_footer_length)

    logger.debug(f"NATS_SERVERS: {settings.nats_servers}")
    logger.debug(f"NATS_ALLOW_RECONNECT: {settings.nats_allow_reconnect}")
    logger.debug(
        f"NATS_MAX_RECONNECT_ATTEMPTS: {settings.nats_max_reconnect_attempts}")
    logger.debug("=" * header_footer_length)

    logger.debug("*" * header_footer_length)
Example 5
async def start_sync_event_subscribers():
    """
    Create a NATS subscriber for 'nats_sync_subject' for the local NATS server/cluster and
    for each NATS server defined by 'nats_sync_subscribers' in config.py.
    """
    settings = get_settings()

    # subscribe to nats_sync_subject from the local NATS server or cluster
    await start_local_subscriber(nats_sync_consumer, nats_sync_event_handler,
                                 "EVENTS", "SYNC")

    # subscribe to nats_sync_subject from any additional NATS servers
    await start_subscribers(
        nats_sync_consumer,
        nats_sync_event_handler,
        settings.nats_sync_subscribers,
        "EVENTS",
        "SYNC",
    )
Example 6
def get_kafka_consumer(
        topic_name: str,
        partition: int,
        offset: Optional[int] = None,
        consumer_group_id: Optional[str] = None) -> ConfluentAsyncKafkaConsumer:
    """
    Instantiates an async KafkaConsumer client. Accepts an optional offset (int) and an optional
    consumer_group_id (str). If an offset is not provided, consumption begins at the first
    available message on the specified partition.

    The caller is expected to invoke get_message_from_kafka_cb() with a callback method after
    calling this method.

    :param topic_name: The topic name to look up a message on
    :param partition: The partition id to read from
    :param offset: An optional offset identifying a single message to look up
    :param consumer_group_id: An optional consumer group id

    :returns: a new instance of ConfluentAsyncKafkaConsumer
    """
    settings = get_settings()
    if topic_name is None or partition is None:
        msg = "Init Error: No topic_name or partition information provided."
        logger.error(msg)
        raise ValueError(msg)

    # We pull default configs from config.py
    consumer_conf = {
        "bootstrap.servers": ",".join(settings.kafka_bootstrap_servers),
        "group.id": settings.kafka_consumer_default_group_id,
        "auto.offset.reset": settings.kafka_consumer_default_auto_offset_reset,
        "enable.auto.commit": settings.kafka_consumer_default_enable_auto_commit,
        "enable.auto.offset.store": settings.kafka_consumer_default_enable_auto_offset_store,
    }

    kafka_consumer = ConfluentAsyncKafkaConsumer(topic_name, partition,
                                                 consumer_conf, offset,
                                                 consumer_group_id)
    return kafka_consumer
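
As the docstring notes, callers follow up with get_message_from_kafka_cb(). A minimal sketch of that flow, assuming the method is awaitable and passes the fetched message to the supplied callback; the callback signature, topic name, partition, and offset below are illustrative placeholders, not values from the source:

async def handle_replayed_message(message) -> None:
    # placeholder callback: process the message fetched from Kafka
    print(message)


async def replay_single_message() -> None:
    # topic, partition, and offset are illustrative values only
    consumer = get_kafka_consumer(topic_name="LFH_SYNC", partition=0, offset=42)
    await consumer.get_message_from_kafka_cb(handle_replayed_message)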
Example 7
async def create_nats_client(servers: List[str]) -> Optional[NatsClient]:
    """
    Create a NATS client for any NATS server or NATS cluster.

    :param servers: List of one or more NATS servers.  If multiple servers are
    provided, they should be in the same NATS cluster.
    :return: a connected NATS client instance
    """
    settings = get_settings()

    ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
    ssl_ctx.load_verify_locations(settings.nats_rootCA_file)

    nats_client = NatsClient()
    await nats_client.connect(
        servers=servers,
        loop=get_running_loop(),
        tls=ssl_ctx,
        allow_reconnect=settings.nats_allow_reconnect,
        max_reconnect_attempts=settings.nats_max_reconnect_attempts)
    logger.debug(f'Created NATS client for servers = {servers}')

    return nats_client
Example 8
async def create_nats_client(servers: List[str]) -> Optional[NatsClient]:
    """
    Create a NATS client for any NATS server or NATS cluster configured to accept this installation's NKey.

    :param servers: List of one or more NATS servers in the same NATS cluster.
    :return: a connected NATS client instance
    """
    settings = get_settings()

    client = await nats.connect(
        verbose=True,
        servers=servers,
        nkeys_seed=os.path.join(settings.connect_config_directory,
                                settings.nats_nk_file),
        tls=get_ssl_context(ssl.Purpose.SERVER_AUTH),
        allow_reconnect=settings.nats_allow_reconnect,
        max_reconnect_attempts=settings.nats_max_reconnect_attempts,
    )

    logger.info("Created NATS client")
    logger.debug(f"Created NATS client for servers = {servers}")

    return client
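
A minimal usage sketch for the client returned above, relying on the standard nats-py publish and close calls; the subject and payload are placeholders, not values from the source:

async def publish_example() -> None:
    settings = get_settings()
    client = await create_nats_client(settings.nats_servers)
    if client:
        # publish a placeholder payload on a placeholder subject, then close the connection
        await client.publish("EVENTS.example", b'{"status": "ok"}')
        await client.close()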
Example 9
def configure_logging() -> None:
    """
    Configures logging for the connect application.
    Logging configuration is parsed from the setting/environment variable LOGGING_CONFIG_PATH, if present.
    If LOGGING_CONFIG_PATH is not found, a basic config is applied.
    """
    def apply_basic_config():
        """Applies a basic config for console logging"""
        add_trace_logging()
        logging.basicConfig(
            stream=sys.stdout,
            level=logging.INFO,
            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        )

    settings = get_settings()

    if os.path.exists(settings.connect_logging_config_path):
        with open(settings.connect_logging_config_path, "r") as f:
            try:
                logging_config = yaml.safe_load(f)
                logging.config.dictConfig(logging_config)
                logger.info(
                    f"Loaded logging configuration from {settings.connect_logging_config_path}"
                )
                add_trace_logging()
            except YAMLError as e:
                apply_basic_config()
                logger.error(
                    f"Unable to load logging configuration from file: {e}.")
                logger.info("Applying basic logging configuration.")
    else:
        apply_basic_config()
        logger.info(
            "Logging configuration not found. Applying basic logging configuration."
        )
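
logging.config.dictConfig expects the standard dictConfig schema. A minimal illustration of the kind of configuration the YAML file at connect_logging_config_path could contain, expressed here as the equivalent Python dict; this is an assumption for illustration, not the project's shipped configuration:

example_logging_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "standard": {
            "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        }
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "standard",
            "stream": "ext://sys.stdout",
        }
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}

The format string mirrors the basic-config fallback used by apply_basic_config() above.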
Example 10
"""
kafka_segments.py

Connect convenience functions to handle Kafka message segmentation
"""

import uuid
import math
import time
import logging
from connect.config import get_settings

logger = logging.getLogger(__name__)

settings = get_settings()


def segment_message(msg, chunk_size=settings.kafka_message_chunk_size):
    """
    Utility function to segment a large message into chunks that the producer can send to the broker.
    The function yields each chunk, one at a time, together with the metadata that the producer
    client can store in the message headers when sending that segment to the broker.

    This supports headers that uniquely identify each segment by its id (uuid), the total segment
    count, and a 1-based index.

    Example usage of `segment_message`:
```
        for segment, identifier, count, index in segment_message(msg, self.segment_size):
            segment_headers = {
                ID: identifier,
                COUNT: count,   # remaining header keys assumed to mirror ID; the source excerpt truncates here
                INDEX: index,
            }
```
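
Given the math import above, the segment count implied by this docstring is presumably derived from the message length and chunk size roughly as follows; this is an illustrative assumption, not the function body:

import math

def expected_segment_count(msg: str, chunk_size: int) -> int:
    # assumed arithmetic: number of chunk_size-sized slices needed to cover msg
    return math.ceil(len(msg) / chunk_size)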
Example 11
async def nats_sync_event_handler(msg: Msg):
    """
    Callback for NATS 'nats_sync_subject' messages

    :param msg: a message delivered from the NATS server
    """
    subject = msg.subject
    reply = msg.reply
    data = msg.data.decode()
    message = json.loads(data)
    logger.trace(
        f"nats_sync_event_handler: received a message with id={message['uuid']} on {subject} {reply}"
    )

    response = await msg.ack_sync()
    logger.trace(f"nats_sync_event_handler: ack response={response}")

    # Emit an app_sync message so LFH clients that are listening only for
    # messages from this LFH node will be able to get all sync'd messages
    # from all LFH nodes.
    js = await get_jetstream_context()
    await js.publish(nats_app_sync_subject, msg.data)

    # if the message is from our local LFH, don't store in kafka
    if get_settings().connect_lfh_id == message["lfh_id"]:
        logger.trace(
            "nats_sync_event_handler: detected local LFH message, not storing in kafka",
        )
        return

    # store the message in kafka
    kafka_producer = get_kafka_producer()
    kafka_cb = KafkaCallback()
    await kafka_producer.produce_with_callback(
        kafka_sync_topic, data, on_delivery=kafka_cb.get_kafka_result)
    logger.trace(
        f"nats_sync_event_handler: stored msg in kafka topic {kafka_sync_topic} at {kafka_cb.kafka_result}",
    )

    # set up transmit servers, if defined
    transmit_servers = []
    settings = get_settings()
    if message["data_format"].startswith("FHIR-R4_"):
        for s in settings.connect_external_fhir_servers:
            if settings.connect_generate_fhir_server_url:
                origin_url_elements = message["consuming_endpoint_url"].split("/")
                resource_type = origin_url_elements[-1]
                transmit_servers.append(f"{s}/{resource_type}")
            else:
                transmit_servers.append(s)

    # perform message type-specific decoding
    if message["data_format"].startswith("X12_"):
        msg_data = decode_to_str(message["data"])
    else:
        msg_data = decode_to_dict(message["data"])

    # process the message into the local store
    workflow = core.CoreWorkflow(
        message=msg_data,
        origin_url=message["consuming_endpoint_url"],
        certificate_verify=settings.certificate_verify,
        data_format=message["data_format"],
        lfh_id=message["lfh_id"],
        transmit_servers=transmit_servers,
        do_sync=False,
        operation=message["operation"],
        do_retransmit=settings.nats_enable_retransmit,
    )

    result = await workflow.run()
    logger.trace(
        f"nats_sync_event_handler: successfully replayed nats sync message with id={message['uuid']}"
    )