Example 1
def test_fatal():
    """ Test fatal exceptions """

    # Configure an invalid broker and make sure the ALL_BROKERS_DOWN
    # error is seen in the error callback.
    p = Producer({'error_cb': error_cb})

    with pytest.raises(KafkaException) as exc:
        KafkaError._test_raise_fatal()
    err = exc.value.args[0]
    assert isinstance(err, KafkaError)
    assert err.fatal() is True

    p.poll(0)  # Need some p use to avoid flake8 unused warning
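
The error_cb referenced above is defined elsewhere in the test module. A minimal sketch of what such a callback might look like (the module-level flag name is an assumption, not part of the original test):

from confluent_kafka import KafkaError

# Hypothetical error callback for the Producer above: it only records whether
# the client reported the ALL_BROKERS_DOWN error.
seen_all_brokers_down = False

def error_cb(err):
    global seen_all_brokers_down
    if err.code() == KafkaError._ALL_BROKERS_DOWN:
        seen_all_brokers_down = True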
Example 3
def test_produce_error_message(mocker):
    """
    Given:
        - initialized KafkaCommunicator
    When:
        - running the kafka-produce-msg command and message delivery fails with an error.
    Then:
        - Assert the relevant exception is raised.
    """
    mocker.patch.object(KProducer, '__init__', return_value=None)
    demisto_args = {'topic': 'some-topic', 'partitioning_key': 1, 'value': 'some-value'}
    produce_mock = mocker.patch.object(KProducer, 'produce')
    kafka_error = KafkaError(1)

    def run_delivery_report():
        message = MessageMock(message='some-value', offset=0, topic='some-topic', partition=1)
        KafkaCommunicator.delivery_report(kafka_error, message)

    flush_mock = mocker.patch.object(KProducer, 'flush', side_effect=run_delivery_report)

    with pytest.raises(DemistoException) as exception_info:
        produce_message(KAFKA, demisto_args)

    assert 'Message delivery failed:' in str(exception_info.value)
    assert str(kafka_error) in str(exception_info.value)

    produce_mock.assert_called_once_with(topic='some-topic', partition=1, value='some-value',
                                         on_delivery=KAFKA.delivery_report)
    flush_mock.assert_called_once()
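
MessageMock and the KAFKA fixture are defined elsewhere in this test module. A plausible minimal stand-in for MessageMock, with attribute names inferred from the keyword arguments above:

# Hypothetical stand-in for the MessageMock used above; it mimics the parts of
# confluent_kafka.Message that delivery_report is expected to read.
class MessageMock:
    def __init__(self, message=None, offset=None, topic=None, partition=None):
        self._value = message
        self._offset = offset
        self._topic = topic
        self._partition = partition

    def value(self):
        return self._value

    def offset(self):
        return self._offset

    def topic(self):
        return self._topic

    def partition(self):
        return self._partition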
Example 4
 def test_max_errors(self):
     with unittest.mock.MagicMock() as mock_kafka_consumer:
         # poll returns a message whose error() reports a non-fatal, retriable error
         e = KafkaError(KafkaError._APPLICATION,
                        "Test Error",
                        fatal=False,
                        retriable=True)
         mock_kafka_consumer.poll.return_value.error.return_value = e
         mock_kafka_consumer.poll.return_value.value.return_value = example_input_data
         conf = {
             'broker': '',
             'group': '',
             'input_topic': '',
             'output_topic': '',
             'batch_size': 5,
             'timeout': 1,
             'max_errors': 0,
             'cache_db': '',
             'poll_timeout': 1,
             'max_poll_interval': 300000
         }
         alerts = []
         # consume should report consuming 0 alerts
         self.assertEqual(
             wrapper.consume(conf, log, alerts, mock_kafka_consumer), 0)
         # alerts should be empty
         self.assertEqual(alerts, [])
         # poll should have been called once with timeout 1
         mock_kafka_consumer.poll.assert_called_once_with(1)
Example 5

def test_new_produce_error_custom_message():
    pe = ProduceError(
        KafkaError(KafkaError._KEY_SERIALIZATION, "Unable to serialize key"))

    assert pe.code == KafkaError._KEY_SERIALIZATION
    assert pe.name == u'_KEY_SERIALIZATION'
    assert pe.args[0].str() == "Unable to serialize key"
Example 6

def test_new_produce_error_caused_by():
    pe = ProduceError(KafkaError(KafkaError.INVALID_CONFIG),
                      exception=ValueError())

    assert pe.code == KafkaError.INVALID_CONFIG
    assert pe.name == u'INVALID_CONFIG'
    assert isinstance(pe.exception, ValueError)
Example 7
    def test_notify_sources_application_availability_kafka_exception(
        self,
        mock_kafka_producer,
    ):
        """Test notify source application availability handling KafkaException."""
        kafka_producer = mock_kafka_producer(self.sources_kafka_config)
        kafka_producer.produce.side_effect = KafkaException(KafkaError(5))

        with override_settings(
            LISTENER_SERVER=self.listener_server,
            LISTENER_PORT=self.listener_port,
            SOURCES_STATUS_TOPIC=self.sources_kafka_topic,
            SOURCES_RESOURCE_TYPE=self.sources_resource_type,
            SOURCES_AVAILABILITY_EVENT_TYPE=self.sources_availability_event_type,
            SOURCES_ENABLE_DATA_MANAGEMENT_FROM_KAFKA=True,
        ):
            with self.assertRaises(KafkaProducerException):
                sources.notify_application_availability(
                    self.account_number,
                    self.application_id,
                    availability_status=self.available_status,
                )

            kafka_producer.produce.assert_called_with(
                topic=self.sources_kafka_topic,
                value=json.dumps(self.kafka_payload),
                headers={
                    "x-rh-identity": self.headers["X-RH-IDENTITY"],
                    "event_type": self.sources_availability_event_type,
                },
                callback=_check_response,
            )
            kafka_producer.flush.assert_not_called()
Example 8

def test_new_consume_error_custom_message():
    ce = ConsumeError(
        KafkaError(KafkaError._KEY_SERIALIZATION, "Unable to serialize key"))

    assert ce.code == KafkaError._KEY_SERIALIZATION
    assert ce.name == u'_KEY_SERIALIZATION'
    assert ce.args[0].str() == "Unable to serialize key"
Example 9

def test_new_consume_error_caused_by():
    ce = ConsumeError(KafkaError(KafkaError.INVALID_CONFIG),
                      exception=ValueError())

    assert ce.code == KafkaError.INVALID_CONFIG
    assert ce.name == u'INVALID_CONFIG'
    assert isinstance(ce.exception, ValueError)
Example 10
 def consume(self):
     credential_dict = self.get_credential_dict()
     consumer = Consumer(credential_dict)
     consumer.subscribe([self.topic])  # subscribe() expects a list of topics
     message = consumer.poll(1.0)
     if message is not None and message.error():
         raise KafkaError(ERROR_CODES["KafkaConsumerError"].format(message.error()))
     return message
Example 11
 def on_deliver(self, err, msg):
     if err is not None:
         logger.error(f'Message delivery failed: {err}, {msg}')
         if err.code() == KafkaError._MSG_TIMED_OUT:
             logger.error(f'{msg.topic()}, {msg}')
             self.producer = self.get_producer()
             self.producer.poll(0)
             self.producer.produce(msg.topic(), msg.value())
         else:
             # KafkaError is not constructible from a string; wrap it instead
             raise KafkaException(err)
Example 12
 def delivery_status(self, error, message):
     """ 
     Called once for each message produced to indicate delivery result.
     Triggered by poll() or flush().
     """
     if error:
         self.response.errors.append(KafkaError(ERROR_CODES["KafkaProducerError"].format(error)))
     else:
         self.response.success = True
         self.response.message = 'Message delivered to {} [{}]'.format(message.topic(), message.partition())
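
A callback like delivery_status is passed to produce() and only fires from poll() or flush(). A minimal wiring sketch; the broker address, topic, payload, and the handler instance are placeholders:

from confluent_kafka import Producer

# Sketch only: 'handler' stands in for an instance of the class above.
producer = Producer({'bootstrap.servers': 'localhost:9092'})
producer.produce('some-topic', b'payload', on_delivery=handler.delivery_status)
producer.flush(10)  # blocks until outstanding delivery reports have fired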
Example 13
 def read_until(self, check, timeout_s):
     begin = time.time()
     while True:
         if time.time() - begin > timeout_s:
             raise KafkaException(KafkaError(KafkaError._TIMED_OUT))
         for msg in self.stream:
             offset = msg.offset()
             value = msg.value().decode('utf-8')
             key = msg.key().decode('utf-8')
             if check(offset, key, value):
                 return
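
read_until drives an already-open message stream until the predicate matches or the deadline passes. A short usage sketch; the client object and the expected key are placeholders:

# Sketch: block for up to 30 seconds until a record with the expected key arrives.
client.read_until(lambda offset, key, value: key == 'expected-key', timeout_s=30)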
Example 14
def test_abortable():
    """ Test abortable exceptions """

    with pytest.raises(KafkaException) as exc:
        raise KafkaException(
            KafkaError(KafkaError.MEMBER_ID_REQUIRED, txn_requires_abort=True))
    err = exc.value.args[0]
    assert isinstance(err, KafkaError)
    assert not err.fatal()
    assert not err.retriable()
    assert err.txn_requires_abort()
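
The three predicates asserted above are what client code usually branches on. A sketch of the typical decision ladder for a transactional producer; the function name and retry policy are illustrative, not from the test:

from confluent_kafka import KafkaException

def handle_txn_error(err, producer):
    if err.fatal():
        raise KafkaException(err)      # unrecoverable: re-create the producer
    if err.txn_requires_abort():
        producer.abort_transaction()   # abort, then retry the whole batch
    elif err.retriable():
        pass                           # safe to retry the failed call as-is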
Example 15
def log_client_errors(kafka_error: confluent_kafka.KafkaError):
    if kafka_error.code() == confluent_kafka.KafkaError._ALL_BROKERS_DOWN:
        # This error occurs very frequently. It's not nearly as fatal as it
        # sounds: it really indicates that the client's broker metadata has
        # timed out. It appears to get triggered in races during client
        # shutdown, too. See https://github.com/edenhill/librdkafka/issues/2543
        # for more background.
        logger.warning("client is currently disconnected from all brokers")
    else:
        logger.error(f"internal kafka error: {kafka_error}")
        raise KafkaException.from_kafka_error(kafka_error)
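
log_client_errors is meant to be installed as the client-level error_cb. A minimal registration sketch; the broker address and group id are placeholders:

import confluent_kafka

consumer = confluent_kafka.Consumer({
    'bootstrap.servers': 'localhost:9092',
    'group.id': 'example-group',
    'error_cb': log_client_errors,  # invoked for client-level (non-message) errors
})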
Example 16
def non_fatal_error_on_1st_call(timeout):
    global nfe_call_count
    nfe_call_count += 1
    if nfe_call_count > 1:
        return MockMessage()
    else:
        e = KafkaError(KafkaError._APPLICATION,
                       "Test Error",
                       fatal=False,
                       retriable=True)
        return MockMessage(e)
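
MockMessage is defined elsewhere in that test module. A plausible minimal version, assuming the consuming loop only calls error() and value():

# Hypothetical MockMessage matching the calls above; a real
# confluent_kafka.Message exposes the same two accessors.
class MockMessage:
    def __init__(self, error=None, value=b'{}'):
        self._error = error
        self._value = value

    def error(self):
        return self._error

    def value(self):
        return self._value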
Example 17
def test_kafkaError_custom_msg():
    err = KafkaError(KafkaError._ALL_BROKERS_DOWN, "Mayday!")
    assert err == KafkaError._ALL_BROKERS_DOWN
    assert err.str() == "Mayday!"
    assert not err.fatal()
    assert not err.retriable()
    assert not err.txn_requires_abort()
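
For contrast, constructing the same error without a custom message falls back to the library's stock description for the code:

from confluent_kafka import KafkaError

err = KafkaError(KafkaError._ALL_BROKERS_DOWN)
print(err.str())  # the built-in description instead of "Mayday!"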
Example 18
def raise_for_kafka_error(err: KafkaError, msg: Optional[Message] = None):
    if not err:
        return None

    if err.code() in CONFLUENT_ERROR_LOOKUP.keys():
        raise CONFLUENT_ERROR_LOOKUP[err.code()](err.str(), err.code())
    else:
        raise KafkaException(err.str(), err.code())
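
A helper like this is typically invoked from a consumer poll loop. A short usage sketch; consumer construction is omitted:

# Sketch: surface message-level errors through raise_for_kafka_error.
msg = consumer.poll(1.0)
if msg is not None:
    raise_for_kafka_error(msg.error(), msg)  # returns None when error() is None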
Example 19
 def produce(self, topic, partition, key, value, timeout_s):
     self.last_msg = None
     self.producer.produce(topic,
                           key=key.encode('utf-8'),
                           value=value.encode('utf-8'),
                           partition=partition,
                           callback=lambda e, m: self.on_delivery(e, m))
     self.producer.flush(timeout_s)
     msg = self.last_msg
     if msg is None:
         raise KafkaException(KafkaError(KafkaError._MSG_TIMED_OUT))
     if msg.error() is not None:
         raise KafkaException(msg.error())
     assert msg.offset() is not None
     return msg.offset()
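
The on_delivery method used as the callback is not shown here. Given how last_msg is read back after flush(), a plausible minimal version is just a recorder:

 # Hypothetical companion to produce() above: delivery reports for both success
 # and failure arrive on msg, so recording it is enough.
 def on_delivery(self, err, msg):
     self.last_msg = msg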
Example 20
def test_fatal():
    """ Test fatal exceptions """

    # Configure an invalid broker and make sure the ALL_BROKERS_DOWN
    # error is seen in the error callback.
    p = Producer({'error_cb': error_cb})

    with pytest.raises(KafkaException) as exc:
        raise KafkaException(
            KafkaError(KafkaError.MEMBER_ID_REQUIRED, fatal=True))
    err = exc.value.args[0]
    assert isinstance(err, KafkaError)
    assert err.fatal()
    assert not err.retriable()
    assert not err.txn_requires_abort()

    p.poll(0)  # Need some p use to avoid flake8 unused warning
Example 21
 def test_fatal_error(self):
     with unittest.mock.MagicMock() as mock_kafka_consumer:
         # poll returns a message whose error() reports a fatal error
         e = KafkaError(KafkaError._FATAL,
                        "Test Error",
                        fatal=True,
                        retriable=False)
         mock_kafka_consumer.poll.return_value.error.return_value = e
         mock_kafka_consumer.poll.return_value.value.return_value = example_input_data
         alerts = []
         # consume should report consuming 0 alerts
         self.assertEqual(
             wrapper.consume(self.conf, log, alerts, mock_kafka_consumer),
             0)
         # alerts should be empty
         self.assertEqual(alerts, [])
         # poll should have been called once with timeout 1
         mock_kafka_consumer.poll.assert_called_once_with(1)
Example 22

def test_new_consume_error_constant():
    ce = ConsumeError(KafkaError(KafkaError._PARTITION_EOF))

    assert ce.code == KafkaError._PARTITION_EOF
    assert ce.name == u'_PARTITION_EOF'
Example 23

def test_new_produce_error_constant():
    pe = ProduceError(KafkaError(KafkaError._PARTITION_EOF))

    assert pe.code == KafkaError._PARTITION_EOF
    assert pe.name == u'_PARTITION_EOF'
Example 24
    def init_client(self, client_type: str, **client_conf: dict):
        """Obtain Kafka based Producer/Consumer."""
        Log.debug(f"initializing client_type: {client_type},"\
            f" **kwargs {client_conf}")
        # Validate and return if client already exists
        if client_type not in self._clients.keys():
            Log.error(f"MessageBusError: Invalid client type "\
                f"{errors.ERR_INVALID_CLIENT_TYPE}, {client_type}")
            raise MessageBusError(errors.ERR_INVALID_CLIENT_TYPE,\
                "Invalid client type %s", client_type)

        if client_conf['client_id'] in self._clients[client_type].keys():
            if self._clients[client_type][client_conf['client_id']] != {}:
                # Check if message_type exists to send/receive
                client = self._clients[client_type][client_conf['client_id']]
                available_message_types = client.list_topics().topics.keys()
                if client_type == 'producer':
                    if client_conf['message_type'] not in\
                        available_message_types:
                        Log.error(f"MessageBusError: message_type "\
                            f"{client_conf['message_type']} not found in "\
                            f"{available_message_types} for {client_type}")
                        raise MessageBusError(
                            errno.EINVAL, "Unknown Topic or "
                            "Partition. %s", KafkaError(3))
                elif client_type == 'consumer':
                    if not any(each_message_type in available_message_types for\
                        each_message_type in client_conf['message_types']):
                        Log.error(f"MessageBusError: message_type "\
                            f"{client_conf['message_types']} not found in "\
                            f"{available_message_types} for {client_type}")
                        raise MessageBusError(
                            errno.EINVAL, "Unknown Topic or "
                            "Partition. %s", KafkaError(3))
                return

        kafka_conf = {}
        kafka_conf['bootstrap.servers'] = self._servers
        kafka_conf['client.id'] = client_conf['client_id']
        kafka_conf['error_cb'] = self._error_cb

        if client_type == 'admin' or client_type == 'producer':
            kafka_conf['socket.timeout.ms'] = self._controller_socket_timeout
            admin = AdminClient(kafka_conf)
            self._clients['admin'][client_conf['client_id']] = admin

        if client_type == 'producer':
            kafka_conf['message.timeout.ms'] = self._send_message_timeout
            producer = Producer(**kafka_conf)
            self._clients[client_type][client_conf['client_id']] = producer

            self._resource = ConfigResource('topic',\
                client_conf['message_type'])
            admin = self._clients['admin'][client_conf['client_id']]
            conf = admin.describe_configs([self._resource])
            default_configs = list(conf.values())[0].result()
            for params in ['retention.ms']:
                if params not in default_configs:
                    Log.error(f"MessageBusError: Missing required config"\
                        f" parameter {params}. for client type {client_type}")
                    raise MessageBusError(errno.ENOKEY,\
                        "Missing required config parameter %s. for " +\
                        "client type %s", params, client_type)

            self._saved_retention = int(default_configs['retention.ms']\
                .__dict__['value'])

            # Set retention to default if the value is 1 ms
            self._saved_retention = self._default_msg_retention_period if\
                self._saved_retention == self._min_msg_retention_period else\
                int(default_configs['retention.ms'].__dict__['value'])

        elif client_type == 'consumer':
            for entry in ['offset', 'consumer_group', 'message_types',\
                'auto_ack', 'client_id']:
                if entry not in client_conf.keys():
                    Log.error(f"MessageBusError: Could not find entry "\
                        f"{entry} in conf keys for client type {client_type}")
                    raise MessageBusError(errno.ENOENT, "Could not find " +\
                        "entry %s in conf keys for client type %s", entry,\
                        client_type)

            kafka_conf['enable.auto.commit'] = client_conf['auto_ack']
            kafka_conf['auto.offset.reset'] = client_conf['offset']
            kafka_conf['group.id'] = client_conf['consumer_group']

            consumer = Consumer(**kafka_conf)
            consumer.subscribe(client_conf['message_types'])
            self._clients[client_type][client_conf['client_id']] = consumer
Example 25
def test_kafkaError_unknown_error():
    with pytest.raises(KafkaException, match="Err-12345?") as e:
        raise KafkaException(KafkaError(12345))
    assert not e.value.args[0].fatal()
    assert not e.value.args[0].retriable()
    assert not e.value.args[0].txn_requires_abort()
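
The match pattern in the test hints at how unknown codes are rendered; a quick illustration:

from confluent_kafka import KafkaError

err = KafkaError(12345)
print(err.str())  # unknown codes render in an "Err-12345?" style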
Example 26
 def __init__(self, error_code):
     super(MyException, self).__init__(KafkaError(error_code))
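
Assuming MyException extends KafkaException, the wrapper lets call sites raise directly from an integer error code. A usage sketch:

from confluent_kafka import KafkaError

raise MyException(KafkaError._MSG_TIMED_OUT)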
Example 27
def raise_for_kafka_error(err: KafkaError):
    if err.code() in ERROR_LOOKUP.keys():
        raise ERROR_LOOKUP[err.code()](err.code(), err.str())
    else:
        raise KafkaException(err.code(), err.str())
Example 28

    def init_client(self, client_type: str, **client_conf: dict):
        """ Obtain Kafka based Producer/Consumer """

        # Validate and return if client already exists
        if client_type not in self._clients.keys():
            raise MessageBusError(errors.ERR_INVALID_CLIENT_TYPE, \
                "Invalid client type %s", client_type)

        if client_conf['client_id'] in self._clients[client_type].keys():
            if self._clients[client_type][client_conf['client_id']] != {}:
                # Check if message_type exists to send/receive
                client = self._clients[client_type][client_conf['client_id']]
                available_message_types = client.list_topics().topics.keys()
                if client_type == 'producer':
                    if client_conf['message_type'] not in \
                        available_message_types:
                        raise KafkaException(KafkaError(3))
                elif client_type == 'consumer':
                    if not any(each_message_type in available_message_types for\
                        each_message_type in client_conf['message_types']):
                        raise KafkaException(KafkaError(3))
                return

        kafka_conf = {}
        kafka_conf['bootstrap.servers'] = self._servers
        kafka_conf['client.id'] = client_conf['client_id']
        kafka_conf['error_cb'] = self._error_cb

        if client_type == 'admin' or self._clients['admin'] == {}:
            if client_type != 'consumer':
                kafka_conf['socket.timeout.ms'] = self._kafka_socket_timeout
                self.admin = AdminClient(kafka_conf)
                self._clients['admin'][client_conf['client_id']] = self.admin

        if client_type == 'producer':
            producer = Producer(**kafka_conf)
            self._clients[client_type][client_conf['client_id']] = producer

            self._resource = ConfigResource('topic', \
                client_conf['message_type'])
            conf = self.admin.describe_configs([self._resource])
            default_configs = list(conf.values())[0].result()
            for params in ['retention.ms']:
                if params not in default_configs:
                    raise MessageBusError(errno.ENOKEY, \
                        "Missing required config parameter %s. for " +\
                        "client type %s", params, client_type)

            self._saved_retention = int(default_configs['retention.ms']\
                .__dict__['value'])

            # Set retention to default if the value is 1 ms
            self._saved_retention = self._default_msg_retention_period if \
                self._saved_retention == self._min_msg_retention_period else \
                int(default_configs['retention.ms'].__dict__['value'])

        elif client_type == 'consumer':
            for entry in ['offset', 'consumer_group', 'message_types', \
                'auto_ack', 'client_id']:
                if entry not in client_conf.keys():
                    raise MessageBusError(errno.ENOENT, "Could not find " +\
                        "enrty %s in conf keys for client type %s", entry, \
                        client_type)

            kafka_conf['enable.auto.commit'] = client_conf['auto_ack']
            kafka_conf['auto.offset.reset'] = client_conf['offset']
            kafka_conf['group.id'] = client_conf['consumer_group']

            consumer = Consumer(**kafka_conf)
            consumer.subscribe(client_conf['message_types'])
            self._clients[client_type][client_conf['client_id']] = consumer