def test_version():
    print('Using confluent_kafka module version %s (0x%x)' % confluent_kafka.version())
    sver, iver = confluent_kafka.version()
    assert len(sver) > 0
    assert iver > 0

    print('Using librdkafka version %s (0x%x)' % confluent_kafka.libversion())
    sver, iver = confluent_kafka.libversion()
    assert len(sver) > 0
    assert iver > 0
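
The integer returned alongside each version string is a hex-encoded version number (for librdkafka, 0xMMmmrrxx: major, minor, revision, pre-release), which is why later examples compare it against literals such as 0x00090400 (0.9.4) and 0x000b0400 (0.11.4). A minimal illustration of that pattern, assuming only the confluent_kafka API shown above:

import confluent_kafka

# libversion() returns (version_string, hex_int); the hex int is what feature gates compare.
sver, iver = confluent_kafka.libversion()
if iver >= 0x00090400:   # librdkafka >= 0.9.4: produce() timestamps supported
    print("librdkafka %s supports produce() timestamps" % sver)
if iver >= 0x000b0400:   # librdkafka >= 0.11.4: message headers supported
    print("librdkafka %s supports message headers" % sver)
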
def write_to_kafka(bootstrap_servers, schema_registry_url, topic_name, data):

    print("Kafka Version                : ", confluent_kafka.version(),confluent_kafka.libversion())

    schema_registry_conf = {'url': schema_registry_url}
    schema_registry_client = SchemaRegistryClient(schema_registry_conf)

    value_avro_serializer = AvroSerializer(schemas.weather_source_schema, schema_registry_client)
    string_serializer = StringSerializer('utf-8')

    conf = {'bootstrap.servers': bootstrap_servers,
            'client.id': socket.gethostname(),
            'on_delivery': delivery_report,
            'key.serializer': string_serializer,
            'value.serializer': value_avro_serializer    
            }

    avroProducer = SerializingProducer(conf)
    
    key = str(datetime.date.today()) + '~' + str(data['lat']) + '~' + str(data['lon'])
    message = json.dumps(data, cls=DatetimeEncoder)

    print("Key Type                     : ", type(key))
    print("Value Type                   : ", type(json.loads(message)))
  
    avroProducer.produce(topic=topic_name, key=key, value=json.loads(message))
    avroProducer.flush()
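
The producer config above wires in a delivery_report callback that is defined elsewhere; a minimal sketch of such a callback (the name and messages here are assumptions, not the original implementation):

# Hypothetical delivery_report; confluent_kafka calls it once per message with (err, msg).
def delivery_report(err, msg):
    if err is not None:
        print("Delivery failed for key %s: %s" % (msg.key(), err))
    else:
        print("Delivered to %s [%d] @ offset %d" %
              (msg.topic(), msg.partition(), msg.offset()))
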
def verify_producer():
    """ Verify basic Producer functionality """

    # Producer config
    conf = {
        'bootstrap.servers': bootstrap_servers,
        'error_cb': error_cb,
        'api.version.request': api_version_request
    }

    # Create producer
    p = confluent_kafka.Producer(**conf)
    print('producer at %s' % p)

    headers = produce_headers

    # Produce some messages
    p.produce(topic, 'Hello Python!', headers=headers)
    p.produce(topic, key='Just a key and headers', headers=headers)
    p.produce(topic, key='Just a key')
    p.produce(topic,
              partition=1,
              value='Strictly for partition 1',
              key='mykey',
              headers=headers)

    # Produce more messages, now with delivery report callbacks in various forms.
    mydr = MyTestDr()
    p.produce(topic,
              value='This one has a dr callback',
              callback=mydr.delivery)
    p.produce(topic,
              value='This one has a lambda',
              callback=lambda err, msg: MyTestDr._delivery(err, msg))
    p.produce(topic, value='This one has neither')

    # Try producing with a timestamp
    try:
        p.produce(topic, value='with a timestamp', timestamp=123456789000)
    except NotImplementedError:
        if confluent_kafka.libversion()[1] >= 0x00090400:
            raise

    # Produce even more messages
    for i in range(0, 10):
        p.produce(topic,
                  value='Message #%d' % i,
                  key=str(i),
                  callback=mydr.delivery)
        p.poll(0)

    print('Waiting for %d messages to be delivered' % len(p))

    # Block until all messages are delivered/failed
    p.flush()

    #
    # Additional isolated tests
    #
    test_producer_dr_only_error()
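
verify_producer() relies on an error_cb function and a MyTestDr delivery-report class defined elsewhere in the test suite; an illustrative stand-in (the names match the calls above, the bodies are assumed):

# Assumed helpers for the producer verification test above.
def error_cb(err):
    print('Error: %s' % err)


class MyTestDr(object):
    """ Delivery report callback holder """

    @staticmethod
    def _delivery(err, msg):
        if err:
            print('Message delivery failed (%s [%s]): %s' %
                  (msg.topic(), str(msg.partition()), err))
        else:
            print('Message delivered to %s [%s] at offset [%s]' %
                  (msg.topic(), msg.partition(), msg.offset()))

    def delivery(self, err, msg):
        self._delivery(err, msg)
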
Example 5
def print_packages_versions():
    """Print versions for the passed packages.

    """
    packages = get_packages()
    for pkg in packages:
        if pkg == "librdkafka":
            print("%s==%s" % (pkg, confluent_kafka.libversion()[0]))
        else:
            print("%s==%s" %
                  (pkg, pkg_resources.get_distribution(pkg).version))
Example 6
def main():
    print("Confluent Kafka Version: %s - Libversion: %s" %
          (version(), libversion()))
    print("")
    print("Connecting to RTSP: %s" % rtsp_url)
    print("")
    print("Producing video frames to: %s" % topic_frames)
    print("")
    print(
        "Skipping every %s frames and then saving up to %s (-1 is unlimited) frames"
        % (skipframes, capmax))
    print("")

    p = Producer({'bootstrap.servers': '', 'message.max.bytes': '2978246'})

    output_loc = "./test"
    capcount = 0
    cap = cv2.VideoCapture(rtsp_url)
    while capcount < capmax or capmax == -1:
        ret, frame = cap.read()
        if capcount % skipframes == 0:
            curtime = datetime.datetime.now()
            mystrtime = curtime.strftime("%Y-%m-%d %H:%M:%S")
            epochtime = int(time.time())

            print("Saving to Stream  at %s" % capcount)
            img_str = cv2.imencode('.jpg', frame)[1].tostring()
            encdata = base64.b64encode(img_str)
            encdatastr = encdata.decode('utf-8')

            myrec = OrderedDict()
            myrec['ts'] = mystrtime
            myrec['epoch_ts'] = epochtime
            myrec['cam_name'] = cam_name
            myrec['img'] = encdatastr

            produceMessage(p, topic_frames, json.dumps(myrec))

            # This is a more verbose method of writing so we can get raw bytes to save to a stream


#            w = open(output_loc + "/cap_test_%s.jpg" % capcount, 'wb')
#            w.write(img_str)
#            w.close()

# This is the preferred method for writing to a file from OpenCV; I am using imencode so I can get the bytes to write directly to MapR Streams
#            cv2.imwrite(output_loc + "/cap_%s.jpg" % (capcount), frame)
        capcount += 1
        if capcount >= 10000000:
            if capcount % skipframes == 0:
                capcount = 0
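
Several of these MapR Streams examples call a produceMessage() helper that is not shown; a plausible minimal version, assuming it only wraps produce() with BufferError handling:

# Assumed produceMessage() helper: retry once if the local producer queue is full.
def produceMessage(producer, topic, message):
    try:
        producer.produce(topic, value=message)
    except BufferError:
        producer.poll(10)   # drain delivery callbacks to free queue space
        producer.produce(topic, value=message)
    producer.poll(0)
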
def verify_producer():
    """ Verify basic Producer functionality """

    # Producer config
    conf = {'bootstrap.servers': bootstrap_servers,
            'error_cb': error_cb}

    # Create producer
    p = confluent_kafka.Producer(conf)
    print('producer at %s' % p)

    headers = produce_headers

    # Produce some messages
    p.produce(topic, 'Hello Python!', headers=headers)
    p.produce(topic, key='Just a key and headers', headers=headers)
    p.produce(topic, key='Just a key')
    p.produce(topic, partition=1, value='Strictly for partition 1',
              key='mykey', headers=headers)

    # Produce more messages, now with delivery report callbacks in various forms.
    mydr = MyTestDr()
    p.produce(topic, value='This one has a dr callback',
              callback=mydr.delivery)
    p.produce(topic, value='This one has a lambda',
              callback=lambda err, msg: MyTestDr._delivery(err, msg))
    p.produce(topic, value='This one has neither')

    # Try producing with a timestamp
    try:
        p.produce(topic, value='with a timestamp', timestamp=123456789000)
    except NotImplementedError:
        if confluent_kafka.libversion()[1] >= 0x00090400:
            raise

    # Produce even more messages
    for i in range(0, 10):
        p.produce(topic, value='Message #%d' % i, key=str(i),
                  callback=mydr.delivery)
        p.poll(0)

    print('Waiting for %d messages to be delivered' % len(p))

    # Block until all messages are delivered/failed
    p.flush()

    #
    # Additional isolated tests
    #
    test_producer_dr_only_error()
def test_produce_timestamp():
    """ Test produce() with timestamp arg """
    p = Producer({'socket.timeout.ms': 10,
                  'error_cb': error_cb,
                  'message.timeout.ms': 10})

    # Requires librdkafka >=v0.9.4

    try:
        p.produce('mytopic', timestamp=1234567)
    except NotImplementedError:
        # Should only fail on non-supporting librdkafka
        if libversion()[1] >= 0x00090400:
            raise

    p.flush()
Example 9
def test_produce_timestamp():
    """ Test produce() with timestamp arg """
    p = Producer({
        'socket.timeout.ms': 10,
        'error_cb': error_cb,
        'message.timeout.ms': 10
    })

    # Requires librdkafka >=v0.9.4

    try:
        p.produce('mytopic', timestamp=1234567)
    except NotImplementedError:
        # Should only fail on non-supporting librdkafka
        if libversion()[1] >= 0x00090400:
            raise

    p.flush()
Example 10
def main():
    print("Confluent Kafka Version: %s - Libversion: %s" %
          (version(), libversion()))
    print("")

    conf = {
        'group.id': 'img2file1',
        'default.topic.config': {
            'auto.offset.reset': 'earliest'
        }
    }
    c = Consumer(conf)
    c.subscribe([topic_frames])

    running = True
    while running:
        msg = c.poll(timeout=1.0)
        if msg is None:
            continue
        if not msg.error():
            mymsg = json.loads(msg.value().decode('utf-8'),
                               object_pairs_hook=OrderedDict)
            outmsg = OrderedDict()
            outmsg['ts'] = mymsg['ts']
            outmsg['epoch_ts'] = mymsg['epoch_ts']
            outmsg['cam_name'] = mymsg['cam_name']

            fileout = "%s/stream_%s_%s.jpg" % (output_loc, outmsg['cam_name'],
                                               outmsg['ts'].replace(
                                                   " ", "_").replace(":", "_"))
            mybytes = base64.b64decode(mymsg['img'])
            print("Processing message: %s and saving to %s" %
                  (outmsg, fileout))
            w = open(fileout, 'wb')
            w.write(mybytes)
            w.close()

        elif msg.error().code() != KafkaError._PARTITION_EOF:
            print(msg.error())
            running = False
    c.close()
Example 11
    def _to_confluent_kafka(self) -> Dict:
        config = {
            "bootstrap.servers": ",".join(self.broker_urls),
            "error_cb": self.error_callback,
            "group.id": self.group_id,
            "enable.auto.commit": True,
            "auto.commit.interval.ms": int(self.offset_commit_interval.total_seconds() * 1000),
            "enable.auto.offset.store": False,
            "queued.min.messages": 1000,
            "enable.partition.eof": not self.read_forever,
        }
        if self.start_at is ConsumerStartPosition.EARLIEST:
            default_topic_config = config.get("default.topic.config", {})
            default_topic_config = {
                "auto.offset.reset": "EARLIEST",
            }
            config["default.topic.config"] = default_topic_config
        elif self.start_at is ConsumerStartPosition.LATEST:
            # FIXME: librdkafka has a bug in offset handling - it caches
            # "OFFSET_END", and will repeatedly move to the end of the
            # topic. See https://github.com/edenhill/librdkafka/pull/2876 -
            # it should get fixed in v1.5 of librdkafka.

            librdkafka_version = confluent_kafka.libversion()[0]
            if librdkafka_version < "1.5.0":
                self.logger.warn(
                    "In librdkafka before v1.5, LATEST offsets have buggy behavior; you may "
                    f"not receive data (your librdkafka version is {librdkafka_version}). See "
                    "https://github.com/confluentinc/confluent-kafka-dotnet/issues/1254.")
            default_topic_config = config.get("default.topic.config", {})
            default_topic_config = {
                "auto.offset.reset": "LATEST",
            }
            config["default.topic.config"] = default_topic_config

        if self.auth is not None:
            config.update(self.auth())
        return config
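
The dict built by _to_confluent_kafka() is meant to be handed straight to a confluent_kafka Consumer; a short usage sketch (the settings object and topic name are placeholders, not from the original class):

import confluent_kafka

config = settings._to_confluent_kafka()   # hypothetical instance of the class above
consumer = confluent_kafka.Consumer(config)
consumer.subscribe(["example-topic"])
msg = consumer.poll(timeout=1.0)
if msg is not None and msg.error() is None:
    print(msg.value())
consumer.close()
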
Example 12
def main():
    print("Confluent Kafka Version: %s - Libversion: %s" %
          (version(), libversion()))
    topic = os.environ["MAPR_STREAMS_STREAM_LOCATION"].replace(
        '"', '') + ":" + os.environ["MAPR_STREAMS_TOPIC"].replace('"', '')
    print("Producing messages to: %s" % topic)
    p = Producer({'bootstrap.servers': ''})

    # Listen for messages
    running = True
    lastflush = 0
    while running:
        curtime = int(time.time())
        curts = time.strftime("%m/%d/%Y %H:%M:%S")
        message = {}
        message['ts'] = curts
        message['field1'] = "This is fun"
        message['field2'] = "or is it?"
        message_json = json.dumps(message)
        print(message_json)

        try:
            p.produce(topic, value=message_json, callback=delivery_callback)
            p.poll(0)
        except BufferError as e:
            print("Buffer full, waiting for free space on the queue")
            p.poll(10)
            p.produce(topic, value=message_json, callback=delivery_callback)

        except KeyboardInterrupt:
            print("\n\nExiting per User Request")
            p.flush()  # Producer has no close(); flush any pending messages instead
            sys.exit(0)
        delay = random.randint(1, 9)
        print("Sleeping for %s" % delay)
        time.sleep(delay)
Example 13
    try:
        kc.list_topics(timeout=0.2)
    except KafkaException as e:
        assert e.args[0].code() in (KafkaError._TIMED_OUT,
                                    KafkaError._TRANSPORT)

    try:
        kc.list_topics(topic="hi", timeout=0.1)
    except KafkaException as e:
        assert e.args[0].code() in (KafkaError._TIMED_OUT,
                                    KafkaError._TRANSPORT)

    kc.close()


@pytest.mark.skipif(libversion()[1] < 0x000b0000,
                    reason="requires librdkafka >=0.11.0")
def test_store_offsets():
    """ Basic store_offsets() tests """

    c = Consumer({
        'group.id': 'test',
        'enable.auto.commit': True,
        'enable.auto.offset.store': False,
        'socket.timeout.ms': 50,
        'session.timeout.ms': 100
    })

    c.subscribe(["test"])

    try:
Example 14
name = 'kafkacrypto'
# version and license information in setup.py
__all__ = []

import logging
try:
    import confluent_kafka
    from kafkacrypto.confluent_kafka_wrapper import KafkaConsumer, KafkaProducer, TopicPartition, TopicPartitionOffset, OffsetAndMetadata
    logging.warning("Using confluent_kafka: %s, librdkafka: %s",
                    confluent_kafka.version(), confluent_kafka.libversion())
except ImportError:
    # fallback to kafka-python
    logging.warning(
        "No confluent_kafka package found. Falling back to kafka-python. It is highly"
    )
    logging.warning(
        "recommended that you install confluent_kafka and librdkafka for better performance,"
    )
    logging.warning("especially with large messages.")
    from kafkacrypto.kafka_python_wrapper import KafkaConsumer, KafkaProducer, TopicPartition, TopicPartitionOffset, OffsetAndMetadata
del logging

__all__.extend([
    'KafkaConsumer', 'KafkaProducer', 'TopicPartition', 'TopicPartitionOffset',
    'OffsetAndMetadata'
])

from kafkacrypto.message import KafkaCryptoMessage
from kafkacrypto.crypto import KafkaCrypto
from kafkacrypto.controller import KafkaCryptoController
from kafkacrypto.kafkacryptostore import KafkaCryptoStore
Example 15
name='kafkacrypto'
# version and license information in setup.py
__all__ = []

#
# Use warnings to not accidentally initialize the logging subsystem (these can and will
# be logged to a logger if logger is configured with captureWarnings). The category
# is RuntimeWarning because that is where information about library configurations
# should be sent.
#
import warnings
try:
  import confluent_kafka
  from kafkacrypto.confluent_kafka_wrapper import KafkaConsumer,KafkaProducer,TopicPartition,TopicPartitionOffset,OffsetAndMetadata
  warnings.warn("Using confluent_kafka: {}, librdkafka: {}".format(str(confluent_kafka.version()), str(confluent_kafka.libversion())),category=RuntimeWarning)
except ImportError:
  # fallback to kafka-python
  warnings.warn("No confluent_kafka package found. Falling back to kafka-python. It is highly, recommended that you install confluent_kafka and librdkafka for better performance, especially with large messages.",category=RuntimeWarning)
  from kafkacrypto.kafka_python_wrapper import KafkaConsumer,KafkaProducer,TopicPartition,TopicPartitionOffset,OffsetAndMetadata
del warnings

__all__.extend(['KafkaConsumer', 'KafkaProducer', 'TopicPartition', 'TopicPartitionOffset', 'OffsetAndMetadata'])

from kafkacrypto.message import KafkaCryptoMessage
from kafkacrypto.crypto import KafkaCrypto
from kafkacrypto.controller import KafkaCryptoController
from kafkacrypto.kafkacryptostore import KafkaCryptoStore
from kafkacrypto.chainserver import KafkaCryptoChainServer
__all__.extend([ 'KafkaCryptoMessage', 'KafkaCrypto', 'KafkaCryptoStore', 'KafkaCryptoController', 'KafkaCryptoChainServer'])

Example 16
def main():

    print("Confluent Kafka Version: %s - Libversion: %s" %
          (version(), libversion()))
    topic_full = os.environ["MAPR_STREAMS_STREAM_LOCATION"].replace(
        '"', '') + ":" + os.environ["MAPR_STREAMS_TESLA_FULL_TOPIC"].replace(
            '"', '')
    topic_stream = os.environ["MAPR_STREAMS_STREAM_LOCATION"].replace(
        '"', '') + ":" + os.environ["MAPR_STREAMS_TESLA_STREAM_TOPIC"].replace(
            '"', '')

    print("Producing full messages to: %s" % topic_full)
    print("Producing stream messages to: %s" % topic_stream)
    p = Producer({'bootstrap.servers': ''})

    # Listen for messages
    running = True
    lastflush = 0

    app_auth = getAppAuth()
    token = loadToken(app_auth)
    vehicles = loadVehicleInfo(token)
    vehicle = None
    for v in vehicles['response']:
        if v['display_name'] == carname:
            vehicle = v
            break
    if vehicle is None:
        print("Could not find %s in vehicle list" % carname)
        sys.exit(1)

    all_data = loadData(token, vehicle['id'])['response']

    stream_items = [
        'speed', 'odometer', 'soc', 'elevation', 'est_heading', 'est_lat',
        'est_lng', 'power', 'shift_state', 'native_type', 'heading',
        'native_latitude', 'native_longitude'
    ]
    stream_string = ",".join(stream_items)

    output_items = ['timestamp'] + stream_items
    stream_url = "https://streaming.vn.teslamotors.com/stream/%s/?values=%s" % (
        vehicle['vehicle_id'], stream_string)

    tokenidx = 0
    last_stream_line = ""
    last_all_data = False

    while running:
        curtime = int(time.time())
        if curtime - last_data > data_secs:
            try:
                all_data = loadData(token, vehicle['id'])['response']
                if all_data is None:
                    last_all_data = False
                    print(
                        "%s - all_data is None, going to slowly try to refresh this to correct"
                        % curtime)
                    while all_data is None:
                        all_data = loadData(token, vehicle['id'])['response']
                        if all_data is None:
                            time.sleep(5)
                else:
                    if last_all_data == False:
                        print(
                            "%s - all_data success on new start or after previous failure"
                            % curtime)
                    last_all_data = True
                produceMessage(p, topic_full, json.dumps(all_data))
                if curtime % stdout_interval == 0:
                    print("%s - logging at stdout interval - success" %
                          curtime)
            except:
                print("%s - All Data load failure" % curtime)
        try:
            stream_resp = requests.get(stream_url,
                                       auth=(token['uname'],
                                             all_data['tokens'][tokenidx]),
                                       stream=True,
                                       timeout=http_timeout)

            for line in stream_resp.iter_lines():
                curtime = int(time.time())
                if line:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.find("Can't validate password") >= 0:
                        all_data = loadData(token, vehicle['id'])['response']
                        print("%s - Bad password - Refreshing Tokens" %
                              curtime)
                        time.sleep(2)
                    elif last_stream_line != decoded_line:
                        dline = decoded_line.split(",")
                        myrec = OrderedDict(zip(output_items, dline))
                        produceMessage(p, topic_stream, json.dumps(myrec))
                        if curtime % stdout_interval == 0:
                            print(
                                "%s - Streaming well - success on stdout_interval"
                                % curtime)
                        myrec = None
                        last_stream_line = decoded_line
                if int(time.time()) - last_data > data_secs:
                    try:
                        all_data = loadData(token, vehicle['id'])['response']
                        last_all_data = True
                        produceMessage(p, topic_full, json.dumps(all_data))
                    except:
                        last_all_data = False
                        break
        except requests.exceptions.ConnectionError:
            pass
        except:
            pass
Example 17
    })

    # Requires librdkafka >=v0.9.4

    try:
        p.produce('mytopic', timestamp=1234567)
    except NotImplementedError:
        # Should only fail on non-supporting librdkafka
        if libversion()[1] >= 0x00090400:
            raise

    p.flush()


# Should be updated to 0.11.4 when it is released
@pytest.mark.skipif(libversion()[1] < 0x000b0400,
                    reason="requires librdkafka >=0.11.4")
def test_produce_headers():
    """ Test produce() with timestamp arg """
    p = Producer({
        'socket.timeout.ms': 10,
        'error_cb': error_cb,
        'message.timeout.ms': 10
    })

    binval = pack('hhl', 1, 2, 3)

    headers_to_test = [
        [('headerkey', 'headervalue')],
        [('dupkey', 'dupvalue'), ('empty', ''), ('dupkey', 'dupvalue')],
        [('dupkey', 'dupvalue'), ('dupkey', 'diffvalue')],
        assert e.args[0].code() == KafkaError._TIMED_OUT

    try:
        kc.list_topics(timeout=0.2)
    except KafkaException as e:
        assert e.args[0].code() in (KafkaError._TIMED_OUT, KafkaError._TRANSPORT)

    try:
        kc.list_topics(topic="hi", timeout=0.1)
    except KafkaException as e:
        assert e.args[0].code() in (KafkaError._TIMED_OUT, KafkaError._TRANSPORT)

    kc.close()


@pytest.mark.skipif(libversion()[1] < 0x000b0000,
                    reason="requires librdkafka >=0.11.0")
def test_store_offsets():
    """ Basic store_offsets() tests """

    c = Consumer({'group.id': 'test',
                  'enable.auto.commit': True,
                  'enable.auto.offset.store': False,
                  'socket.timeout.ms': 50,
                  'session.timeout.ms': 100})

    c.subscribe(["test"])

    try:
        c.store_offsets(offsets=[TopicPartition("test", 0, 42)])
    except KafkaException as e:
def main():


    print("Confluent Kafka Version: %s - Libversion: %s" % (version(), libversion()))
    print("")
    print("Using weights file: %s" % weights_file)
    print("")
    print("Consuming raw video frames from %s" % topic_frames)
    print("")
    print("Producing classified video frames to: %s" % topic_class_frames)
    print("")
    print("Consumer group name: %s" % consumer_group)
    print("")
    print("Consumer group start: %s" % consumer_group_start)
    print("")
    print("Debug is set to %s" % debug)
    print("")
    print("nomsgcnt is set to %s - if this is greater than 0, then when we have that many attempts to read a msg from MapR Streams, we will exit" % nomsgcnt)


    con_conf = {'bootstrap.servers': '', 'group.id': consumer_group, 'default.topic.config': {'auto.offset.reset': consumer_group_start}}
    pro_conf = {'bootstrap.servers': '', 'message.max.bytes':'2978246'}
    c = Consumer(con_conf)
    p = Producer(pro_conf)

    c.subscribe([topic_frames])
    lastmsgtime = time.time()
    nomsg = 0
    running = True
    while running:
        msg = c.poll(timeout=1.0)
        if msg is None:
            nomsg += 1
            if debug:
                print("No Message - Continuing")
            if nomsgcnt > 0 and nomsg >= nomsgcnt:
                print("%s itterations with no messages reached - Exiting Gracefully")
                sys.exit(0)
            continue
        if not msg.error():
            mymsg = json.loads(msg.value().decode('utf-8'), object_pairs_hook=OrderedDict)
            mypart = msg.partition()
            myoffset = msg.offset()
            outmsg = OrderedDict()
            outmsg['ts'] = mymsg['ts']
            outmsg['epoch_ts'] = mymsg['epoch_ts']
            outmsg['cam_name'] = mymsg['cam_name']
            outmsg['src_partition'] = mypart
            outmsg['src_offset'] = myoffset
            mybytes = base64.b64decode(mymsg['img'])
            o = open("/dev/shm/tmp.jpg", "wb")
            o.write(mybytes)
            o.close()
#            myimage = np.array(Image.open(BytesIO(mybytes))) 
            curmsgtime = time.time()
            msgdelta = curmsgtime - lastmsgtime
            if debug:
                print("Time between last processed messages: %s" %  msgdelta)
            lastmsgtime = curmsgtime

            r = python.darknet.detect(net, meta, b'/dev/shm/tmp.jpg')
            if r != []:
                if debug:
                    print("Got classification!")
                curtime = datetime.datetime.now()
                mystrtime = curtime.strftime("%Y-%m-%d %H:%M:%S")
                epochtime = int(time.time())

                arclass = []
                if save_images == 1:
                    try:
                        image = Image.open(BytesIO(mybytes)).convert("RGBA")
                    except:
                        continue
                    draw = ImageDraw.Draw(image)
                    for q in r:
                        j = OrderedDict()
                        name = q[0]
                        j['name'] = name.decode()
                        predict = q[1]
                        j['predict'] = predict
                        x = q[2][0]
                        y = q[2][1]
                        w = q[2][2]
                        z = q[2][3]
                        x_max = (2*x+w)/2
                        x_min = (2*x-w)/2
                        y_min = (2*y-z)/2
                        y_max = (2*y+z)/2
                        j['x_min'] = x_min
                        j['x_max'] = x_max
                        j['y_min'] = y_min
                        j['y_max'] = y_max
                        for x in range(border):
                            draw.rectangle(((x_min - x, y_min - x), (x_max + x, y_max + x)), fill=None, outline="black")
                        draw.text((x_min + border + 2, y_max - border - 5), name)
                        arclass.append(j)

                    imgSave = BytesIO()
                    image.save(imgSave, format='JPEG')
                    imgSave = imgSave.getvalue()
                    encdata = base64.b64encode(imgSave)
                    encdatastr = encdata.decode('utf-8')
                else:
                    encdatastr = ""
                outmsg['class_json'] = arclass
                outmsg['class_ts'] = mystrtime
                outmsg['class_epoch_ts'] = epochtime
                outmsg['class_img'] = encdatastr
                produceMessage(p, topic_class_frames, json.dumps(outmsg))
            else:
                pass
                  'message.timeout.ms': 10})

    # Requires librdkafka >=v0.9.4

    try:
        p.produce('mytopic', timestamp=1234567)
    except NotImplementedError:
        # Should only fail on non-supporting librdkafka
        if libversion()[1] >= 0x00090400:
            raise

    p.flush()


# Should be updated to 0.11.4 when it is released
@pytest.mark.skipif(libversion()[1] < 0x000b0400,
                    reason="requires librdkafka >=0.11.4")
def test_produce_headers():
    """ Test produce() with timestamp arg """
    p = Producer({'socket.timeout.ms': 10,
                  'error_cb': error_cb,
                  'message.timeout.ms': 10})

    binval = pack('hhl', 1, 2, 3)

    headers_to_test = [
        [('headerkey', 'headervalue')],
        [('dupkey', 'dupvalue'), ('empty', ''), ('dupkey', 'dupvalue')],
        [('dupkey', 'dupvalue'), ('dupkey', 'diffvalue')],
        [('key_with_null_value', None)],
        [('binaryval', binval)],
    bootstrap_servers = testconf.get('bootstrap.servers')
    topic = testconf.get('topic')
    schema_registry_url = testconf.get('schema.registry.url')

    if len(modes) == 0:
        modes = test_modes

    if bootstrap_servers is None:
        print_usage(1, "Missing required property bootstrap.servers")

    if topic is None:
        print_usage(1, "Missing required property topic")

    print('Using confluent_kafka module version %s (0x%x)' % confluent_kafka.version())
    print('Using librdkafka version %s (0x%x)' % confluent_kafka.libversion())
    print('Testing: %s' % modes)
    print('Brokers: %s' % bootstrap_servers)
    print('Topic prefix: %s' % topic)

    if 'producer' in modes:
        print('=' * 30, 'Verifying Producer', '=' * 30)
        verify_producer()

        if 'performance' in modes:
            print('=' * 30, 'Verifying Producer performance (with dr_cb)', '=' * 30)
            verify_producer_performance(with_dr_cb=True)

        if 'performance' in modes:
            print('=' * 30, 'Verifying Producer performance (without dr_cb)', '=' * 30)
            verify_producer_performance(with_dr_cb=False)
Example 22
    if len(sys.argv) > 1:
        bootstrap_servers = sys.argv[1]
        if len(sys.argv) > 2:
            topic = sys.argv[2]
        if len(sys.argv) > 3:
            schema_registry_url = sys.argv[3]
    else:
        print_usage(1)

    if len(modes) == 0:
        modes = ['consumer', 'producer', 'avro', 'performance']

    print('Using confluent_kafka module version %s (0x%x)' %
          confluent_kafka.version())
    print('Using librdkafka version %s (0x%x)' % confluent_kafka.libversion())

    if 'producer' in modes:
        print('=' * 30, 'Verifying Producer', '=' * 30)
        verify_producer()

        if 'performance' in modes:
            print('=' * 30, 'Verifying Producer performance (with dr_cb)',
                  '=' * 30)
            verify_producer_performance(with_dr_cb=True)

        if 'performance' in modes:
            print('=' * 30, 'Verifying Producer performance (without dr_cb)',
                  '=' * 30)
            verify_producer_performance(with_dr_cb=False)
Example 23
"""
Default atexit registered functions will not be triggered on error signals.

However - if we manually handle the error signals and exit, the functions will
be triggered.
"""
import signal
import sys

import confluent_kafka
import structlog

logger = structlog.get_logger(__name__)
logger.info("Using confluent kafka versions",
            version=confluent_kafka.version(),
            libversion=confluent_kafka.libversion())


def error_handler(signum, frame):
    logger.info("Received signal", signum=signum)
    exit_code = 128 + signum
    sys.exit(exit_code)


def interrupt_handler(signum, frame):
    logger.info("Received signal", signum=signum)
    sys.exit(1)


signal.signal(signal.SIGTERM, error_handler)  # graceful shutdown
signal.signal(signal.SIGINT, interrupt_handler)  # keyboard interrupt
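
With SIGTERM and SIGINT converted to sys.exit() above, atexit-registered cleanup actually runs on those signals; a minimal illustration (the producer and broker address are assumptions, not part of the original snippet):

import atexit

producer = confluent_kafka.Producer({'bootstrap.servers': 'localhost:9092'})


@atexit.register
def flush_on_exit():
    # Runs on normal exit and, thanks to the handlers above, on SIGTERM/SIGINT too.
    logger.info("Flushing producer before exit")
    producer.flush()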