Example #1
def clear_data_for_redispatching():
    kafka_admin_client = KafkaAdminClient(
        bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS,
    )

    kafka_consumer = KafkaConsumer(
        bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS.split(";"),
    )

    kafka_admin_client.delete_topics(list(kafka_consumer.topics()))
    print("--kafka cleared --")

    if REDIS_PASSWORD == "":
        redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=None)
    else:
        redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)

    redis_conn.flushdb()
    print("--redis cleared --")

    if not database_exists(str(SQLALCHEMY_DATABASE_URI)):
        print("Failed, the database does not exist.")
        return False

    engine = create_engine(str(SQLALCHEMY_DATABASE_URI))

    db_session = sessionmaker(bind=engine)
    session = db_session()

    session.execute("delete from job_scheduled_secondary_workers;")
    session.execute("update job set planning_status = 'U';") 
    session.execute("update team set latest_env_kafka_offset=0;")
    session.execute("commit;")
    print("--postgres DB cleared --")
Example #2
    def test_produce_and_consume(self):
        """
        It first creates 10 messages. The it consumes them. It is successful, if all messages are consumed.
        :return: -
        """
        topic = 'test_4'
        topic_partition = TopicPartition(topic=topic, partition=0)
        msg = b'this is a message'
        # publish 10 events to the topic 'test_4'
        producer = KafkaProducer(bootstrap_servers=bootstrap_servers)
        for _ in range(10):
            producer.send(topic, msg)
        producer.flush()  # make sure all messages reach the broker before consuming

        # consume all previous events that were published to the topic 'test_4'
        consumer = KafkaConsumer(topic,
                                 bootstrap_servers=bootstrap_servers,
                                 auto_offset_reset='earliest')
        events = consumer.poll(1000).get(topic_partition, [])
        n_events = len(events)

        # the list of consumed events must not be empty
        assert (n_events > 0)

        # the last event must (most likely) have the value 'this is a message'
        assert (events[n_events - 1].value == msg)
        client = KafkaAdminClient(bootstrap_servers=bootstrap_servers)
        client.delete_topics([topic])
Example #3
    def run(self):
        try:
            consumer = KafkaConsumer(self.response_topic,
                                     bootstrap_servers=self.servers,
                                     client_id=random_string())

            self.logger.info("manifest consumer on {0} kafka topic".format(
                self.response_topic))

            for message in consumer:
                try:
                    json_str = message.value
                    json_msg = json.loads(json_str)
                    if is_close_msg(json_msg):
                        print(json_str)
                        break
                except Exception as e:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_tb(exc_traceback,
                                       limit=20,
                                       file=sys.stdout)
                    self.logger.error(str(exc_type))
                    self.logger.error(str(exc_value))
                    break

            consumer.close()
            admin = KafkaAdminClient(bootstrap_servers=self.servers)
            admin.delete_topics([self.response_topic])
            admin.close()
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_tb(exc_traceback, limit=20, file=sys.stdout)
            self.logger.error(str(exc_type))
            self.logger.error(str(exc_value))
Example #4
def delete_topics(topics, time_out=10):
    # noinspection PyBroadException
    config = get_basic_utilities().get(CONFIG)
    kafka_config = config['kafka']
    server = f"{kafka_config['host']}:{kafka_config['port']}"
    admin_client = KafkaAdminClient(bootstrap_servers=server)
    consumer = KafkaConsumer(bootstrap_servers=server)
    active_topics = [topic for topic in consumer.topics() if topic in topics]
    admin_client.delete_topics(topics=active_topics)
    stop = threading.Event()
    all_deleted = False

    def is_deleted():
        nonlocal all_deleted
        while not stop.is_set():
            current_topics = consumer.topics()
            for topic in active_topics:
                if topic in current_topics:
                    stop.wait(timeout=1)
                    break
            else:
                stop.set()
                all_deleted = True

    thread = threading.Thread(name='delete-topics', target=is_deleted)
    thread.start()
    stop.wait(timeout=time_out)
    stop.set()
    thread.join()
    return all_deleted
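
A minimal usage sketch for the helper above (the topic names are hypothetical; it assumes the same config utilities are registered):

# hypothetical invocation of the polling delete_topics() helper
if delete_topics(['events-raw', 'events-enriched'], time_out=30):
    print('all topics deleted')
else:
    print('timed out waiting for topic deletion')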
Example #5
class TestElasticProducer(object):

    def setup_class(self):
        self.admin = KafkaAdminClient(bootstrap_servers='localhost:9092')
        self.index = ElasticIndex('test-elastic-producer', 'doc')
        self.index.index_into({'test': 1}, 0)
        self.index.index_into({'test': 2}, 1)
        self.index.index_into({'test': 3}, 2)
        self.index.index_into({'test': 4}, 3)
        self.index.index_into({'test': 5}, 4)

        self.producer = ElasticProducer('configs/elastic/test_elastic_producer_producer.yml')
        self.consumer = SimpleConsumer('configs/elastic/test_elastic_producer_consumer.yml')

    def teardown_class(self):
        self.consumer.close()
        self.admin.delete_topics(['test-elastic-producer'])
        self.admin.close()
        self.index.delete()

    #@pytest.mark.skip()
    def test_produce(self):
        self.producer.process()
        key, message = self.consumer.consume()
        assert key == '0'
        assert message == '{"test": 1}'
Example #6
def del_topic(args):
    admin = KafkaAdminClient(bootstrap_servers=[args.broker])
    try:
        admin.delete_topics(topics=args.topics)
    except UnknownTopicOrPartitionError:
        pass
    admin.close()
Example #7
    def run(self):

        try:
            self.logger.info("starting rosbag_consumer:{0}".format(
                self.response_topic))
            rospy.init_node("mozart_rosbag_{0}".format(random_string(6)))

            consumer = KafkaConsumer(self.response_topic,
                                     bootstrap_servers=self.servers,
                                     client_id=random_string())

            if self.s3:
                self.s3_reader = S3Reader(self.s3_read_req, self.s3_read_resp)
                self.s3_deleter = S3Deleter(self.s3_delete_req)
                self.s3_reader.start()
                self.s3_deleter.start()

            for msg in consumer:
                try:
                    json_str = msg.value
                    json_msg = json.loads(json_str)

                    if is_close_msg(json_msg):
                        print(json_str)
                        break

                    self.publish_bag(json_msg)
                except Exception as e:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_tb(exc_traceback,
                                       limit=20,
                                       file=sys.stdout)
                    print(str(e))
                    break

            if self.s3:
                self.read_s3(drain=True)
                self.s3_read_req.put("__close__")
                self.s3_reader.join(timeout=2)
                if self.s3_reader.is_alive():
                    self.s3_reader.terminate()
                self.s3_delete_req.put("__close__")
                time.sleep(5)
                self.s3_deleter.join(timeout=2)
                if self.s3_deleter.is_alive():
                    self.s3_deleter.terminate()
            else:
                for dir in self.clean_up:
                    shutil.rmtree(dir, ignore_errors=True)

            consumer.close()
            admin = KafkaAdminClient(bootstrap_servers=self.servers)
            admin.delete_topics([self.response_topic])
            admin.close()

        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_tb(exc_traceback, limit=20, file=sys.stdout)
            print(str(e))
Example #8
def clear_kafka():
    kafka_admin_client = KafkaAdminClient(
        bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS,
    )

    kafka_consumer = KafkaConsumer(
        bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS.split(";"),
    )

    kafka_admin_client.delete_topics(list(kafka_consumer.topics()))
    print("--kafka cleared --")
Example #9
class Admin:
    def __init__(self):
        self.client = KafkaAdminClient(bootstrap_servers=app.config['KAFKA_URL'])

    def create_topics(self, topics):
        topics = [NewTopic(topic, 1, 1) for topic in topics]
        self.client.create_topics(new_topics=topics)

    def delete_topics(self, topics):
        self.client.delete_topics(topics=topics)
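
A short usage sketch for the wrapper above (topic names are hypothetical; assumes app.config['KAFKA_URL'] points at a reachable broker):

# hypothetical round trip: create two topics, then remove them
admin = Admin()
admin.create_topics(['orders', 'payments'])
admin.delete_topics(['orders', 'payments'])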
Example #10
def clean_topics():
    kafka_admin_client = KafkaAdminClient(
        bootstrap_servers=['192.168.56.101:29094'])
    topics = kafka_admin_client.list_topics()
    my_topics = []
    for topic in topics:
        if "owid-covid" in topic:
            print(topic)
            my_topics.append(topic)
    kafka_admin_client.delete_topics(my_topics)
Example #11
class AdminClient(KafkaPython):
    def __init__(self, bootstrap_servers=None, **kwargs):
        super().__init__(servers=bootstrap_servers)
        admin_client_config.update(kwargs)
        self.engine = KafkaAdminClient(
            bootstrap_servers=self._bootstrap_servers,
            client_id=self._client_id,
            request_timeout_ms=self._request_timeout_ms,
            **admin_client_config)

    def create_topics(self, topic_list: list):
        new_topic = []

        for spec in topic_list:
            new_topic.append(
                NewTopic(name=spec['name'],
                         num_partitions=spec['num_partitions'],
                         replication_factor=spec['replication_factor'],
                         replica_assignments=spec['replica_assignments'],
                         topic_configs=spec['topic_configs']))

        if not Consumer().get_user_topics().intersection(
                {item['name'] for item in topic_list}):

            try:
                self.engine.create_topics(new_topic, **create_topic_config)
            except KafkaError as e:
                _logger.error(e)

            self.engine.close()
        else:
            _logger.error(
                self._logMsg(create_topic_fail_code, self._client_id,
                             'duplicate topic'))
            return

    def delete_topics(self, topic: list):
        if Consumer().get_user_topics().intersection(set(topic)):

            try:
                self.engine.delete_topics(topic, self._request_timeout_ms)

            except KafkaError as e:
                _logger.error(
                    self._logMsg(delete_topic_fail_code, self._client_id,
                                 'failed to delete topics: %s' % e))

            self.engine.close()
        else:
            _logger.error(
                self._logMsg(delete_topic_fail_code, self._client_id,
                             'the topics to delete do not exist'))
            return
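
A sketch of the topic_list payload that create_topics() above expects, derived from the keys the loop reads (the values shown are hypothetical):

# hypothetical payload for AdminClient.create_topics()
topic_list = [{
    'name': 'demo-topic',
    'num_partitions': 3,
    'replication_factor': 1,
    'replica_assignments': None,   # let the broker assign replicas
    'topic_configs': {'retention.ms': '86400000'},
}]
AdminClient(bootstrap_servers='localhost:9092').create_topics(topic_list)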
Example #12
def clear_all_data():
    kafka_admin_client = KafkaAdminClient(
        bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS,
    )

    kafka_consumer = KafkaConsumer(
        bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS.split(";"),
    )

    kafka_admin_client.delete_topics(list(kafka_consumer.topics()))
    print("--kafka cleared --")

    if REDIS_PASSWORD == "":
        redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=None)
    else:
        redis_conn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)

    redis_conn.flushdb()
    print("--redis cleared --")

    if not database_exists(str(SQLALCHEMY_DATABASE_URI)):
        print("Failed, the database does not exist.")
        return False

    engine = create_engine(str(SQLALCHEMY_DATABASE_URI))

    db_session = sessionmaker(bind=engine)
    session = db_session()

    session.execute("delete from event;")
    session.execute("delete from assoc_job_tags;")
    session.execute("delete from job_scheduled_secondary_workers;")
    

    session.execute("delete from job;")
    # session.execute("delete from worker_absence;")
    # session.execute("delete from appointment;")
    session.execute("delete from worker;")
    session.execute("delete from location;")
    # session.execute("update team set latest_env_kafka_offset=0;")
    session.execute("delete from team;")
    session.execute("delete from tag;")
    session.execute("delete from service_plugin;")
    session.execute("delete from plugin;")
    session.execute("delete from service;")
    session.execute("delete from dispatch_user;")

    session.execute("commit;")
    print("--postgres DB cleared --")
Example #13
    def test_runner(self):
        """
        Generalized producer consumer test.

        Assuming there is an empty test kafka rollout locally at port 9092
        """
        test_run_id = str(uuid.uuid1())

        test_config = StatsProcessorConfig()
        test_config.servers = ['localhost:9092']
        test_config.topics = ['test-topic']
        test_config.client_id = 'test-client'
        # Remove existing topics

        admin_client = KafkaAdminClient(bootstrap_servers=test_config.servers,
                                        client_id='test-admin')
        kafka_topics = admin_client.list_topics()
        existing_topics = set(test_config.topics).intersection(kafka_topics)
        if len(existing_topics) > 0:
            admin_client.delete_topics(existing_topics)

        # Initialize the database
        local_db_mngr = LocalDataManager(self.test_dir)
        local_db_mngr.initialize_db()

        runner = StatsCollectorRunner(self.test_dir, test_config)
        runner.initialize_runner()

        test_producer = TestEventsProducer(kafka_servers=test_config.servers,
                                           test_topic=test_config.topics[0],
                                           test_run_id=test_run_id)
        test_producer.start()

        runner.start()

        time.sleep(10)  # run for 10 seconds

        runner.stop()
        print('Runner stopping')
        runner.join()
        print('Runner exited')

        test_producer.stop()
        print('Producer stopping')
        test_producer.join()
        print('Producer exited')

        with local_db_mngr as bench:
            print(bench.get_run_nodes(test_run_id))
Example #14
    def test_delete_topics(self):
        """
        Caution! This test is supposed to run in a test environment. Do not use it in a production environment,
        as it is supposed to delete all topics! The test environment can be started with
        tests/resources docker-compose.yml
        :return: -
        """
        # get all topics...
        consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers)
        topics = consumer.topics()

        # ...and delete them
        client = KafkaAdminClient(bootstrap_servers=bootstrap_servers)
        client.delete_topics(topics)

        # ...now check if they were all deleted
        topics = consumer.topics()
        assert (len(topics) == 0)
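
Topic deletion is asynchronous on the broker side, so the final assert above can race the actual removal. A hedged variant that polls until the topics disappear (the 30-second budget is an assumption):

import time

# poll until the broker has actually removed the topics (deletion is async)
deadline = time.time() + 30  # assumed 30-second budget
while time.time() < deadline and consumer.topics():
    time.sleep(1)
assert len(consumer.topics()) == 0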
Example #15
class TestSimpleElasticConsumer(object):

    def setup_class(self):
        self.admin = KafkaAdminClient(bootstrap_servers='localhost:9092')
        self.consumer = SimpleElasticConsumer('configs/elastic/elastic-consumer-simple-test.yml')
        self.bulk_consumer = BulkElasticConsumer('configs/elastic/elastic-consumer-bulk-test.yml')
        self.producer = LineProducer("configs/elastic/json-lines-producer.yml")
        self.producer.process()

    def teardown_class(self):
        self.admin.delete_topics(['json'])
        self.consumer.close()
        self.bulk_consumer.close()
        self.admin.close()

    #@pytest.mark.skip()
    def test_consume(self):
        result = self.consumer.consume()
        assert result

    # @pytest.mark.skip()
    def test_consume_bulk(self):
        result = self.bulk_consumer.consume()
        assert result
Example #16
    def cleanup():
        filter = flask.request.args.get('filter')
        if filter is None:
            filter = ""

        consumer = openConsumer(brokerArray)
        admin_client = KafkaAdminClient(bootstrap_servers=brokerArray)

        topics = consumer.topics()

        returnTopicCount = 0

        for n in topics:
            log("Found Topic - " + n)
            if filter in n:
                tp = TopicPartition(n, 0)
                consumer.assign([tp])
                lastOffset = consumer.end_offsets([tp])[tp]
                log("Found Topic - " + str(lastOffset))
                readOffset = 0
                if lastOffset > 0:
                    readOffset = lastOffset - 1

                consumer.seek(tp, readOffset)

                messages = consumer.poll(timeout_ms=1000)
                log("Found Topic - " + str(messages))
                if tp in messages:
                    log("Removing Topic - " + str(messages[tp][0].timestamp))
                    if messages[tp][0].timestamp < (
                        (time.time() - DEFAULT_CLEANUP_TIME) * 1000):
                        log("Deleting Topic - " + n)
                        admin_client.delete_topics([n])
                        returnTopicCount += 1
                elif (readOffset == 0) or (len(messages) == 0):
                    log("Deleting Topic - " + n)
                    admin_client.delete_topics([n])
                    returnTopicCount += 1
        return "Deleted " + str(returnTopicCount) + " Topics"
Example #17
class AdminClient():
    def __init__(self, kafkaBroker):
        self.admin = KafkaAdminClient(bootstrap_servers=kafkaBroker)

    def CreateTopics(self, topics, numpartitions=3):
        """ Create topics """
        new_topics = [
            NewTopic(topic, num_partitions=numpartitions, replication_factor=1)
            for topic in topics
        ]

        # Call create_topics to create the topics. Note that kafka-python's
        # KafkaAdminClient returns a CreateTopicsResponse here; the
        # <topic, future> dict in the commented block below is confluent-kafka behavior.
        fs = self.admin.create_topics(new_topics)

        for topic in topics:
            print("Topic {} created".format(topic))
        '''
        # Wait for operation to finish.
        # Timeouts are preferably controlled by passing request_timeout=15.0
        # to the create_topics() call.
        # All futures will finish at the same time.

        for topic, f in fs.items():
            try:
                f.result()  # The result itself is None
                print("Topic {} created".format(topic))
            except Exception as e:
                print("Failed to create topic {}: {}".format(topic, e))
        '''

    def DeleteTopics(self, topics):
        """ delete topics """

        # Call delete_topics to delete the topics. By default this operation
        # returns while topics are still being deleted in the background, so
        # give the request some time (30s) to propagate in the cluster.
        fs = self.admin.delete_topics(topics, timeout_ms=30000)
        # Wait for operation to finish.
        for topic in topics:
            print("Topic {} deleted".format(topic))
Example #18
    def test_admin_client(self):
        """
        This test verifies that the Kafka Admin Client can still be used to manage Kafka.
        """

        admin_client = KafkaAdminClient(
            bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())

        # Create a topic with 3 partitions.
        new_topic_spec = NewTopic(name='test_admin_client',
                                  num_partitions=3,
                                  replication_factor=1)
        create_response = admin_client.create_topics([new_topic_spec])
        error_data = create_response.topic_errors
        self.assertEqual(len(error_data), 1)
        self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))

        # Alter topic (change some Kafka-level property).
        config_resource = ConfigResource(ConfigResourceType.TOPIC,
                                         new_topic_spec.name,
                                         {'flush.messages': 42})
        alter_response = admin_client.alter_configs([config_resource])
        error_data = alter_response.resources
        self.assertEqual(len(error_data), 1)
        self.assertEqual(error_data[0][0], 0)

        # Add 2 more partitions to topic.
        new_partitions_spec = {new_topic_spec.name: NewPartitions(5)}
        new_partitions_response = admin_client.create_partitions(
            new_partitions_spec)
        error_data = new_partitions_response.topic_errors
        self.assertEqual(len(error_data), 1)
        self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))

        # Delete a topic.
        delete_response = admin_client.delete_topics([new_topic_spec.name])
        error_data = delete_response.topic_error_codes
        self.assertEqual(len(error_data), 1)
        self.assertEqual(error_data[0], (new_topic_spec.name, 0))

        self.metrics.collect_final_metrics()
        self.metrics.assert_metric_increase('create_topics', 1)
        self.metrics.assert_metric_increase('alter_configs', 1)
        self.metrics.assert_metric_increase('create_partitions', 1)
        self.metrics.assert_metric_increase('delete_topics', 1)
Example #19
class Admin:
    def __init__(self, bootstrap_servers):
        self.admin = KafkaAdminClient(bootstrap_servers=bootstrap_servers)

    def create_topic(self, topic):
        try:
            topic = NewTopic(name=topic,
                             num_partitions=1,
                             replication_factor=1)
            self.admin.create_topics([topic], timeout_ms=2000)
        except Exception:
            pass

    def delete_topic(self, topic):
        self.admin.delete_topics([topic], timeout_ms=2000)

    def close(self):
        self.admin.close()
Example #20
def main(args):

    if args.dry:
        print("--- This is a DRY run. No topic will be deleted! ---")

    # set up the bootstrap servers for the kafka client
    bootstrap_servers = args.broker_list.split(",")

    kafka_consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers)
    kafka_topics = kafka_consumer.topics()

    kafka_admin_client = KafkaAdminClient(bootstrap_servers=bootstrap_servers)

    mongo_uri = 'mongodb://{0}/?replicaSet=rs0'.format(args.mongo_list)

    topics_col = MongoClient(mongo_uri).get_database(name="argo_msg").get_collection(name="topics")
    ams_topics = set()
    for top in topics_col.find():
        ams_topics.add("{0}.{1}".format(top["project_uuid"], top["name"]))

    deleted_topics_count = 0
    topics_to_be_deleted = kafka_consumer.topics().difference(ams_topics)
    for top_to_del in topics_to_be_deleted:
        print("Marking topic: " + str(top_to_del) + " for deletion (X)")
        if not args.dry:
            try:
                print(kafka_admin_client.delete_topics(topics=[top_to_del]))
                deleted_topics_count += 1
                print("---------------------------------------------------")
            except UnknownTopicOrPartitionError as e:
                print("Could not delete topic {0}. Exception: {1}"
                      .format(top_to_del, str(e)))
                print("---------------------------------------------------")
                continue

    print("Total Kafka topics: {0}".format(len(kafka_topics)))
    print("Total AMS topics: {0}".format(len(ams_topics)))
    print("Total Marked topics: {0}".format(len(topics_to_be_deleted)))
    print("Total Deleted topics: {0}".format(deleted_topics_count))
Example #21
class AdminClient(object):
    def __init__(self, **configs):
        self._instance = KafkaAdminClient(**configs)

    def create_topic(self, name: str, num_partitions: int,
                     replication_factor: int, **configs: Dict[str, str]):
        """

        See https://kafka.apache.org/documentation/#topicconfigs for topic configs options.

        :param name:                The name of the new topic.
        :param num_partitions:      The number of partitions within this topic.
        :param replication_factor:  The number of replicas to be created.
        :param configs:             Configs as a dict {str: str}.
        :return:                    Version of CreateTopicResponse class.
        """
        topic = NewTopic(name,
                         num_partitions,
                         replication_factor,
                         topic_configs=configs)
        return self._instance.create_topics([topic])

    def delete_topic(self, name: str):
        return self._instance.delete_topics([name])
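
A short usage sketch for the wrapper above (the broker address and config values are hypothetical):

# hypothetical usage: create a compacted topic, then drop it
client = AdminClient(bootstrap_servers='localhost:9092')
client.create_topic('demo-topic', 3, 1, **{'cleanup.policy': 'compact'})
client.delete_topic('demo-topic')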
Example #22
from kafka import KafkaAdminClient
import os

admin = KafkaAdminClient(bootstrap_servers='localhost:9092')

os.system('pkill -9 -f app.py')
os.system('pkill -9 -f consumer.py')
os.system('pkill -9 -f producer.py')

admin.delete_topics(['dashboard1', 'clickhouse1'])
admin.close()

#os.system('sudo docker stop $(sudo docker ps -a -q)')
Example #23
# NOTE: the snippet begins mid-way through its argparse setup in the source;
# the preamble below is a reconstruction and the flag names are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--kafka-broker", dest="kafka_broker", default="localhost:9092")
parser.add_argument("--topic", dest="topic", default="test")
parser.add_argument("--number-of-messages", dest="number_of_messages", type=int, default=10)
parser.add_argument("--consumer-timeout-ms", dest="consumer_timeout_ms",
                    type=int,
                    default=10)
args = parser.parse_args()

kafka_brokers = args.kafka_broker
number_of_msgs = args.number_of_messages
topic = args.topic
consumer_timeout_ms = args.consumer_timeout_ms

msg_send = []
msg_recv = []

admin = KafkaAdminClient(bootstrap_servers=kafka_brokers)

try:
    admin.delete_topics([topic], timeout_ms=5000)
except Exception:
    print("Cannot delete topic, might not exist")

consumer = KafkaConsumer(bootstrap_servers=kafka_brokers,
                         auto_offset_reset='earliest',
                         consumer_timeout_ms=consumer_timeout_ms)
producer = KafkaProducer(bootstrap_servers=kafka_brokers)

consumer.subscribe([topic])

for i in range(number_of_msgs):
    now_start = round(monotonic_ns())
    # NOTE: the send call is truncated in the source; this payload and the
    # msg_send bookkeeping are reconstructions
    producer.send(topic, bytes(str(now_start), "utf-8"))
    msg_send.append(now_start)
Example #24
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from kafka import KafkaAdminClient

servers = ['192.168.5.110:9092']

adminClient = KafkaAdminClient(bootstrap_servers=servers)

adminClient.delete_topics(['test'])

print(adminClient.list_consumer_groups())

adminClient.close()
Example #25
def _delete_kafka_topic(topic_name):
    """Delte the given kafka topic running locally."""
    kafka_client = KafkaAdminClient(bootstrap_servers='localhost:9092')
    kafka_client.delete_topics([topic_name])
Example #26
def delete_topics(topic='mykafka'):
    admin = KafkaAdminClient(bootstrap_servers='6.86.2.170:9092')
    topics = list_topics()
    if topic in topics:
        admin.delete_topics(topics=[topic])
Example #27
def main(argv):
    with open('register.json') as f:
        config = json.load(f)
    with open('tpc-config.json') as f:
        tpcconfig = json.load(f)

    if len(argv) > 0:
        bootstrapserver = argv[0]
    print(config['config']['connector.class'])
    print(config['name'])
    config['name'] = 'tpc-connector'
    print(config['name'])

    config['config']['database.history.kafka.topic'] = 'tpc-test'

    databasetype = config['config']['connector.class']
    connectiontype = config['config']['connector.class'].split('.')[3]
    print(databasetype)
    print(connectiontype)

    table = tpcconfig['jdbc'][connectiontype].get('table')
    if table is None:
        table = 'TPC.TEST'
    config['config']['table.include.list'] = table

    lowercase = tpcconfig['jdbc'][connectiontype].get('lowercase')
    if lowercase is None:
        lowercase = False

    conn = getjdbcconnection(config, tpcconfig, connectiontype)

    initsql(conn, config, tpcconfig)
    createTPCTable(conn, config, tpcconfig)
    enablecdctablesql(conn, config, tpcconfig)

    print('============')

    print('get status tpc connector')
    resp = requests.get(
        'http://' + tpcconfig['debezium.connect.server'] + '/connectors/tpc-connector/status',  verify=False)
    print(resp.content)
    print(resp.status_code)
    retvalue = json.loads(resp.content)
    print(retvalue)

    print('============')

    resp = requests.delete(
        'http://' + tpcconfig['debezium.connect.server'] + '/connectors/tpc-connector',  verify=False)
    print(resp.content)
    print(resp.status_code)
    if resp.status_code == 404:
        print('tpc-connector does not exist')
    else:
        print('tpc-connector deleted')

    databaseservername = config['config']['database.server.name']
    topicname = databaseservername + '.' + table
    historybootstrapserver = config['config'].get('database.history.kafka.bootstrap.servers')
    if historybootstrapserver is not None:
        bootstrapserver = historybootstrapserver.split(",")

    # check for the integrated (all-in-one docker) test setup
    if bootstrapserver == 'kafka:9092':

        print(bootstrapserver)
        kafkaadmin = KafkaAdminClient(bootstrap_servers=bootstrapserver)

        try:
            kafkaadmin.delete_topics(
                [topicname], 30)
        except Exception:
            print(topicname + ' TOPIC does not exist')
        else:
            print(topicname + ' TOPIC deleted')
        if historybootstrapserver is not None:
            try:
                kafkaadmin.delete_topics(
                    [config['config']['database.history.kafka.topic']], 30)
            except Exception:
                print(config['config']['database.history.kafka.topic'] +
                      ' TOPIC does not exist')
            else:
                print(config['config']
                      ['database.history.kafka.topic'] + ' TOPIC deleted')

        # start tpc connector
        print('start tpc connector')
        resp = requests.post(
            'http://' + tpcconfig['debezium.connect.server'] + '/connectors', headers={'content-type': 'application/json'}, data=json.dumps(config), verify=False)
        print(resp.content)
        print(resp.status_code)
        retvalue = json.loads(resp.content)
        print(retvalue)

        while resp.status_code > 200:
            print('get status tpc connector')
            resp = requests.get(
                'http://' + tpcconfig['debezium.connect.server'] + '/connectors/tpc-connector/status',  verify=False)
            print(resp.content)
            print(resp.status_code)
            retvalue = json.loads(resp.content)
            print(retvalue)
            time.sleep(1)

    conn.jconn.setAutoCommit(False)
    for x in range(len(tpcconfig['tpc']['commit.intervals'])):
        print(tpcconfig['tpc']['commit.intervals'][x])
        curs = conn.cursor()
        for y in range(int(tpcconfig['tpc']['count'])):
            curs.execute(tpcconfig['sql']['insert'])
            if ((y % (tpcconfig['tpc']['commit.intervals'][x])) == (tpcconfig['tpc']['commit.intervals'][x] - 1)):
                conn.commit()
        conn.commit()
        topicexport(bootstrapserver, topicname, int(
            tpcconfig['tpc']['count']), tpcconfig['tpc']['commit.intervals'][x])

        kafkaadmin.delete_topics([topicname], 30)  # NOTE: kafkaadmin is only defined in the 'kafka:9092' branch above
        print('Wait 30 seconds for TOPIC clean up')
        time.sleep(30)
Example #28
    used_topics = (
        "topic_oferte",
        "topic_rezultat",
        "topic_oferte_procesate",
        "topic_notificare_procesor_mesaje",
    )

    # delete the topics, if they already exist
    print("Deleting existing topics...")

    kafka_topics = admin.list_topics()
    for topic in kafka_topics:
        if topic in used_topics:
            print("\tSe sterge {}...".format(topic))
            admin.delete_topics(topics=[topic], timeout_ms=2000)

            # wait a bit for the deletion to take effect
            time.sleep(2)

    # create the topics the application needs
    print("Creating the required topics:")
    lista_topicuri = [
        NewTopic(name=used_topics[0], num_partitions=4, replication_factor=1),
        NewTopic(name=used_topics[1], num_partitions=1, replication_factor=1),
        NewTopic(name=used_topics[2], num_partitions=1, replication_factor=1),
        NewTopic(name=used_topics[3], num_partitions=1, replication_factor=1)
    ]
    for topic in lista_topicuri:
        print("\t{}".format(topic.name))
    admin.create_topics(lista_topicuri, timeout_ms=3000)
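
The fixed time.sleep(2) between delete and create above can be flaky. A hedged alternative is to poll list_topics() until the deleted topics are gone (the 10-second budget is an assumption):

import time

# poll instead of sleeping a fixed interval after delete_topics()
deadline = time.time() + 10  # assumed 10-second budget
while time.time() < deadline and set(used_topics) & set(admin.list_topics()):
    time.sleep(0.5)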
Example #29
def delete_topics(topic='test'):
    admin = KafkaAdminClient(bootstrap_servers='localhost:9092')
    topics = list_topics()
    if topic in topics:
        admin.delete_topics(topics=[topic])
Example #30
class TestLineProducer(object):
    def setup_class(self):
        self.admin = KafkaAdminClient(bootstrap_servers='localhost:9092')
        self.producer = LineProducer('configs/lines/producer.yml')
        self.producer_gz = LineProducer('configs/lines/producer_gz.yml')
        self.producer_bz2 = LineProducer('configs/lines/producer_bz2.yml')
        self.consumer = SimpleConsumer('configs/lines/consumer.yml')
        self.consumer_gz = SimpleConsumer('configs/lines/consumer_gz.yml')
        self.consumer_bz2 = SimpleConsumer('configs/lines/consumer_bz2.yml')

        self.ntriples_producer = SortedNTriplesCollectorProducer(
            'configs/nt/producer.yml')
        self.ntriples_consumer = BulkElasticConsumer('configs/nt/consumer.yml')

    def teardown_class(self):
        self.consumer.close()
        self.consumer_gz.close()
        self.consumer_bz2.close()
        for topic in ['test-lines-gz', 'test-lines', 'test-lines-bz2',
                      'test-sorted-nt-resource']:
            try:
                self.admin.delete_topics([topic])
            except UnknownTopicOrPartitionError:
                pass
        self.admin.close()

    def test_ntriples_producer(self):
        self.ntriples_producer.process()
        assert self.ntriples_consumer.consume()

    # the expected key/message pairs match the fixture files, including the
    # original "a forth line" spelling
    EXPECTED_LINES = [
        ('0', "This is a line"),
        ('1', "and another line"),
        ('2', "a third line"),
        ('3', "a forth line"),
        ('4', "a lot of lines now"),
    ]

    def _assert_lines(self, consumer):
        for expected_key, expected_message in self.EXPECTED_LINES:
            key = None
            message = None
            while key is None and message is None:
                key, message = consumer.consume()
            assert key == expected_key
            assert message == expected_message

    #@pytest.mark.skip()
    def test_produce(self):
        self.producer.process()
        self._assert_lines(self.consumer)

    #@pytest.mark.skip("Currently way too slow")
    def test_produce_gz(self):
        self.producer_gz.process()
        self._assert_lines(self.consumer_gz)

    # @pytest.mark.skip("Currently way too slow")
    def test_produce_bz2(self):
        self.producer_bz2.process()
        self._assert_lines(self.consumer_bz2)