Example #1
def delete_kafka_topic():
    logger.info("Request received - DELETE /delete_kafka_topic")
    if not request.is_json:
        logger.warning("Format not valid")
        return 'Format not valid', 400
    try:
        admin_client = KafkaAdminClient(bootstrap_servers=kafka_ip_port,
                                        client_id='delete_kafka_topic')

        # Parse JSON
        data = request.get_json()
        logger.info("Data received: %s", data)
        topic = data["topic"]

        logger.info("Deleting topic %s in Kafka", topic)

        topic_list = []
        topic_list.append(topic)
        admin_client.delete_topics(topics=topic_list)
        admin_client.close()
    except Exception as e:
        logger.error("Error while parsing request")
        logger.exception(e)
        return str(e), 400
    return '', 201
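
As a usage sketch (not part of the original snippet): assuming the Flask app above is served at http://localhost:5000, the route can be exercised with requests; the base URL and topic name are placeholders.

import requests

# Placeholder base URL for the Flask app exposing DELETE /delete_kafka_topic.
BASE_URL = "http://localhost:5000"

resp = requests.delete(f"{BASE_URL}/delete_kafka_topic",
                       json={"topic": "demo-topic"})
print(resp.status_code)  # expect 201 on success, 400 on a bad request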
Example #2
    def get(self, topicName):
        """
    Get Topic Configuration.

    """
        app.logger.info(
            "Request to get Configuration for topic {0}.".format(topicName))
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            config_list = []
            # Use a distinct name here: reusing `config` would shadow the
            # module-level config dict read a few lines above and raise
            # UnboundLocalError before the connection is even made.
            resource = ConfigResource(ConfigResourceType.TOPIC, topicName)
            topic_configs = admin.describe_configs([resource])
            topic_config = topic_configs[0].resources[0]
            # resources[0] is a tuple; index 4 holds the list of config entries.
            for c in topic_config[4]:
                config_list.append({'key': c[0], 'value': c[1]})
            return config_list

        except Exception as e:
            ns_topic.abort(500, str(e.args))
        finally:
            admin.close()
Example #3
    def create_topic(self, topic):
        """ Creates a topic at Kafka.

        Arguments:
            topic (str): Name of the topic to create
        """
        try:
            admin_client = KafkaAdminClient(
                bootstrap_servers=self.bootstrap_servers,
                security_protocol=self.security_protocol,
                ssl_cafile=self.ssl_cafile,
                ssl_certfile=self.ssl_certfile,
                ssl_keyfile=self.ssl_keyfile,
            )
            topic_list = []
            topic_list.append(
                NewTopic(topic, num_partitions=1, replication_factor=1))
            admin_client.create_topics(new_topics=topic_list,
                                       validate_only=False)
            admin_client.close()

        except TopicAlreadyExistsError:
            # we are fine if the topic already exists
            logger.info("Topic already exists")
        except Exception as ex:
            logger.error("Could not create topic %s: %s", topic, str(ex))
            # Re-raise as-is to preserve the original type and traceback.
            raise
Example #4
    def __init__(self, topicName):
        admin_client = KafkaAdminClient(
            bootstrap_servers=['kafka1:9092', 'kafka2:9092', 'kafka3:9092'],
            client_id='disco_eventproducer_admin')

        try:
            topic_list = [
                NewTopic(name=topicName,
                         num_partitions=1,
                         replication_factor=2,
                         topic_configs={'retention.ms': 30758400000})
            ]
            admin_client.create_topics(new_topics=topic_list,
                                       validate_only=False)
        except Exception as e:
            logging.warning(str(e))
        finally:
            admin_client.close()

        self.producer = KafkaProducer(
            bootstrap_servers='localhost:9092',
            value_serializer=lambda v: msgpack.packb(v, use_bin_type=True),
            compression_type='snappy')

        self.topicName = topicName
Example #5
def setup(topic_name):
    # First, check if the topic already exists in kafka
    kafka_client = KafkaClient(bootstrap_servers=KAFKA_SERVER,
                               api_version=(2, 5, 0))

    future = kafka_client.cluster.request_update()
    kafka_client.poll(future=future)

    metadata = kafka_client.cluster
    current_topics = metadata.topics()

    kafka_client.close()

    print('Active topics:', current_topics)

    if topic_name not in current_topics:
        print(f'Creating topic {topic_name}...')
        kafka_admin_client = KafkaAdminClient(bootstrap_servers=KAFKA_SERVER,
                                              api_version=(2, 5, 0))

        topic_list = [
            NewTopic(name=topic_name, num_partitions=1, replication_factor=1)
        ]
        kafka_admin_client.create_topics(new_topics=topic_list,
                                         validate_only=False)

        kafka_admin_client.close()
    else:
        print(f'Topic {topic_name} exists')
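
A shorter variant of the same existence check is possible because KafkaAdminClient itself exposes list_topics(), avoiding the manual metadata update; a sketch under the same KAFKA_SERVER assumption:

from kafka.admin import KafkaAdminClient, NewTopic

def setup_compact(topic_name):
    # Same outcome as setup() above, but the admin client fetches the metadata.
    admin = KafkaAdminClient(bootstrap_servers=KAFKA_SERVER, api_version=(2, 5, 0))
    try:
        if topic_name not in admin.list_topics():
            print(f'Creating topic {topic_name}...')
            admin.create_topics(new_topics=[
                NewTopic(name=topic_name, num_partitions=1, replication_factor=1)
            ])
        else:
            print(f'Topic {topic_name} exists')
    finally:
        admin.close()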
Example #6
    def put(self, topicName):
        """
    Update Topic Configuration.

    """
        ckey = request.json['key']
        cvalue = request.json['value']
        app.logger.info(
            "Request to update configuration for topic {0} for key {1} and value {2}."
            .format(topicName, ckey, cvalue))
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            new_config = ConfigResource(ConfigResourceType.TOPIC, topicName,
                                        {ckey: cvalue})
            result = admin.alter_configs([new_config])

        except UnknownTopicOrPartitionError as e:
            api.abort(400, e.description)
        except Exception as e:
            ns_topic.abort(500, str(e.args))
        finally:
            admin.close()

        if result.resources[0][0] == 0:
            return {"configured": topicName}
        else:
            api.abort(400, "Bad Request(" + result.resources[0][1] + ")")
Example #7
    def get(self, serviceName):
        """
    Get Service State.

    """
        app.logger.info(
            "Request to get details for service {0}.".format(serviceName))
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            state_list = []
            if serviceName.lower() == 'kafka':
                for n in admin.describe_cluster()['brokers']:
                    h = n['host']
                    s = remote_execute(h, serviceName.lower(), 'get')
                    state_list.append({'host': h, 'state': s})
            return state_list

        except Exception as e:
            ns_service.abort(500, str(e.args))
        finally:
            admin.close()
Example #8
    def get(self, topicName):
        """
    Get Topic ACL.

    """
        app.logger.info("Request to get ACL for topic {0}.".format(topicName))
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            acl_filter = ACLFilter(principal=None,
                                   host="*",
                                   operation=ACLOperation.ANY,
                                   permission_type=ACLPermissionType.ANY,
                                   resource_pattern=ResourcePattern(
                                       ResourceType.TOPIC, topicName))
            acls, error = admin.describe_acls(acl_filter)
            acl_list = []
            for a in acls:
                principal = a.principal
                operation = a.operation.name
                acl_list.append({'user': principal, 'type': operation})
            return acl_list

        except Exception as e:
            ns_acl.abort(500, str(e.args))
        finally:
            admin.close()
Example #9
def handle(event, context):
    if event.method == 'POST':
        data = json.loads(event.body)
        if "topic" not in data:
            return {
                "statusCode": 400,
                "body": "Format not valid"
            }
        try:
            topic = data["topic"]
            admin_client = KafkaAdminClient(bootstrap_servers="kafka.deployment8:9092", client_id='create_kafka_topic')
            topic_list = []
            topic_list.append(NewTopic(name=topic, num_partitions=1, replication_factor=1))
            admin_client.create_topics(new_topics=topic_list, validate_only=False)
            admin_client.close()
        except Exception as e:
            return {
                "statusCode": 400,
                "body": "".format(e)
            }
        return {
            "statusCode": 200,
            "body": "OK"
        }
    else:
        return {
            "statusCode": 200,
            "body": "No action for this endpoint"
        }
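
handle() follows an OpenFaaS-style handle(event, context) signature; for a local smoke test the event can be stubbed (SimpleNamespace stands in for the framework's event type, and the broker named in the handler must be reachable):

import json
from types import SimpleNamespace

# Stand-in for the framework's event object; only .method and .body are read.
event = SimpleNamespace(method="POST",
                        body=json.dumps({"topic": "demo-topic"}))
print(handle(event, context=None))  # {'statusCode': 200, 'body': 'OK'} on success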
Example #10
    def delete(self, topicName):
        """
    Delete Topic.

    """
        app.logger.info(
            "Request to delete topic witn name {0}.".format(topicName))
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            result = admin.delete_topics([topicName])

        except UnknownTopicOrPartitionError as e:
            api.abort(400, e.description)
        except Exception as e:
            api.abort(500, str(e.args))
        finally:
            admin.close()

        app.logger.debug(result)

        if result.topic_error_codes[0][1] == 0:
            return {"deleted": topicName}
        else:
            api.abort(400, "Bad Request(Topic Deletion Failed)")
Example #11
    def get(self, topicName):
        """
    Get Topic Detail.

    """
        app.logger.info(
            "Request to get details for topic {0}.".format(topicName))
        try:
            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            result = admin.describe_topics([topicName])

        except UnknownTopicOrPartitionError as e:
            api.abort(500, e.description)
        except Exception as e:
            api.abort(500, str(e.args))
        finally:
            admin.close()
        app.logger.debug(result)

        if result[0]['error_code'] == 0:
            return {
                'partitions': len(result[0]['partitions']),
                'replicas': len(result[0]['partitions'][0]['replicas'])
            }
        else:
            api.abort(400, "Bad Request(Wrong Topic Name)")
Example #12
def create_kafka_topic():
    logger.info("Request received - POST /create_kafka_topic")
    if not request.is_json:
        logger.warning("Format not valid")
        return 'Format not valid', 400
    try:
        admin_client = KafkaAdminClient(bootstrap_servers=kafka_ip_port,
                                        client_id='create_kafka_topic')

        # Parse JSON
        data = request.get_json()
        logger.info("Data received: %s", data)
        topic = data["topic"]

        logger.info("Creating topic %s in Kafka", topic)

        topic_list = []
        topic_list.append(
            NewTopic(name=topic, num_partitions=1, replication_factor=1))
        admin_client.create_topics(new_topics=topic_list, validate_only=False)
        admin_client.close()
    except Exception as e:
        logger.error("Error while parsing request")
        logger.exception(e)
        return str(e), 400
    return '', 201
Example #13
    def __init__(self,
                 threshold,
                 startTime,
                 endTime,
                 timeWindow,
                 probeData,
                 topicIn,
                 topicOut,
                 slideStep=3600):
        self.threshold = threshold

        self.startTime = startTime
        self.endTime = endTime
        self.timeWindow = timeWindow

        # Slide the window by this amount
        self.slideWindow = slideStep
        # Ignore disconnection events followed by a reconnect within
        # discoProbesWindow seconds
        # Also report only probes disconnected discoProbesWindow
        # seconds before/after the burst starting time
        self.discoProbesWindow = 300

        self.probeData = probeData
        self.eventData = defaultdict(list)

        self.numTotalProbes = {}
        self.initNumProbes()

        self.disconnectedProbes = {}

        admin_client = KafkaAdminClient(
            bootstrap_servers=['kafka1:9092', 'kafka2:9092', 'kafka3:9092'],
            client_id='disco_disco_admin')

        try:
            topic_list = [
                NewTopic(name=topicOut,
                         num_partitions=1,
                         replication_factor=2,
                         topic_configs={'retention.ms': 30758400000})
            ]
            admin_client.create_topics(new_topics=topic_list,
                                       validate_only=False)
        except Exception as e:
            logging.warning(str(e))
        finally:
            admin_client.close()

        self.producer = KafkaProducer(
            bootstrap_servers='localhost:9092',
            value_serializer=lambda v: msgpack.packb(v, use_bin_type=True),
            compression_type='snappy')

        self.topicIn = topicIn
        self.topicOut = topicOut

        self.executor = ProcessPoolExecutor(max_workers=10)
Example #14
def create_topic(bootstrap_ip_port, kafka_topic, num_parts, repli_factor):
    admin_client = KafkaAdminClient(bootstrap_servers=[bootstrap_ip_port])
    topic_list = []
    topic_list.append(
        NewTopic(name=kafka_topic,
                 num_partitions=int(num_parts),
                 replication_factor=int(repli_factor)))
    try:
        admin_client.create_topics(new_topics=topic_list, validate_only=False)
    except Exception as ex:
        # Hand the exception back to the caller instead of raising.
        return ex
    finally:
        # Note: a `return` inside this `finally` would swallow the
        # exception-path return above, so the success value comes last.
        admin_client.close()
    return 1
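
The partition and replication counts arrive as strings (they are cast with int() inside), so a call, assuming a local broker, looks like:

result = create_topic("localhost:9092", "demo-topic", "3", "1")
if result == 1:
    print("Topic created")
else:
    print(f"Topic creation failed: {result}")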
Example #15
def get_all_loader_topics(admin_client: KafkaAdminClient) -> List[str]:

    # TODO: Make this method use a regex on the topic name pattern instead
    LOG.info("Fetching topic names")
    topics: Set[str] = admin_client._client.cluster.topics(
        exclude_internal_topics=True)

    LOG.debug("Creating Kafka Admin Client")
    admin_client.close()

    return [
        topic for topic in topics
        if not any([True for excluded in EXCLUDED_TOPICS if excluded in topic])
    ]
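
One possible shape for the regex variant suggested by the TODO, with an illustrative pattern (the real loader topic naming convention is not shown here):

import re
from typing import List

from kafka.admin import KafkaAdminClient

def get_loader_topics_by_pattern(admin_client: KafkaAdminClient,
                                 pattern: str = r"^loader\.") -> List[str]:
    # Illustrative regex-based filter; the default pattern is an assumption.
    topics = admin_client._client.cluster.topics(exclude_internal_topics=True)
    regex = re.compile(pattern)
    return [topic for topic in topics if regex.search(topic)]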
Example #16
    def __init__(self, streamType, streamName, startTime, disconnectedProbes,
                 level, topicIn, topicOut):
        # Logging
        FORMAT = '%(asctime)s %(processName)s %(message)s'
        logging.basicConfig(format=FORMAT,
                            filename='disco-probetracker.log',
                            level=logging.WARN,
                            datefmt='%Y-%m-%d %H:%M:%S')
        logging.info("Probe tracker started: {} {} {} {}".format(
            streamType, streamName, startTime, disconnectedProbes))

        self.streamType = streamType
        self.streamName = streamName
        self.startTime = startTime
        self.level = level
        self.topicIn = topicIn
        self.disconnectedProbes = disconnectedProbes
        self.nbProbesThreshold = len(disconnectedProbes) / 2
        self.reconnectedProbes = {}
        self.topicName = topicOut

        admin_client = KafkaAdminClient(
            bootstrap_servers=['kafka1:9092', 'kafka2:9092', 'kafka3:9092'],
            client_id='disco_disco_admin')

        try:
            topic_list = [
                NewTopic(name=self.topicName,
                         num_partitions=1,
                         replication_factor=2,
                         topic_configs={'retention.ms': 30758400000})
            ]
            admin_client.create_topics(new_topics=topic_list,
                                       validate_only=False)
        except Exception as e:
            # Most likely the topic already exists; log and continue.
            logging.warning(str(e))
        finally:
            admin_client.close()

        self.producer = KafkaProducer(
            bootstrap_servers='localhost:9092',
            value_serializer=lambda v: msgpack.packb(v, use_bin_type=True),
            compression_type='snappy')

        self.consumer = KafkaConsumer(
            enable_auto_commit=False,
            bootstrap_servers=['localhost:9092'],
            value_deserializer=lambda v: msgpack.unpackb(v, raw=False))
        self.consumer.subscribe(topicIn)
Example #17
def pushRIBData(AF, collector, startts, endts):

    stream = getBGPStream("ribs", AF, [collector], startts, endts)
    topicName = "ihr_bgp_" + collector + "_rib"
    admin_client = KafkaAdminClient(
        bootstrap_servers=['kafka1:9092', 'kafka2:9092', 'kafka3:9092'],
        client_id='bgp_producer_admin')

    try:
        topic_list = [
            NewTopic(name=topicName, num_partitions=1, replication_factor=1)
        ]
        admin_client.create_topics(new_topics=topic_list, validate_only=False)
    except Exception:
        # The topic may already exist; ignore and continue.
        pass
    admin_client.close()

    stream.start()

    producer = KafkaProducer(
        bootstrap_servers=['kafka1:9092', 'kafka2:9092', 'kafka3:9092'],
        # acks=0,
        value_serializer=lambda v: msgpack.packb(v, use_bin_type=True),
        linger_ms=1000,
        request_timeout_ms=300000,
        compression_type='snappy')

    rec = BGPRecord()

    while stream and stream.get_next_record(rec):
        completeRecord = {}
        completeRecord["rec"] = getRecordDict(rec)
        completeRecord["elements"] = []

        recordTimeStamp = rec.time

        recordTimeStamp = int(recordTimeStamp) * 1000

        elem = rec.get_next_elem()

        while elem:
            elementDict = getElementDict(elem)
            completeRecord["elements"].append(elementDict)
            elem = rec.get_next_elem()

        producer.send(topicName, completeRecord, timestamp_ms=recordTimeStamp)

    producer.close()
Example #18
def handle(event, context):
    if event.method == "POST":
        try:
            data = json.loads(event.body)
            topic = data["topic"]

            admin_client = KafkaAdminClient(
                bootstrap_servers="kafka.deployment8:9092",
                client_id="delete_kafka_topic",
            )

            admin_client.delete_topics(topics=[topic])
            admin_client.close()
            return {"statusCode": 201, "body": "No Content"}
        except Exception as e:
            return {"statusCode": 400, "body": f"Error parsing request: {e}"}
    else:
        return {"statusCode": 200, "body": "No action for this endpoint"}
Example #19
    def get(self):
        """
    Get List of Topics.

    """
        app.logger.info("Request to get list of topics.")
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            return admin.list_topics()

        except Exception as e:
            ns_topic.abort(500, str(e.args))
        finally:
            admin.close()
Example #20
 def create_topic_if_needed(self, messaging_properties, topic_config_properties):
     if topic_config_properties.auto_create:
         # KafkaAdminClient is picky about which keyword arguments are passed in, so build the parameters from KafkaAdminClient.DEFAULT_CONFIG
         config = {key: messaging_properties.config.get(key) for key in KafkaAdminClient.DEFAULT_CONFIG if messaging_properties.config.get(key) is not None}
         config['bootstrap_servers'] = messaging_properties.connection_address
         config['client_id'] = 'ignition'
         admin_client = KafkaAdminClient(**config)
         try:
             logger.info("Creating topic {0} with replication factor {1}, partitions {2} and config {3}".format(topic_config_properties.name, topic_config_properties.replication_factor, topic_config_properties.num_partitions, topic_config_properties.config))
             topic_list = [NewTopic(name=topic_config_properties.name, num_partitions=topic_config_properties.num_partitions, replication_factor=topic_config_properties.replication_factor, topic_configs=topic_config_properties.config)]
             admin_client.create_topics(new_topics=topic_list, validate_only=False)
         except TopicAlreadyExistsError as _:
             logger.info("Topic {0} already exists, not creating".format(topic_config_properties.name))
         finally:
             try:
                 admin_client.close()
             except Exception as e:
                 logger.debug("Exception closing Kafka admin client {0}".format(str(e)))
     else:
         logger.info("Not creating job queue topic {0}".format(topic_config_properties.name))
Example #21
  def init(self):
    if 'INIT_KAFKA_CREATE_CHANNELS' not in self.config:
      return
    if self.reconnect() is None:
      return
    admin_client = KafkaAdminClient(bootstrap_servers=self.url_out) # group_id=self.id_group
    channels = self.config['INIT_KAFKA_CREATE_CHANNELS']
    achs = channels.split(';')
    topic_list = []
    for channel in achs:
      if self.verbose:
        print("DBG: createTopic in Kafka '%s': topic='%s'" % (self.url_out, channel))
      topic_list.append(NewTopic(name=channel, num_partitions=1, replication_factor=1))
    res = None
    try:
      res = admin_client.create_topics(new_topics=topic_list, validate_only=False)
    except Exception as e:
      print("ERR: createTopic in Kafka '%s': err='%s'" % (self.url_out, str(e)))

    admin_client.close()
Example #22
class KafkaClient:
    def __init__(self, client, url="localhost:9092"):
        try:
            self.client = KafkaAdminClient(bootstrap_servers=url,
                                           client_id=client)
        except Exception as excp:
            print(f"[ERROR] - could not connect to kafka: {str(excp)}")

    def create_topics(self, topics):
        topics_list = [
            NewTopic(name=topic, num_partitions=1, replication_factor=1)
            for topic in topics
        ]
        self.client.create_topics(new_topics=topics_list, validate_only=False)

    def close_topics(self, topics):
        self.client.delete_topics(topics)

    def close(self):
        self.client.close()
        print("[INFO] - kafka connection closed")
Example #23
def __delTopic(topic):
    try:
        admin_client = KafkaAdminClient(
            bootstrap_servers=current_app.config["KFKA_URL"], )
        # logging.info(f"admin_client={admin_client}")
        if admin_client is None:
            return "admin_client is none", RespCode.ARGS_ERROR
        topic_list = []
        topic_list.append(topic)

        res = admin_client.delete_topics(topic_list)
        admin_client.close()
        # logging.info(f"NewTopic res={res}")
        with MsgCount.get_lock():
            MsgCount.value = 0
        return ""
    except KafkaError as ke:
        admin_client.close()
        # logging.info(f"except ={dir(ke)}")
        # logging.info(f"except ={ke}")
        # logging.info(f"except ke.errno={ke.errno}")
        # logging.info(f"except ke.message={ke.message}")
        return f"{ke.errno}-{ke.message}", RespCode.ARGS_ERROR
    except Exception as e:
        admin_client.close()
        logging.info(f"exception ={e}")
        # logging.info(f"exception ={dir(e)}")
        return "exception ", RespCode.ARGS_ERROR
Example #24
def __createTopic(topic):
    try:
        admin_client = KafkaAdminClient(
            bootstrap_servers=current_app.config["KFKA_URL"], )
        # logging.info(f"admin_client={admin_client}")
        if admin_client is None:
            return "admin_client is none", RespCode.ARGS_ERROR
        topic_list = []
        topic_list.append(
            NewTopic(name=topic, num_partitions=3, replication_factor=2))

        res = admin_client.create_topics(new_topics=topic_list,
                                         validate_only=False)
        # logging.info(f"NewTopic res={res}")
        admin_client.close()
        return ""
    except KafkaError as ke:
        # logging.info(f"except ={dir(ke)}")
        # logging.info(f"except ={ke}")
        # logging.info(f"except ke.errno={ke.errno}")
        # logging.info(f"except ke.message={ke.message}")
        admin_client.close()
        return f"{ke.errno}-{ke.message}", RespCode.ARGS_ERROR

    except Exception as e:
        admin_client.close()
        return "exception ", RespCode.ARGS_ERROR
Example #25
    def post(self, topicName):
        """
    Create New Topic.

    """
        partitions = request.json['partitions']
        replicas = request.json['replicas']
        app.logger.info(
            "Request to create topic witn name {0} and {1} partitions and {2} replicas."
            .format(topicName, partitions, replicas))
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            newTopic = NewTopic(topicName, partitions, replicas)
            result = admin.create_topics([newTopic])

        except TopicAlreadyExistsError as e:
            api.abort(400, e.description)
        except IllegalArgumentError as e:
            api.abort(400, e.description)
        except InvalidReplicationFactorError as e:
            api.abort(400, e.description)
        except Exception as e:
            api.abort(500, str(e.args))
        finally:
            admin.close()

        app.logger.debug(result)

        if result.topic_errors[0][1] == 0:
            return {"created": topicName}
        else:
            api.abort(400, "Bad Request(Topic Creation Failed)")
Example #26
    def delete(self, topicName):
        """
    Delete Topic ACL.

    """
        args = acl_detail_parse.parse_args()
        auser = args['user']
        atype = args['type']
        acl_user = "******" + auser
        app.logger.info(
            "Request to delete ACL for topic {0} for user {1} and access type {2}."
            .format(topicName, auser, atype))
        try:

            admin = KafkaAdminClient(
                bootstrap_servers=config['cluster.broker.listeners'],
                security_protocol=config['cluster.security.protocol'],
                ssl_cafile=config['cluster.ssl.cafile'],
                ssl_certfile=config['cluster.ssl.certfile'],
                ssl_keyfile=config['cluster.ssl.keyfile'])
            results = admin.delete_acls([
                ACLFilter(principal=acl_user,
                          host="*",
                          operation=ACLOperation.ANY,
                          permission_type=ACLPermissionType.ANY,
                          resource_pattern=ResourcePattern(
                              ResourceType.TOPIC, topicName))
            ])

        except Exception as e:
            ns_acl.abort(500, str(e.args))
        finally:
            admin.close()

        if len(results[0][1]) > 0:
            return {"delete": "sucess"}
        else:
            ns_acl.abort(500, "Internal Error(Cannot delete any ACL)")
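
For the reverse operation, kafka-python also ships an ACL type and KafkaAdminClient.create_acls(); a sketch of granting read access, with placeholder principal and broker (the ACL fields mirror the ACLFilter used above):

from kafka.admin import (ACL, ACLOperation, ACLPermissionType,
                         KafkaAdminClient, ResourcePattern, ResourceType)

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")  # placeholder broker
acl = ACL(principal="User:alice",  # placeholder principal
          host="*",
          operation=ACLOperation.READ,
          permission_type=ACLPermissionType.ALLOW,
          resource_pattern=ResourcePattern(ResourceType.TOPIC, "demo-topic"))
result = admin.create_acls([acl])
admin.close()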
Example #27
class KafkaSchemaReader(Thread):
    def __init__(self, config, master_coordinator=None):
        Thread.__init__(self)
        self.master_coordinator = master_coordinator
        self.log = logging.getLogger("KafkaSchemaReader")
        self.timeout_ms = 200
        self.config = config
        self.subjects = {}
        self.schemas: Dict[int, TypedSchema] = {}
        self.global_schema_id = 0
        self.offset = 0
        self.admin_client = None
        self.schema_topic = None
        self.topic_replication_factor = self.config["replication_factor"]
        self.consumer = None
        self.queue = Queue()
        self.ready = False
        self.running = True
        self.id_lock = Lock()
        sentry_config = config.get("sentry", {"dsn": None}).copy()
        if "tags" not in sentry_config:
            sentry_config["tags"] = {}
        self.stats = StatsClient(sentry_config=sentry_config)

    def init_consumer(self):
        # Group not set on purpose, all consumers read the same data
        session_timeout_ms = self.config["session_timeout_ms"]
        request_timeout_ms = max(
            session_timeout_ms,
            KafkaConsumer.DEFAULT_CONFIG["request_timeout_ms"])
        self.consumer = KafkaConsumer(
            self.config["topic_name"],
            enable_auto_commit=False,
            api_version=(1, 0, 0),
            bootstrap_servers=self.config["bootstrap_uri"],
            client_id=self.config["client_id"],
            security_protocol=self.config["security_protocol"],
            ssl_cafile=self.config["ssl_cafile"],
            ssl_certfile=self.config["ssl_certfile"],
            ssl_keyfile=self.config["ssl_keyfile"],
            auto_offset_reset="earliest",
            session_timeout_ms=session_timeout_ms,
            request_timeout_ms=request_timeout_ms,
            kafka_client=KarapaceKafkaClient,
            metadata_max_age_ms=self.config["metadata_max_age_ms"],
        )

    def init_admin_client(self):
        try:
            self.admin_client = KafkaAdminClient(
                api_version_auto_timeout_ms=constants.
                API_VERSION_AUTO_TIMEOUT_MS,
                bootstrap_servers=self.config["bootstrap_uri"],
                client_id=self.config["client_id"],
                security_protocol=self.config["security_protocol"],
                ssl_cafile=self.config["ssl_cafile"],
                ssl_certfile=self.config["ssl_certfile"],
                ssl_keyfile=self.config["ssl_keyfile"],
            )
            return True
        except (NodeNotReadyError, NoBrokersAvailable, AssertionError):
            self.log.warning(
                "No Brokers available yet, retrying init_admin_client()")
            time.sleep(2.0)
        except:  # pylint: disable=bare-except
            self.log.exception(
                "Failed to initialize admin client, retrying init_admin_client()"
            )
            time.sleep(2.0)
        return False

    @staticmethod
    def get_new_schema_topic(config):
        return NewTopic(name=config["topic_name"],
                        num_partitions=constants.SCHEMA_TOPIC_NUM_PARTITIONS,
                        replication_factor=config["replication_factor"],
                        topic_configs={"cleanup.policy": "compact"})

    def create_schema_topic(self):
        schema_topic = self.get_new_schema_topic(self.config)
        try:
            self.log.info("Creating topic: %r", schema_topic)
            self.admin_client.create_topics(
                [schema_topic], timeout_ms=constants.TOPIC_CREATION_TIMEOUT_MS)
            self.log.info("Topic: %r created successfully",
                          self.config["topic_name"])
            self.schema_topic = schema_topic
            return True
        except TopicAlreadyExistsError:
            self.log.warning("Topic: %r already exists",
                             self.config["topic_name"])
            self.schema_topic = schema_topic
            return True
        except:  # pylint: disable=bare-except
            self.log.exception(
                "Failed to create topic: %r, retrying create_schema_topic()",
                self.config["topic_name"])
            time.sleep(5)
        return False

    def get_schema_id(self, new_schema):
        with self.id_lock:
            schemas = self.schemas.items()
        for schema_id, schema in schemas:
            if schema == new_schema:
                return schema_id
        with self.id_lock:
            self.global_schema_id += 1
            return self.global_schema_id

    def close(self):
        self.log.info("Closing schema_reader")
        self.running = False

    def run(self):
        while self.running:
            try:
                if not self.admin_client:
                    if self.init_admin_client() is False:
                        continue
                if not self.schema_topic:
                    if self.create_schema_topic() is False:
                        continue
                if not self.consumer:
                    self.init_consumer()
                self.handle_messages()
            except Exception as e:  # pylint: disable=broad-except
                if self.stats:
                    self.stats.unexpected_exception(ex=e,
                                                    where="schema_reader_loop")
                self.log.exception(
                    "Unexpected exception in schema reader loop")
        try:
            if self.admin_client:
                self.admin_client.close()
            if self.consumer:
                self.consumer.close()
        except Exception as e:  # pylint: disable=broad-except
            if self.stats:
                self.stats.unexpected_exception(ex=e,
                                                where="schema_reader_exit")
            self.log.exception("Unexpected exception closing schema reader")

    def handle_messages(self):
        raw_msgs = self.consumer.poll(timeout_ms=self.timeout_ms)
        if self.ready is False and raw_msgs == {}:
            self.ready = True
        add_offsets = False
        if self.master_coordinator is not None:
            master, _ = self.master_coordinator.get_master_info()
            # keep old behavior for True. When master is False, then we are a follower, so we should not accept direct
            # writes anyway. When master is None, then this particular node is waiting for a stable value, so any
            # messages off the topic are writes performed by another node
            if master is True:
                add_offsets = True

        for _, msgs in raw_msgs.items():
            for msg in msgs:
                try:
                    key = json.loads(msg.key.decode("utf8"))
                except json.JSONDecodeError:
                    self.log.exception(
                        "Invalid JSON in msg.key: %r, value: %r", msg.key,
                        msg.value)
                    continue

                value = None
                if msg.value:
                    try:
                        value = json.loads(msg.value.decode("utf8"))
                    except json.JSONDecodeError:
                        self.log.exception(
                            "Invalid JSON in msg.value: %r, key: %r",
                            msg.value, msg.key)
                        continue

                self.log.info(
                    "Read new record: key: %r, value: %r, offset: %r", key,
                    value, msg.offset)
                self.handle_msg(key, value)
                self.offset = msg.offset
                self.log.info("Handled message, current offset: %r",
                              self.offset)
                if self.ready and add_offsets:
                    self.queue.put(self.offset)

    def handle_msg(self, key: dict, value: dict):
        if key["keytype"] == "CONFIG":
            if "subject" in key and key["subject"] is not None:
                if not value:
                    self.log.info(
                        "Deleting compatibility config completely for subject: %r",
                        key["subject"])
                    self.subjects[key["subject"]].pop("compatibility", None)
                    return
                self.log.info("Setting subject: %r config to: %r, value: %r",
                              key["subject"], value["compatibilityLevel"],
                              value)
                if not key["subject"] in self.subjects:
                    self.log.info(
                        "Adding first version of subject: %r with no schemas",
                        key["subject"])
                    self.subjects[key["subject"]] = {"schemas": {}}
                subject_data = self.subjects.get(key["subject"])
                subject_data["compatibility"] = value["compatibilityLevel"]
            else:
                self.log.info("Setting global config to: %r, value: %r",
                              value["compatibilityLevel"], value)
                self.config["compatibility"] = value["compatibilityLevel"]
        elif key["keytype"] == "SCHEMA":
            if not value:
                subject, version = key["subject"], key["version"]
                self.log.info("Deleting subject: %r version: %r completely",
                              subject, version)
                if subject not in self.subjects:
                    self.log.error("Subject %s did not exist, should have",
                                   subject)
                elif version not in self.subjects[subject]["schemas"]:
                    self.log.error(
                        "Version %d for subject %s did not exist, should have",
                        version, subject)
                else:
                    self.subjects[subject]["schemas"].pop(version, None)
                return
            schema_type = value.get("schemaType", "AVRO")
            schema_str = value["schema"]
            try:
                typed_schema = TypedSchema.parse(
                    schema_type=SchemaType(schema_type), schema_str=schema_str)
            except InvalidSchema:
                try:
                    schema_json = json.loads(schema_str)
                    typed_schema = TypedSchema(
                        schema_type=SchemaType(schema_type),
                        schema=schema_json,
                        schema_str=schema_str)
                except JSONDecodeError:
                    self.log.error("Invalid json: %s", value["schema"])
                    return
            self.log.debug("Got typed schema %r", typed_schema)
            subject = value["subject"]
            if subject not in self.subjects:
                self.log.info("Adding first version of subject: %r, value: %r",
                              subject, value)
                self.subjects[subject] = {
                    "schemas": {
                        value["version"]: {
                            "schema": typed_schema,
                            "version": value["version"],
                            "id": value["id"],
                            "deleted": value.get("deleted", False),
                        }
                    }
                }
                self.log.info("Setting schema_id: %r with schema: %r",
                              value["id"], typed_schema)
                self.schemas[value["id"]] = typed_schema
                if value["id"] > self.global_schema_id:  # Not an existing schema
                    self.global_schema_id = value["id"]
            elif value.get("deleted", False) is True:
                self.log.info("Deleting subject: %r, version: %r", subject,
                              value["version"])
                if not value["version"] in self.subjects[subject]["schemas"]:
                    self.schemas[value["id"]] = typed_schema
                else:
                    self.subjects[subject]["schemas"][
                        value["version"]]["deleted"] = True
            elif value.get("deleted", False) is False:
                self.log.info("Adding new version of subject: %r, value: %r",
                              subject, value)
                self.subjects[subject]["schemas"][value["version"]] = {
                    "schema": typed_schema,
                    "version": value["version"],
                    "id": value["id"],
                    "deleted": value.get("deleted", False),
                }
                self.log.info("Setting schema_id: %r with schema: %r",
                              value["id"], value["schema"])
                with self.id_lock:
                    self.schemas[value["id"]] = typed_schema
                if value["id"] > self.global_schema_id:  # Not an existing schema
                    self.global_schema_id = value["id"]
        elif key["keytype"] == "DELETE_SUBJECT":
            self.log.info("Deleting subject: %r, value: %r", value["subject"],
                          value)
            if not value["subject"] in self.subjects:
                self.log.error("Subject: %r did not exist, should have",
                               value["subject"])
            else:
                updated_schemas = {
                    key:
                    self._delete_schema_below_version(schema, value["version"])
                    for key, schema in self.subjects[
                        value["subject"]]["schemas"].items()
                }
                self.subjects[value["subject"]]["schemas"] = updated_schemas
        elif key["keytype"] == "NOOP":  # for spec completeness
            pass

    @staticmethod
    def _delete_schema_below_version(schema, version):
        if schema["version"] <= version:
            schema["deleted"] = True
        return schema

    def get_schemas(self, subject):
        non_deleted_schemas = {
            key: val
            for key, val in self.subjects[subject]["schemas"].items()
            if val.get("deleted", False) is False
        }
        return non_deleted_schemas
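
Since KafkaSchemaReader subclasses Thread, typical usage is to build it with a Karapace-style config dict and start it; the values below are placeholders for a local setup and cover only the keys the class reads:

config = {
    "bootstrap_uri": "localhost:9092",   # placeholder broker
    "client_id": "karapace-schema-reader",
    "security_protocol": "PLAINTEXT",
    "ssl_cafile": None,
    "ssl_certfile": None,
    "ssl_keyfile": None,
    "topic_name": "_schemas",
    "replication_factor": 1,
    "session_timeout_ms": 10000,
    "metadata_max_age_ms": 60000,
}
reader = KafkaSchemaReader(config)
reader.start()   # run() sets up the admin client, topic and consumer, then polls
# ... serve requests ...
reader.close()   # flips self.running so run() exits
reader.join()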
Example #28
class SchemaBackup:
    def __init__(self, config_path, backup_path, topic_option=None):
        self.config = KarapaceBase.read_config(config_path)
        self.backup_location = backup_path
        self.topic_name = topic_option or self.config["topic_name"]
        self.log = logging.getLogger("SchemaBackup")
        self.consumer = None
        self.producer = None
        self.admin_client = None
        self.timeout_ms = 1000

    def init_consumer(self):
        self.consumer = KafkaConsumer(
            self.topic_name,
            enable_auto_commit=False,
            bootstrap_servers=self.config["bootstrap_uri"],
            client_id=self.config["client_id"],
            security_protocol=self.config["security_protocol"],
            ssl_cafile=self.config["ssl_cafile"],
            ssl_certfile=self.config["ssl_certfile"],
            ssl_keyfile=self.config["ssl_keyfile"],
            auto_offset_reset="earliest",
            metadata_max_age_ms=self.config["metadata_max_age_ms"],
            client_factory=KarapaceKafkaClient,
        )

    def init_producer(self):
        self.producer = KafkaProducer(
            bootstrap_servers=self.config["bootstrap_uri"],
            security_protocol=self.config["security_protocol"],
            ssl_cafile=self.config["ssl_cafile"],
            ssl_certfile=self.config["ssl_certfile"],
            ssl_keyfile=self.config["ssl_keyfile"],
            client_factory=KarapaceKafkaClient,
        )

    def init_admin_client(self):
        start_time = time.monotonic()
        wait_time = constants.MINUTE
        while True:
            if time.monotonic() - start_time > wait_time:
                raise Timeout(
                    f"Timeout ({wait_time}) on creating admin client")

            try:
                self.admin_client = KafkaAdminClient(
                    api_version_auto_timeout_ms=constants.
                    API_VERSION_AUTO_TIMEOUT_MS,
                    bootstrap_servers=self.config["bootstrap_uri"],
                    client_id=self.config["client_id"],
                    security_protocol=self.config["security_protocol"],
                    ssl_cafile=self.config["ssl_cafile"],
                    ssl_certfile=self.config["ssl_certfile"],
                    ssl_keyfile=self.config["ssl_keyfile"],
                    client_factory=KarapaceKafkaClient,
                )
                break
            except (NodeNotReadyError, NoBrokersAvailable, AssertionError):
                self.log.warning(
                    "No Brokers available yet, retrying init_admin_client()")
            except:  # pylint: disable=bare-except
                self.log.exception(
                    "Failed to initialize admin client, retrying init_admin_client()"
                )

            time.sleep(2.0)

    def _create_schema_topic_if_needed(self):
        if self.topic_name != self.config["topic_name"]:
            self.log.info(
                "Topic name overridden, not creating a topic with schema configuration"
            )
            return

        self.init_admin_client()

        start_time = time.monotonic()
        wait_time = constants.MINUTE
        while True:
            if time.monotonic() - start_time > wait_time:
                raise Timeout(
                    f"Timeout ({wait_time}) on creating admin client")

            schema_topic = KafkaSchemaReader.get_new_schema_topic(self.config)
            try:
                self.log.info("Creating schema topic: %r", schema_topic)
                self.admin_client.create_topics(
                    [schema_topic],
                    timeout_ms=constants.TOPIC_CREATION_TIMEOUT_MS)
                self.log.info("Topic: %r created successfully",
                              self.config["topic_name"])
                break
            except TopicAlreadyExistsError:
                self.log.info("Topic: %r already exists",
                              self.config["topic_name"])
                break
            except:  # pylint: disable=bare-except
                self.log.exception(
                    "Failed to create topic: %r, retrying _create_schema_topic_if_needed()",
                    self.config["topic_name"])
                time.sleep(5)

    def close(self):
        self.log.info("Closing schema backup reader")
        if self.consumer:
            self.consumer.close()
            self.consumer = None
        if self.producer:
            self.producer.close()
            self.producer = None
        if self.admin_client:
            self.admin_client.close()
            self.admin_client = None

    def request_backup(self):
        if not self.consumer:
            self.init_consumer()
        self.log.info("Starting schema backup read for topic: %r",
                      self.topic_name)
        values = []
        raw_msg = self.consumer.poll(timeout_ms=self.timeout_ms)
        for _, messages in raw_msg.items():
            for message in messages:
                key = message.key.decode("utf8")
                try:
                    key = json.loads(key)
                except json.JSONDecodeError:
                    self.log.debug(
                        "Invalid JSON in message.key: %r, value: %r",
                        message.key, message.value)
                value = None
                if message.value:
                    value = message.value.decode("utf8")
                    try:
                        value = json.loads(value)
                    except json.JSONDecodeError:
                        self.log.debug(
                            "Invalid JSON in message.value: %r, key: %r",
                            message.value, message.key)
                values.append((key, value))
        ser = json.dumps(values)
        if self.backup_location:
            with open(self.backup_location, "w") as fp:
                fp.write(ser)
                self.log.info("Schema backup written to %r",
                              self.backup_location)
        else:
            print(ser)
            self.log.info("Schema backup written to stdout")
        self.close()

    def restore_backup(self):
        if not os.path.exists(self.backup_location):
            raise BackupError("Backup location doesn't exist")

        self._create_schema_topic_if_needed()

        if not self.producer:
            self.init_producer()
        self.log.info("Starting backup restore for topic: %r", self.topic_name)

        values = None
        with open(self.backup_location, "r") as fp:
            raw_msg = fp.read()
            values = json.loads(raw_msg)
        if not values:
            raise BackupError("Nothing to restore in %s" %
                              self.backup_location)

        for item in values:
            key = encode_value(item[0])
            value = encode_value(item[1])
            future = self.producer.send(self.topic_name, key=key, value=value)
            self.producer.flush(timeout=self.timeout_ms)
            msg = future.get(self.timeout_ms)
            self.log.debug("Sent kafka msg key: %r, value: %r, offset: %r",
                           key, value, msg.offset)
        self.close()
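
Typical driver code for the class above; both file paths are hypothetical:

backup = SchemaBackup("karapace.config.json", "schemas.backup.json")
backup.request_backup()   # read the schema topic and write it to the backup file

# Later, for example against a freshly provisioned cluster:
backup = SchemaBackup("karapace.config.json", "schemas.backup.json")
backup.restore_backup()   # create the schema topic if needed and replay messages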
Example #29
class Admin(object):
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self.kwargs['bootstrap_servers'] = self.kwargs.get(
            'bootstrap_servers', ['localhost:9092'])
        self._client = None
        self.connect()

    def __enter__(self):
        return self

    def __exit__(self, errortype, value, traceback):
        self.close()

    def _connect(self):
        """Connect to kafka admin client"""
        try:
            self._client = KafkaAdminClient(**self.kwargs)
            logger.info('Connected to Kafka Admin client!')
        except Exception as ex:
            logger.error('Unable to connect to Kafka Admin client. %s',
                         str(ex))

    def connect(self):
        """Connect to kafka admin"""
        if self._client is None:
            self._connect()

    def create_topics(self, topics: List[dict]):
        """Create topics in kafka"""
        topics_to_create = []
        for topic in topics:
            _topic = NewTopic(name=topic['name'],
                              num_partitions=topic.get('num_partitions', 1),
                              replication_factor=topic.get(
                                  'replication_factor', 1))
            topics_to_create.append(_topic)
        try:
            self._client.create_topics(new_topics=topics_to_create,
                                       validate_only=False)
            logger.info('Topics %s created successfully', topics)
        except TopicAlreadyExistsError as ex:
            logger.debug('Skipping topic creation as topic already exists %s',
                         str(ex))
        except Exception as ex:
            logger.error('Unable to create topics. %s', str(ex))

    def delete_topics(self, topics: List[str]):
        """Delete topics"""
        try:
            self._client.delete_topics(topics)
            logger.info('Deleted topics: %s', topics)
        except UnknownTopicOrPartitionError as ex:
            logger.debug('Skipping topic delete as topic does not exist %s',
                         str(ex))
        except Exception as ex:
            logger.error('Unable to delete topics. %s', str(ex))

    def close(self):
        if self._client is None:
            return

        try:
            self._client.close()
            logger.info('Closed Kafka Admin client connection')
        except Exception as ex:
            logger.error('Unable to close Kafka Admin client connection. %s',
                         str(ex))
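
Because of the __enter__/__exit__ pair, the wrapper works as a context manager; a sketch assuming a local broker:

with Admin(bootstrap_servers=['localhost:9092']) as admin:
    admin.create_topics([{'name': 'demo-topic', 'num_partitions': 3}])
    admin.delete_topics(['obsolete-topic'])
# close() is called automatically on exit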
Example #30
from kafka.admin import KafkaAdminClient
from kafka import KafkaConsumer
from config import *

delete_topic = "mytopic"
admin_client = KafkaAdminClient(bootstrap_servers=BOOTSTRAP_SERVERS)
consumer = KafkaConsumer(bootstrap_servers=[BOOTSTRAP_SERVERS])

if delete_topic in consumer.topics():
    admin_client.delete_topics(topics=[delete_topic])

    if delete_topic not in consumer.topics():
        print(f"Topic: {delete_topic} deleted successfully !!")
    else:
        print(f"Unable to delete topic: {delete_topic}")

else:
    print(f"Topic: {delete_topic} not found !!")

consumer.close()
admin_client.close()