Example #1
# kafka_broker and topic are pytest fixtures; get_connect_str() is a test
# helper that builds the broker's host:port connect string.
from kafka import KafkaConsumer, TopicPartition


def test_paused(kafka_broker, topic):
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    topics = [TopicPartition(topic, 1)]
    consumer.assign(topics)
    assert set(topics) == consumer.assignment()
    assert set() == consumer.paused()

    consumer.pause(topics[0])
    assert set([topics[0]]) == consumer.paused()

    consumer.resume(topics[0])
    assert set() == consumer.paused()

    consumer.unsubscribe()
    assert set() == consumer.paused()

    consumer.close()
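
pause() and resume() take TopicPartition arguments and only gate fetching; the assignment and the consumer's position survive both calls, which is exactly what the assertions above check. A minimal standalone sketch of using pause() for backpressure, assuming a broker on localhost:9092 and a topic named 'events':

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092')  # assumed address
tp = TopicPartition('events', 0)                              # assumed topic
consumer.assign([tp])

consumer.pause(tp)                     # poll() now skips fetching from tp
print(consumer.poll(timeout_ms=100))   # {} while all assigned partitions are paused
consumer.resume(tp)                    # fetching resumes at the saved position
consumer.close()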
Example #3
import logging
import threading
from os.path import expanduser

from kafka import KafkaConsumer, TopicPartition

# Configs, Message and MagistralException come from the surrounding Magistral
# package; threadLock is shared by all consumer threads.
threadLock = threading.Lock()

# Silence the noisier kafka-python loggers.
logging.getLogger('kafka.conn').setLevel(logging.FATAL)
logging.getLogger('kafka.cluster').setLevel(logging.FATAL)
logging.getLogger('kafka.consumer.group').setLevel(logging.INFO)
logging.getLogger('kafka.consumer.fetcher').setLevel(logging.INFO)
logging.getLogger('kafka.coordinator.consumer').setLevel(logging.INFO)
logging.getLogger('kafka.producer.record_accumulator').setLevel(logging.INFO)


class GroupConsumer(threading.Thread):

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    def __init__(self, threadId, name, sKey, bootstrapServers, groupId,
                 permissions, token, cipher=None, uid=None):
        threading.Thread.__init__(self)
        self.threadId = threadId
        self.name = name

        self.group = groupId
        self.subKey = sKey
        self.cipher = cipher

        configs = Configs.consumerConfigs()
        configs["bootstrap_servers"] = bootstrapServers.split(',')
        configs["group_id"] = groupId
        configs['enable_auto_commit'] = False

        self.__isAlive = True

        # Both variants share the same settings; the authenticated one
        # (uid given) adds the SSL client certificates issued per token.
        kwargs = dict(
            bootstrap_servers=configs["bootstrap_servers"],
            check_crcs=False,
            exclude_internal_topics=True,
            session_timeout_ms=10000,
            reconnect_backoff_ms=10000,
            heartbeat_interval_ms=2000,
            retry_backoff_ms=500,
            fetch_min_bytes=64,
            fetch_max_wait_ms=96,
            enable_auto_commit=False,
            max_in_flight_requests_per_connection=4,
            api_version=(0, 10),
            group_id=groupId)

        if uid is not None:
            home = expanduser("~")
            kwargs.update(
                security_protocol='SSL',
                ssl_check_hostname=False,
                ssl_keyfile=home + '/magistral/' + token + '/key.pem',
                ssl_cafile=home + '/magistral/' + token + '/ca.pem',
                ssl_certfile=home + '/magistral/' + token + '/certificate.pem')

        self.__consumer = KafkaConsumer(**kwargs)

        self.permissions = permissions
        self.map = {}
        self.__offsets = {}

    def recordsTotally(self, data):
        # Total record count across all partitions of a poll() result.
        return sum(len(val) for val in data.values())

    def consumerRecord2Message(self, record):
        # ConsumerRecord fields: topic, partition, offset, timestamp, ...,
        # value at index 6; the first 41 characters of the topic name
        # (subscription key plus dot) are stripped off.
        payload = record[6]

        if self.cipher is not None:
            try:
                payload = self.cipher.decrypt(payload)
            except Exception:
                pass  # fall back to the raw payload if decryption fails

        return Message(record[0][41:], record[1], payload, record[2], record[3])

    def run(self):
        locked = threadLock.acquire(False)

        while self.__isAlive:
            try:
                data = self.__consumer.poll(512)
                for values in data.values():
                    for value in values:
                        msg = self.consumerRecord2Message(value)
                        listener = self.map.get(value[0], {}).get(msg.channel())
                        if listener is not None:
                            listener(msg)

                # Commit only after all listeners have seen the batch.
                if len(data) > 0:
                    self.__consumer.commit_async()

            except Exception:
                self.logger.exception("Error while processing poll() batch")

        if locked:
            threadLock.release()

#   ////////////////////////////////////////////////////////////////////////////////////

    def subscribe(self, topic, channel=-1, listener=None, callback=None):
        assert isinstance(channel, int), "Channel expected as int argument"
        if channel < -1:
            channel = -1

        etopic = self.subKey + "." + topic

        self.logger.debug("Subscribe -> %s : %s | key = %s", topic, channel, self.subKey)

        if self.permissions is None or len(self.permissions) == 0:
            raise MagistralException("User has no permissions for topic [" + topic + "].")

        # Resolve the channels this user may read: all granted channels for
        # channel == -1, otherwise just the requested one if it is granted.
        self.fch = []
        for meta in self.permissions:
            if meta.topic() != topic:
                continue
            if channel == -1:
                self.fch = meta.channels()
            elif channel in meta.channels():
                self.fch = [channel]

        if len(self.fch) == 0:
            npgex = "No permissions for topic [" + topic + "] granted"
            self.logger.error(npgex)
            raise MagistralException(npgex)

        if etopic not in self.map:
            self.map[etopic] = {}

        # Keep the current assignment and add one TopicPartition per granted
        # channel; a set avoids duplicate entries.
        tpas = set(self.__consumer.assignment() or [])
        for ch in self.fch:
            tpas.add(TopicPartition(etopic, ch))
            if listener is not None:
                self.map[etopic][ch] = listener

        self.__consumer.assign(list(tpas))

        if callback is not None:
            callback(self.__consumer.assignment())

        return self.__consumer.assignment()

    def unsubscribe(self, topic):
        self.__consumer.assign([])
        self.map.pop(self.subKey + "." + topic, None)

    def close(self):
        self.__isAlive = False
        self.__consumer.close()
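
GroupConsumer boils down to a common pattern: a consumer-group member with auto-commit disabled polls on a dedicated thread, routes each record to a registered listener, and commits only afterwards. A minimal sketch of that loop with plain kafka-python; the broker address, topic name and group id are assumptions:

import threading
from kafka import KafkaConsumer

def run_listener(listener, stop_event):
    consumer = KafkaConsumer('events',                    # assumed topic
                             bootstrap_servers='localhost:9092',
                             group_id='my-group',         # assumed group id
                             enable_auto_commit=False)
    while not stop_event.is_set():
        data = consumer.poll(500)        # {TopicPartition: [ConsumerRecord]}
        for records in data.values():
            for record in records:
                listener(record)         # dispatch before committing
        if data:
            consumer.commit_async()      # offsets move only after dispatch
    consumer.close()

stop = threading.Event()
worker = threading.Thread(target=run_listener,
                          args=(lambda r: print(r.offset, r.value), stop),
                          daemon=True)
worker.start()

Calling stop.set() lets the loop finish its current iteration and close the consumer cleanly, which is the same role __isAlive plays above.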
Example #4
import logging
from os.path import expanduser

from kafka import KafkaConsumer, TopicPartition

# Message and MagistralException come from the surrounding Magistral package.


class MagistralConsumer(object):

    __HISTORY_DATA_FETCH_SIZE_LIMIT = 10000

    def __init__(self, pubKey, subKey, secretKey, bootstrap, token,
                 cipher=None, uid=None):
        self.__pubKey = pubKey
        self.__subKey = subKey
        self.__secretKey = secretKey

        self.__token = token

        self.uid = uid

        self.__bootstrap = bootstrap.split(',')
        if cipher is not None:
            self.__cipher = cipher

    def __consumerSettings(self, **overrides):
        # Shared KafkaConsumer settings for all history consumers; the
        # authenticated variant (uid set) adds the SSL client certificates
        # issued per connection token.
        kwargs = dict(
            bootstrap_servers=self.__bootstrap,
            check_crcs=False,
            exclude_internal_topics=True,
            session_timeout_ms=10000,
            reconnect_backoff_ms=10000,
            heartbeat_interval_ms=2000,
            retry_backoff_ms=500,
            fetch_min_bytes=64,
            fetch_max_wait_ms=96,
            enable_auto_commit=False,
            max_in_flight_requests_per_connection=4,
            api_version=(0, 10))

        if self.uid is not None:
            home = expanduser("~")
            kwargs.update(
                security_protocol='SSL',
                ssl_check_hostname=False,
                ssl_keyfile=home + '/magistral/' + self.__token + '/key.pem',
                ssl_cafile=home + '/magistral/' + self.__token + '/ca.pem',
                ssl_certfile=home + '/magistral/' + self.__token + '/certificate.pem')

        kwargs.update(overrides)
        return kwargs

    def history(self, topic, channel, records):
        messages = []

        self.consumer = KafkaConsumer(**self.__consumerSettings())

        if records > self.__HISTORY_DATA_FETCH_SIZE_LIMIT:
            records = self.__HISTORY_DATA_FETCH_SIZE_LIMIT

        kfkTopic = self.__subKey + "." + topic
        x = TopicPartition(kfkTopic, channel)

        self.consumer.assign([x])
        self.consumer.seek_to_end()
        last = self.consumer.position(x)

        # Rewind `records` messages from the end of the log (clamped at 0).
        pos = last - records if last > records else 0
        self.consumer.seek(x, pos)

        data = self.consumer.poll(256)

        endIsNotReached = True
        while endIsNotReached:
            if len(data) == 0:
                return messages

            batch = list(data.values())[0]
            for record in batch:
                index = record[2]
                if index >= last - 1:
                    endIsNotReached = False

                message = Message(record[0][41:], record[1], record[6], index, record[3])
                messages.append(message)

            if not endIsNotReached:
                self.consumer.close()
                return messages

            # Advance by the size of this batch so no records are skipped.
            pos = pos + len(batch)
            self.consumer.seek(x, pos)
            data = self.consumer.poll(256)

        self.consumer.close()

        return messages

    def historyForTimePeriod(self, topic, channel, start, end, limit=-1):
        out = []

        try:
            kfkTopic = self.__subKey + "." + topic
            x = TopicPartition(kfkTopic, channel)

            # Phase 1: scan backwards in blocks of 1000 records until a block
            # starts before `start` (or the beginning of the log is reached).
            self.consumer = KafkaConsumer(**self.__consumerSettings(
                session_timeout_ms=30000,
                fetch_min_bytes=32,
                max_partition_fetch_bytes=65536))
            self.consumer.assign([x])

            self.consumer.seek_to_end()
            last = self.consumer.position(x)

            position = max(last - 1000, 0)

            while True:
                self.consumer.seek(x, position)
                data = self.consumer.poll(500)

                if x not in data or len(data[x]) == 0:
                    break

                timestamp = data[x][0][3]
                if timestamp < start or position == 0:
                    break

                position = max(position - 1000, 0)

            self.consumer.close()

            # Phase 2: read forward from the found position, keeping records
            # whose timestamps fall within [start, end].
            self.c = KafkaConsumer(**self.__consumerSettings(
                fetch_min_bytes=32,
                max_partition_fetch_bytes=65536))
            self.c.assign([x])

            self.c.seek(x, position)
            data = self.c.poll(256)

            while x in data and len(data[x]) > 0:
                for record in data[x]:
                    timestamp = record[3]
                    if timestamp < start:
                        continue

                    index = record[2]

                    if timestamp > end or index >= last - 1:
                        self.c.close()
                        return out

                    message = Message(record[0][41:], record[1], record[6], index, timestamp)
                    out.append(message)

                    if limit is not None and limit > 0 and len(out) >= limit:
                        self.c.close()
                        return out

                # Track the position as we page forward through the log.
                position = position + len(data[x])
                self.c.seek(x, position)
                data = self.c.poll(256)

            self.c.close()
            return out

        except Exception:
            raise MagistralException("Exception during history invocation occurred")
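
Both history methods build on the same manual-positioning primitives: seek_to_end() and position() locate the end of the log, seek() rewinds, and repeated poll() calls read forward. A standalone sketch of fetching the last N records of one partition; the broker address, topic and partition number are assumptions:

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092',  # assumed address
                         enable_auto_commit=False)
tp = TopicPartition('events', 0)                              # assumed topic
consumer.assign([tp])

consumer.seek_to_end(tp)
last = consumer.position(tp)         # offset one past the newest record

n = 100
consumer.seek(tp, max(last - n, 0))  # rewind n records, clamped at offset 0

tail = []
while len(tail) < n:
    batch = consumer.poll(500).get(tp, [])
    if not batch:
        break                        # nothing more to read
    tail.extend(batch)

tail = tail[:n]                      # a poll() batch may overshoot slightly
consumer.close()
print(len(tail), "records")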