Example #1
    def __init__(self, name, task, e):
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.default_tsizes = [128, 256]
        self.create_count = task['create_count']/self.op_factor
        self.update_count = task['update_count']/self.op_factor
        self.get_count = task['get_count']/self.op_factor
        self.del_count = task['del_count']/self.op_factor
        self.exp_count = task['exp_count']/self.op_factor
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.active_hosts = task['active_hosts']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.consume_queue is not None:
            RabbitHelper().declare(self.consume_queue)

        if task['template']['cc_queues']:
            self.ccq = str(task['template']['cc_queues'][0])  #only supporting 1 now
            RabbitHelper().declare(self.ccq)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']
        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # pick a random host; index self.active_hosts so the fallback above is honored
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False
        self.done = False

        try:
            self.cb = GConnection(bucket=self.bucket, password=self.password, host=host, port=port)
        except Exception as ex:
            logging.error("[Thread %s] cannot reach %s:%s/%s" %
                          (self.name, host, port, self.bucket))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" % (self.name, task['id']))
Example #2
import logging
import sys

class DBSession:
    logger = logging.getLogger('dbsession')
    def __init__(self, host, bucket):
        self.host = host
        self.bucket = bucket
        self.cb = GConnection(host=host, bucket=bucket)

    def Save(self, obj):
        try:
            key = obj.Key()
            return self.cb.replace(key, obj.Data())
        except Exception:
            self.logger.error('%s,%s,%s' % sys.exc_info())
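
A minimal usage sketch for DBSession, assuming an object that exposes Key() and Data() like the Player class in Example #4 (the values here are illustrative):

# hypothetical round trip; Player and Config come from the other examples
session = DBSession(Config['dbHost'], Config['dbBucket'])
player = Player('player_0', None)
player.sugar = 42
session.Save(player)

Note that Save() uses replace(), which fails unless the key already exists; seeding the key with set() first, as Example #4 does, avoids that.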
Example #3
    def __init__(self, name, task, e):
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.create_count = task['create_count']/self.op_factor
        self.update_count = task['update_count']/self.op_factor
        self.get_count = task['get_count']/self.op_factor
        self.del_count = task['del_count']/self.op_factor
        self.exp_count = task['exp_count']/self.op_factor
        self.consume_queue = task['consume_queue']
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.active_hosts = task['active_hosts']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.hotset = []
        self.ccq = None
        self.hotkeys = []
        if task['template']['cc_queues']:
            self.ccq = str(task['template']['cc_queues'][0])  #only supporting 1 now
            RabbitHelper().declare(self.ccq)

        self.batch_size = 1000
        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']
        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # pick a random host; index self.active_hosts so the fallback above is honored
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]
        self.cb = GConnection(bucket=self.bucket, password=self.password, host=host, port=port)

        self.e = e
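
Only the first entry of cc_queues is ever consumed, as the inline comment above notes. A minimal sketch of the template shape this constructor expects, with illustrative field values:

# illustrative template; the client reads template['cc_queues'][0] only
template = {
    'kv': {'name': 'demo', 'city': 'reno'},
    'cc_queues': ['ccq_workload_1'],
}
ccq = str(template['cc_queues'][0])
RabbitHelper().declare(ccq)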
Example #4
import copy
import time

from Config import Config
from Data import GrowthMap, MAP_SIZE
from Terrain import Terrain
from Player import Player

PLAYER_COUNT = 800

players = [Player('player_%d' % i, None) for i in xrange(PLAYER_COUNT)]
terrain = Terrain()
terrain.size = MAP_SIZE
terrain.growth = copy.deepcopy(GrowthMap)
terrain.sugar = copy.deepcopy(GrowthMap)

for player in players:
    player.sugar = 10
    terrain.born(player)

start = time.clock()
cb = GConnection(host=Config['dbHost'], bucket=Config['dbBucket'])
#cb = Couchbase.connect(host=Config['dbHost'], bucket=Config['dbBucket'])

cb.set('sugarscape', {'playerKeys': ','.join((p.Key() for p in players))})

for player in players:
    cb.set(player.Key(), player.Data())

cb.set(terrain.Key(), terrain.Data())

end = time.clock()
print 'success, elapsed:%s' % (end - start)
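
A small read-back sketch under the same assumptions (GConnection and Config as above). It uses the 'sugarscape' registry document to recover the player keys that were just written; the .value attribute on the get() result is an assumption about the wrapper:

# hypothetical read-back of the seeded world
cb = GConnection(host=Config['dbHost'], bucket=Config['dbBucket'])
registry = cb.get('sugarscape').value
player_keys = registry['playerKeys'].split(',')
players = cb.get_multi(player_keys)
print 'recovered %d players' % len(players)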
Example #5
    def __init__(self, host, bucket):
        self.host = host
        self.bucket = bucket
        self.cb = GConnection(host=host, bucket=bucket)
Example #6
class SDKClient(threading.Thread):

    def __init__(self, name, task, e):
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.create_count = task['create_count']/self.op_factor
        self.update_count = task['update_count']/self.op_factor
        self.get_count = task['get_count']/self.op_factor
        self.del_count = task['del_count']/self.op_factor
        self.exp_count = task['exp_count']/self.op_factor
        self.consume_queue = task['consume_queue']
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.active_hosts = task['active_hosts']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.hotset = []
        self.ccq = None
        self.hotkeys = []
        if task['template']['cc_queues']:
            self.ccq = str(task['template']['cc_queues'][0])  #only supporting 1 now
            RabbitHelper().declare(self.ccq)

        self.batch_size = 1000
        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']
        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # pick a random host; index self.active_hosts so the fallback above is honored
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]
        self.cb = GConnection(bucket=self.bucket, password=self.password, host=host, port=port)

        self.e = e

    def run(self):

        cycle = ops_total = 0

        while not self.e.is_set():

            start = datetime.datetime.now()


            # do an op cycle
            threads = self.do_cycle()
            gevent.joinall(threads)


            # wait till next cycle
            end = datetime.datetime.now()
            wait = 1 - (end - start).total_seconds()
            if wait > 0:
                time.sleep(wait)
            # else: we are overcommitted, which is acceptable

            ops_total = ops_total + self.ops_sec
            cycle = cycle + 1

            if (cycle % 100) == 0:
                logging.info("[Thread %s] total ops: %s" % (self.name, ops_total))

            if self.memq.qsize() > 100:
                self.flushq()

        # push everything to rabbitmq
        self.flushq()


    def flushq(self):

        if self.ccq is not None:

            # declare queue
            mq = RabbitHelper()
            mq.declare(self.ccq)

            while not self.memq.empty():
                try:
                    msg = self.memq.get_nowait()
                    msg = json.dumps(msg)
                    mq.putMsg(self.ccq, msg)
                except queue.Empty:
                    pass

            # hot keys: push the hot range once, after draining memq
            if len(self.hotkeys) > 0:
                key_map = {'start': self.hotkeys[0],
                           'end': self.hotkeys[-1]}
                msg = json.dumps(key_map)
                mq.putMsg(self.ccq, msg)


    def do_cycle(self):

        threads = []

        if self.create_count > 0:

            count = self.create_count
            docs_to_expire = self.exp_count

            # check if we need to expire some docs
            if docs_to_expire > 0:

                # create an expire batch
                self.mset(self.template['kv'], docs_to_expire, ttl = self.ttl)
                count = count - docs_to_expire

            t = gevent.spawn(self.mset, self.template['kv'], count)
            threads.append(t)

        if self.update_count > 0:
            t = gevent.spawn(self.mset_update, self.template['kv'], self.update_count)
            threads.append(t)

        if self.get_count > 0:
            t = gevent.spawn(self.mget, self.get_count)
            threads.append(t)

        if self.del_count > 0:
            t = gevent.spawn(self.mdelete, self.del_count)
            threads.append(t)


        return threads

    def mset(self, template, count, ttl = 0):
        msg = {}
        keys = []
        cursor = 0
        j = 0

        for j in xrange(count):
            self.i = self.i+1
            msg[self.name+str(self.i)] = template
            keys.append(self.name+str(self.i))

            if ((j + 1) % self.batch_size) == 0:
                batch = keys[cursor:j + 1]
                self.memq.put_nowait({'start': batch[0],
                                      'end': batch[-1]})
                self._mset(msg, ttl)
                cursor = j + 1
                msg = {}

        # flush any leftover partial batch
        if len(msg) > 0:
            self._mset(msg, ttl)
            self.memq.put_nowait({'start': keys[cursor],
                                  'end': keys[-1]})


    def _mset(self, msg, ttl = 0):

        try:
            self.cb.set_multi(msg, ttl=ttl)
        except TemporaryFailError:
            logging.warn("temp failure during mset - cluster may be unstable")
        except TimeoutError:
            logging.warn("cluster timed trying to handle mset")

    def mset_update(self, template, count):

        batches = self.getKeys(count)
        if len(batches) > 0:

            for batch in batches:
                # build a fresh payload per batch so earlier batches are not re-written
                msg = {}
                try:
                    for key in batch:
                        msg[key] = template
                    self.cb.set_multi(msg)
                except NotFoundError as nf:
                    logging.error("update key not found! %s" % nf.key)
                except TimeoutError:
                    logging.warn("cluster timed out trying to handle mset - cluster may be unstable")
                except TemporaryFailError:
                    logging.warn("temp failure during mset - cluster may be unstable")


    def mget(self, count):

        batches = []
        if self.miss_perc > 0:
            batches = self.getCacheMissKeys(count)
        else:
            batches = self.getKeys(count)

        if len(batches) > 0:

            for batch in batches:
                try:
                    self.cb.get_multi(batch)
                except NotFoundError as nf:
                    logging.warn("get key not found!  %s: " % nf.key)
                except TimeoutError:
                    logging.warn("cluster timed out trying to handle mget - cluster may be unstable")


    def mdelete(self, count):
        batches = self.getKeys(count, requeue = False)
        keys_deleted = 0

        # delete from buffer
        if len(batches) > 0:
            keys_deleted = self._mdelete(batches)

        return keys_deleted

    def _mdelete(self, batches):
        keys_deleted = 0
        for batch in batches:
            try:
                if len(batch) > 0:
                    keys_deleted = len(batch) + keys_deleted
                    self.cb.delete_multi(batch)
            except NotFoundError as nf:
                logging.warn("get key not found!  %s: " % nf.key)
            except TimeoutError:
                logging.warn("cluster timed out trying to handle mdelete - cluster may be unstable")

        return keys_deleted


    def getCacheMissKeys(self, count):

        # returns batches of keys where first batch contains # of keys to miss
        keys_retrieved = 0
        batches = []
        miss_keys = []
        requeue = len(self.hotkeys) > 0

        keys = self.getKeysFromQueue(requeue = requeue, force_stale = True)

        if requeue == False:
            # hotkeys were taken off queue and cannot be reused
            self.hotkeys = keys


        if len(keys) > 0:
            # miss% of count keys
            num_to_miss = int( ((self.miss_perc/float(100)) * len(keys)) / float(self.op_factor))
            miss_keys = keys[:num_to_miss]
            batches.append(miss_keys)
            keys_retrieved = len(miss_keys)


        # use old hotkeys for rest of set
        while keys_retrieved < count:

            keys = self.hotkeys
            if len(keys) == 0:
                # nothing hot yet; bail out instead of spinning forever
                break

            # in case we got too many keys slice the batch
            need = count - keys_retrieved
            if len(keys) > need:
                keys = keys[:need]

            keys_retrieved = keys_retrieved + len(keys)

            # add to batch
            batches.append(keys)

        return batches

    def getKeys(self, count, requeue = True):

        keys_retrieved = 0
        batches = []

        while keys_retrieved < count:

            # get keys
            keys = self.getKeysFromQueue(requeue)

            if len(keys) == 0:
                break

            # in case we got too many keys slice the batch
            need = count - keys_retrieved
            if len(keys) > need:
                keys = keys[:need]

            keys_retrieved = keys_retrieved + len(keys)

            # add to batch
            batches.append(keys)


        return batches

    def getKeysFromQueue(self, requeue = True, force_stale = False):

        # get key mapping and convert to keys
        keys = []
        key_map = None

        # priority to stale queue
        if force_stale:
            key_map = self.getKeyMapFromRemoteQueue(requeue)

        # fall back to local queue
        if key_map is None:
            key_map = self.getKeyMapFromLocalQueue(requeue)

        if key_map:
            keys = self.keyMapToKeys(key_map)


        return keys

    def keyMapToKeys(self, key_map):

        keys = []
        # reconstruct key-space
        prefix, start_idx = key_map['start'].rsplit('_', 1)
        prefix, end_idx = key_map['end'].rsplit('_', 1)

        for i in range(int(start_idx), int(end_idx) + 1):
            keys.append(prefix+"_"+str(i))

        return keys


    def fillq(self):

        if self.ccq is None:
            return

        # put about 20 items into the queue
        for i in xrange(20):
            key_map = self.getKeyMapFromRemoteQueue()
            if key_map:
                self.memq.put_nowait(key_map)

    def getKeyMapFromLocalQueue(self, requeue = True):

        key_map = None

        try:
            key_map = self.memq.get_nowait()
            if requeue:
                self.memq.put_nowait(key_map)
        except queue.Empty:
            #no more items
            if self.ccq is not None:
                self.fillq()

        return key_map

    def getKeyMapFromRemoteQueue(self, requeue = True):
        key_map = None
        # nothing to consume without a ccq
        if self.ccq is None:
            return key_map
        mq = RabbitHelper()
        if mq.qsize(self.ccq) > 0:
            try:
                key_map = mq.getJsonMsg(self.ccq, requeue = requeue )
            except Exception:
                pass
        return key_map
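
A minimal driver sketch for this client. It assumes the module-level names the class relies on (CLIENTSPERPROCESS, PROCSPERTASK, cfg, RabbitHelper, GConnection) are importable; the task dict below is illustrative and mirrors the fields __init__ reads:

import time
import threading

# illustrative task payload; field names match what __init__ reads
task = {
    'id': 'workload-1', 'ops_sec': 1000,
    'bucket': 'default', 'password': '',
    'template': {'kv': {'city': 'reno'}, 'cc_queues': None},
    'create_count': 600, 'update_count': 200, 'get_count': 200,
    'del_count': 0, 'exp_count': 0, 'ttl': 0, 'miss_perc': 0,
    'active_hosts': ['127.0.0.1:8091'], 'consume_queue': None,
}

stop = threading.Event()               # left unset: run() loops while it is clear
client = SDKClient('w0_', task, stop)  # trailing '_' keeps generated keys compatible with keyMapToKeys
client.start()
time.sleep(60)
stop.set()                             # in this variant, setting the event stops the loop
client.join()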
Example #7
class SDKClient(threading.Thread):

    def __init__(self, name, task, e):
        threading.Thread.__init__(self)
        self.name = name
        self.i = 0
        self.op_factor = CLIENTSPERPROCESS * PROCSPERTASK
        self.ops_sec = task['ops_sec']
        self.bucket = task['bucket']
        self.password = task['password']
        self.template = task['template']
        self.default_tsizes = [128, 256]
        self.create_count = task['create_count']/self.op_factor
        self.update_count = task['update_count']/self.op_factor
        self.get_count = task['get_count']/self.op_factor
        self.del_count = task['del_count']/self.op_factor
        self.exp_count = task['exp_count']/self.op_factor
        self.ttl = task['ttl']
        self.miss_perc = task['miss_perc']
        self.active_hosts = task['active_hosts']
        self.batch_size = 5000
        self.memq = queue.Queue()
        self.consume_queue = task['consume_queue']
        self.standalone = task['standalone']
        self.ccq = None
        self.hotkey_batches = []

        if self.consume_queue is not None:
            RabbitHelper().declare(self.consume_queue)

        if task['template']['cc_queues']:
            self.ccq = str(task['template']['cc_queues'][0])  #only supporting 1 now
            RabbitHelper().declare(self.ccq)

        if self.batch_size > self.create_count:
            self.batch_size = self.create_count

        self.active_hosts = task['active_hosts']
        if not self.active_hosts:
            self.active_hosts = [cfg.COUCHBASE_IP]

        # pick a random host; index self.active_hosts so the fallback above is honored
        addr = self.active_hosts[random.randint(0, len(self.active_hosts) - 1)].split(':')
        host = addr[0]
        port = 8091
        if len(addr) > 1:
            port = addr[1]

        self.e = e
        self.cb = None
        self.isterminal = False
        self.done = False

        try:
            self.cb = GConnection(bucket=self.bucket, password=self.password, host=host, port=port)
        except Exception as ex:
            logging.error("[Thread %s] cannot reach %s:%s/%s" %
                          (self.name, host, port, self.bucket))
            logging.error(ex)
            self.isterminal = True

        logging.info("[Thread %s] started for workload: %s" % (self.name, task['id']))

    def run(self):

        cycle = ops_total = 0
        self.e.set()

        while self.e.is_set():

            start = datetime.datetime.now()


            # do an op cycle
            self.do_cycle()

            if self.isterminal:
                # some error occurred during the workload
                self.flushq(True)
                exit(-1)

            # wait till next cycle
            end = datetime.datetime.now()
            wait = 1 - (end - start).total_seconds()
            if wait > 0:
                time.sleep(wait)
            # else: we are overcommitted, which is acceptable

            ops_total = ops_total + self.ops_sec
            cycle = cycle + 1

            if (cycle % 120) == 0: # 2 mins
                logging.info("[Thread %s] total ops: %s" % (self.name, ops_total))
                self.flushq()

        self.flushq()
        logging.info("[Thread %s] done!" % (self.name))


    def flushq(self, flush_hotkeys = False):

        if self.standalone:
            return

        mq = RabbitHelper()

        if self.ccq is not None:

            logging.info("[Thread %s] flushing %s items to %s" %
                         (self.name, self.memq.qsize(), self.ccq))

            # declare queue
            mq.declare(self.ccq)

            # empty the in memory queue
            while not self.memq.empty():
                try:
                    msg = self.memq.get_nowait()
                    msg = json.dumps(msg)
                    mq.putMsg(self.ccq, msg)
                except queue.Empty:
                    pass

        # hot keys
        if flush_hotkeys and (len(self.hotkey_batches) > 0):

            # try to put onto a remote queue; use a separate name so the
            # queue module stays visible for queue.Empty above
            target = self.consume_queue or self.ccq

            if target is not None:
                key_map = {'start': self.hotkey_batches[0][0],
                           'end': self.hotkey_batches[-1][-1]}
                msg = json.dumps(key_map)
                mq.putMsg(target, msg)
                self.hotkey_batches = []


    def do_cycle(self):

        sizes = self.template.get('size') or self.default_tsizes
        t_size = sizes[random.randint(0,len(sizes)-1)]
        self.template['t_size'] = t_size

        if self.create_count > 0:

            count = self.create_count
            docs_to_expire = self.exp_count
            # check if we need to expire some docs
            if docs_to_expire > 0:

                # create an expire batch
                self.mset(self.template, docs_to_expire, ttl = self.ttl)
                count = count - docs_to_expire

            self.mset(self.template, count)

        if self.update_count > 0:
            self.mset_update(self.template, self.update_count)

        if self.get_count > 0:
            self.mget(self.get_count)

        if self.del_count > 0:
            self.mdelete(self.del_count)


    def mset(self, template, count, ttl = 0):
        msg = {}
        keys = []
        cursor = 0
        j = 0

        template = resolveTemplate(template)
        for j in xrange(count):
            self.i = self.i+1
            msg[self.name+str(self.i)] = template
            keys.append(self.name+str(self.i))

            if ((j + 1) % self.batch_size) == 0:
                batch = keys[cursor:j + 1]
                self._mset(msg, ttl)
                self.memq.put_nowait({'start': batch[0],
                                      'end': batch[-1]})
                msg = {}
                cursor = j + 1
            elif j == (count - 1):
                batch = keys[cursor:]
                self._mset(msg, ttl)
                self.memq.put_nowait({'start': batch[0],
                                      'end': batch[-1]})


    def _mset(self, msg, ttl = 0):

        try:
            self.cb.set_multi(msg, ttl=ttl)
        except TemporaryFailError:
            logging.warn("temp failure during mset - cluster may be unstable")
        except TimeoutError:
            logging.warn("cluster timed trying to handle mset")
        except NetworkError as nx:
            logging.error("network error")
            logging.error(nx)
        except Exception as ex:
            logging.error(ex)
            self.isterminal = True

    def mset_update(self, template, count):

        batches = self.getKeys(count)
        template = resolveTemplate(template)
        if len(batches) > 0:

            for batch in batches:
                # build a fresh payload per batch so earlier batches are not re-written
                msg = {}
                try:
                    for key in batch:
                        msg[key] = template
                    self.cb.set_multi(msg)
                except NotFoundError as nf:
                    logging.error("update key not found! %s" % nf.key)
                except TimeoutError:
                    logging.warn("cluster timed out trying to handle mset - cluster may be unstable")
                except NetworkError as nx:
                    logging.error("network error")
                    logging.error(nx)
                except TemporaryFailError:
                    logging.warn("temp failure during mset - cluster may be unstable")
                except Exception as ex:
                    logging.error(ex)
                    self.isterminal = True


    def mget(self, count):

        batches = []
        if self.miss_perc > 0:
            batches = self.getCacheMissKeys(count)
        else:
            batches = self.getKeys(count)

        if len(batches) > 0:

            for batch in batches:
                try:
                    self.cb.get_multi(batch)
                except NotFoundError as nf:
                    logging.warn("get key not found!  %s: " % nf.key)
                    pass
                except TimeoutError:
                    logging.warn("cluster timed out trying to handle mget - cluster may be unstable")
                except NetworkError as nx:
                    logging.error("network error")
                    logging.error(nx)
                except Exception as ex:
                    logging.error(ex)
                    self.isterminal = True


    def mdelete(self, count):
        batches = self.getKeys(count, requeue = False)
        keys_deleted = 0

        # delete from buffer
        if len(batches) > 0:
            keys_deleted = self._mdelete(batches)

        return keys_deleted

    def _mdelete(self, batches):
        keys_deleted = 0
        for batch in batches:
            try:
                if len(batch) > 0:
                    keys_deleted = len(batch) + keys_deleted
                    self.cb.delete_multi(batch)
            except NotFoundError as nf:
                logging.warn("get key not found!  %s: " % nf.key)
            except TimeoutError:
                logging.warn("cluster timed out trying to handle mdelete - cluster may be unstable")
            except NetworkError as nx:
                logging.error("network error")
                logging.error(nx)
            except Exception as ex:
                logging.error(ex)
                self.isterminal = True

        return keys_deleted


    def getCacheMissKeys(self, count):

        # returns batches of keys where first batch contains # of keys to miss
        keys_retrieved = 0
        batches = []
        miss_keys = []

        num_to_miss = int( ((self.miss_perc/float(100)) * count))
        miss_batches = self.getKeys(num_to_miss, force_stale = True)

        if len(self.hotkey_batches) == 0:
            # hotkeys are taken off queue and cannot be reused
            # until workload is flushed
            need = count - num_to_miss
            self.hotkey_batches = self.getKeys(need, requeue = False)


        batches = miss_batches + self.hotkey_batches
        return batches

    def getKeys(self, count, requeue = True, force_stale = False):

        keys_retrieved = 0
        batches = []

        while keys_retrieved < count:

            # get keys
            keys = self.getKeysFromQueue(requeue, force_stale = force_stale)

            if len(keys) == 0:
                break

            # in case we got too many keys slice the batch
            need = count - keys_retrieved
            if len(keys) > need:
                keys = keys[:need]

            keys_retrieved = keys_retrieved + len(keys)

            # add to batch
            batches.append(keys)


        return batches

    def getKeysFromQueue(self, requeue = True, force_stale = False):

        # get key mapping and convert to keys
        keys = []
        key_map = None

        # priority to stale queue
        if force_stale:
            key_map = self.getKeyMapFromRemoteQueue(requeue)

        # fall back to local queue
        if key_map is None:
            key_map = self.getKeyMapFromLocalQueue(requeue)

        if key_map:
            keys = self.keyMapToKeys(key_map)


        return keys

    def keyMapToKeys(self, key_map):

        keys = []
        # reconstruct key-space
        prefix, start_idx = key_map['start'].rsplit('_', 1)
        prefix, end_idx = key_map['end'].rsplit('_', 1)

        for i in range(int(start_idx), int(end_idx) + 1):
            keys.append(prefix+"_"+str(i))

        return keys


    def fillq(self):

        if (self.consume_queue is None) and (self.ccq is None):
            return

        # put about 20 items into the queue
        for i in xrange(20):
            key_map = self.getKeyMapFromRemoteQueue()
            if key_map:
                self.memq.put_nowait(key_map)

        logging.info("[Thread %s] filled %s items from  %s" %
                     (self.name, self.memq.qsize(), self.consume_queue or self.ccq))

    def getKeyMapFromLocalQueue(self, requeue = True):

        key_map = None

        try:
            key_map = self.memq.get_nowait()
            if requeue:
                self.memq.put_nowait(key_map)
        except queue.Empty:
            #no more items
            self.fillq()

        return key_map

    def getKeyMapFromRemoteQueue(self, requeue = True):

        key_map = None
        mq = RabbitHelper()

        # try to fetch from consume queue and
        # fall back to ccqueue
        target = self.consume_queue

        if target is None or mq.qsize(target) == 0:
            target = self.ccq

        if target is not None and mq.qsize(target) > 0:
            try:
                key_map = mq.getJsonMsg(target, requeue = requeue)
            except Exception:
                pass

        return key_map
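
The key maps passed through memq and RabbitMQ are just {'start', 'end'} endpoint pairs; keyMapToKeys rebuilds the contiguous key range between them. A tiny worked illustration of that round trip:

# a batch of five keys compressed to its endpoints...
key_map = {'start': 'w0_100', 'end': 'w0_104'}

# ...and expanded back, exactly as keyMapToKeys does
prefix, start_idx = key_map['start'].rsplit('_', 1)
prefix, end_idx = key_map['end'].rsplit('_', 1)
keys = [prefix + '_' + str(i) for i in range(int(start_idx), int(end_idx) + 1)]
# -> ['w0_100', 'w0_101', 'w0_102', 'w0_103', 'w0_104']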