Example #1
    def test_superset(self):
        for init in (self.create_set, set):
            s_1 = init([1, 2, 3, 4])
            s_2 = init([1, 2])
            s_3 = init([1, 2, 3, 4, 5])
            s_4 = {1, 2}
            s_5 = {1, 2, 3, 4}
            s_6 = [1, 2]
            s_7 = List([1, 2], redis=self.redis)

            self.assertTrue(s_1.issuperset(s_2))
            self.assertTrue(s_1 >= s_2)
            self.assertTrue(s_1 > s_2)

            self.assertFalse(s_1.issuperset(s_3))
            self.assertFalse(s_1 >= s_3)
            self.assertFalse(s_1 > s_3)

            self.assertTrue(s_1.issuperset(s_4))
            self.assertTrue(s_1 >= s_4)
            self.assertTrue(s_1 > s_4)

            self.assertTrue(s_1.issuperset(s_5))
            self.assertTrue(s_1 >= s_5)
            self.assertFalse(s_1 > s_5)

            self.assertTrue(s_1.issuperset(s_6))
            if PYTHON_VERSION >= (3, 4):
                self.assertRaises(TypeError, lambda: s_1 >= s_6)

            self.assertTrue(s_1.issuperset(s_7))

            self.assertRaises(TypeError, s_1.issuperset, None)
Example #2
def update_unsync_list(sync_list):
    unsync_list = List(key=config.get('constants',
                                      'REDIS_UNSYNC_DEV_LIST_KEY'),
                       redis=r)
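    # NOTE: this removes items while iterating, which is only safe if List
    # iteration works on a snapshot of the Redis values (an assumption here).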
    for device in unsync_list:
        if device in sync_list:
            unsync_list.remove(device)
Example #3
    def test_update(self):
        d = self.create_dict()
        d['a'] = 'b'

        # Update from built-in dicts
        d.update({'c': 'd'})
        self.assertEqual(sorted(d.items()), [('a', 'b'), ('c', 'd')])

        d.update({'c': 42})
        self.assertEqual(sorted(d.items()), [('a', 'b'), ('c', 42)])

        d.update({'x': 38})
        self.assertEqual(sorted(d.items()), [('a', 'b'), ('c', 42), ('x', 38)])

        # Update from list of tuples
        d.update([('a', 'g')])
        self.assertEqual(sorted(d.items()), [('a', 'g'), ('c', 42), ('x', 38)])

        # Update from kwargs
        d.update(c=None)
        self.assertEqual(sorted(d.items()), [('a', 'g'), ('c', None),
                                             ('x', 38)])

        # Update from another redis_collections class
        redis_list = List([('a', 'h')], redis=self.redis)
        d.update(redis_list)
        self.assertEqual(sorted(d.items()), [('a', 'h'), ('c', None),
                                             ('x', 38)])
Example #4
    def test_symmetric_difference(self):
        for init in (self.create_set, set):
            s_1 = init([1, 2, 3, 4])
            s_2 = init([3, 4, 5, 6])
            s_3 = {3, 4, 5, 6}
            s_4 = [3, 4, 5, 6]
            s_5 = List([3, 4, 5, 6], redis=self.redis)

            self.assertEqual(sorted(s_1.symmetric_difference(s_2)),
                             [1, 2, 5, 6])
            self.assertEqual(sorted(s_1 ^ s_2), [1, 2, 5, 6])
            self.assertEqual(sorted(s_2 ^ s_1), [1, 2, 5, 6])

            self.assertEqual(sorted(s_1.symmetric_difference(s_3)),
                             [1, 2, 5, 6])
            if PYTHON_VERSION >= (3, 4):
                self.assertEqual(sorted(s_1 ^ s_3), [1, 2, 5, 6])
                self.assertEqual(sorted(s_3 ^ s_1), [1, 2, 5, 6])

            self.assertEqual(sorted(s_1.symmetric_difference(s_4)),
                             [1, 2, 5, 6])
            self.assertRaises(TypeError, lambda: s_1 ^ s_4)

            self.assertEqual(sorted(s_1.symmetric_difference(s_5)),
                             [1, 2, 5, 6])
Example #5
    def test_update(self):
        expected_result = [('a', 3), ('b', 4), ('c', 5)]
        for init in (self.create_counter, collections.Counter):
            # Both Counters
            c_1 = init('abbccc')
            c_2 = init('aabbcc')
            c_1.update(c_2)
            self.assertEqual(sorted(c_1.items()), expected_result)

            # One Counter, one dict
            c = init('abbccc')
            c.update({'a': 2, 'b': 2, 'c': 2})
            self.assertEqual(sorted(c.items()), expected_result)

            # One Counter, one sequence, and kwargs
            c = init('abbccc')
            c.update(['a', 'a', 'b', 'b'], c=2)
            self.assertEqual(sorted(c.items()), expected_result)

            # One Counter, one redis_collections.List
            c = init('abbccc')
            redis_list = List(['a', 'a', 'b', 'b'], redis=self.redis)
            c.update(redis_list, c=2)
            self.assertEqual(sorted(c.items()), expected_result)

        # Writeback enabled
        c = self.create_counter(writeback=True)
        c[('tuple', 'key')] = 1
        self.assertIn(('tuple', 'key'), c._data())
        self.assertIn(('tuple', 'key'), c.cache)
        c.update({('tuple', 'key'): 2})
        self.assertEqual(c[('tuple', 'key')], 2)
Example #6
def parse_config_and_sync(data=None, sync_list=True):
    if sync_list:
        sync_list = List(key=config.get('constants',
                                        'REDIS_SYNC_DEV_LIST_KEY'),
                         redis=r)
    else:
        sync_list = List(key=config.get('constants',
                                        'REDIS_NEW_SYNC_DEV_LIST_KEY'),
                         redis=r)

    unsync_list = List(key=config.get('constants',
                                      'REDIS_UNSYNC_DEV_LIST_KEY'),
                       redis=r)
    if data is None:
        data = r.blpop(config.get('constants', 'REDIS_CONFIG_XML_QUEUE_KEY'))
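    # NOTE: eval() on a queue payload is risky; ast.literal_eval would be a
    # safer way to parse the pushed [ip, resp] payload.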
    data = eval(data[1])
    device = data[0]
    xml = data[1]
    p_obj = Parser.parse_mfc_config(device, xml)
    if p_obj.header.status_code == 0:
        """ Update the gloabl DS

        extend the sync_dev_list with the device tuple
        Store the UUID in a global hashmap. will be retrieved using IP key.
        """
        try:
            mfc_uuid.update({device[2] + '_uuid': p_obj.data.config.host_id})
            mfc_uuid.update(
                {device[2] + '_hostname': p_obj.data.config.hostname})
            # Update the sync list if we were able to retrieve the data attributes
            sync_list.extend((device, ))
        except AttributeError:
            LOG.error("Something wrong with the Config data from MFC: " +
                      device[2])
            LOG.error("Restart agentd or make sure the config data is valid.")
            unsync_list.extend((device, ))
        finally:
            r.rpush(config.get('constants', 'REDIS_CONFIG_STORE_QUEUE_KEY'),
                    Serialize.to_json(p_obj))
    else:
        LOG.error("Unable to get config from MFC: " + device[2])
        LOG.error("Status Code: %s Message: %s" %
                  (p_obj.header.status_code, p_obj.header.status_msg))
        LOG.error("Check MFC state make sure Agentd is working fine.")
        unsync_list.extend((device, ))

    return p_obj
Example #7
def profile_data():
    results = []
    try:
        creds = json.loads(os.environ['VCAP_SERVICES'])['p-redis'][0]['credentials']
        r = redis.StrictRedis(host=creds['host'], port=creds['port'],
                              password=creds['password'], db=0)
        l = List(redis=r, key='profiles')
        results = [item for item in l]
    except Exception as e:
        print e
    return json.dumps({'profiles': results}, indent=4)
Example #8
    def recheck_unsync_devices():
        unsync_list = List(key=config.get('constants',
                                          'REDIS_UNSYNC_DEV_LIST_KEY'),
                           redis=r)
        recheck_devices = []
        while len(unsync_list) > 0:
            recheck_devices.append(unsync_list.pop())

        LOG.info("Processing unsync device list")
        recheck_task = chain(request_cluster_config.s(recheck_devices),
                             update_unsync_list.s())
        recheck_task.apply_async()
Example #9
def saveProfile(profile=None):
    try:
        print "[profile-scanner] saving profile %s" % profile['url']
        creds = json.loads(os.environ['VCAP_SERVICES'])['p-redis'][0]['credentials']
        r = redis.StrictRedis(host=creds['host'], port=creds['port'],
                              password=creds['password'], db=0)
        l = List(redis=r, key='profiles')
        l.append(profile['url'].strip('\n').strip())
        while len(l) > 20:
            l.pop(0)
    except Exception as e:
        print e
        raise
Example #10
    def test_disjoint(self):
        for init in (self.create_set, set):
            s_1 = init([1, 2, 3, 3])
            s_2 = init([4, 5])
            s_3 = {3, 4, 5}
            s_4 = [4, 5]
            s_5 = List([4, 5], redis=self.redis)

            self.assertTrue(s_1.isdisjoint(s_2))
            self.assertFalse(s_1.isdisjoint(s_3))
            self.assertTrue(s_1.isdisjoint(s_4))
            self.assertTrue(s_1.isdisjoint(s_5))
            self.assertRaises(TypeError, s_1.isdisjoint, None)
Example #11
    def __init__(self, app, base_url, redis, slack_client):
        """ Init. """
        if not all(REQUIRED_KEYS):
            self._logger.critical('%s must be set.', REQUIRED_KEYS)
            sys.exit(1)

        self._base_url = base_url
        self.cache = CTFdCache(solves=List(key='ctfd_submission_db',
                                           redis=redis),
                               teams=Dict(key='ctfd_teams', redis=redis),
                               users=Dict(key='ctfd_users', redis=redis))
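        # NOTE: self._api (presumably a requests.Session) is created elsewhere
        # in this class before these headers are set.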
        self._api.headers.update({
            'Authorization': f'Token {CTFD_TOKEN}',
            'Content-type': 'application/json'
        })
        self._slack = slack_client

        self.bind_route(app)
Example #12
def index(request):
    ip = getattr(settings, 'RS_HOST', 'localhost')
    port = getattr(settings, 'RS_PORT', 'def_port')
    pw = getattr(settings, 'RS_PASSWORD', 'def_password')
    redis_connection = StrictRedis(host=ip, port=port, db=0, password=pw)
    r = List(redis=redis_connection, key="rp3:00000000448f5428")

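    # The slice below presumably materializes the values out of Redis, so the
    # sort and averaging that follow run on a plain Python list.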
    latest_val = r[-60:-1]  # readings from roughly the last 60 seconds
    latest_val.sort()
    middle_val = latest_val[20:40]  # middle 20 values
    avg_val = sum(middle_val) / len(middle_val)
    # is_empty = False if avg_val < 1800 else True
    is_empty = avg_val >= 2200
    print(avg_val, is_empty)
 
    context = {
        'is_empty': is_empty,
        'avg': avg_val,
    }
    return render(request, 'index.html', context)
Example #13
    def test_intersection(self):
        for init in (self.create_set, set):
            s_1 = init([1, 2, 3])
            s_2 = init([2, 3, 4])
            s_3 = {2, 3, 4}
            s_4 = [2, 3, 4]
            s_5 = List([2, 3, 4], redis=self.redis)

            self.assertEqual(sorted(s_1.intersection(s_2)), [2, 3])
            self.assertEqual(sorted(s_1 & s_2), [2, 3])
            self.assertEqual(sorted(s_2 & s_1), [2, 3])

            self.assertEqual(sorted(s_1.intersection(s_3)), [2, 3])
            self.assertEqual(sorted(s_1 & s_3), [2, 3])
            self.assertEqual(sorted(s_3 & s_1), [2, 3])

            self.assertEqual(sorted(s_1.intersection(s_4)), [2, 3])
            self.assertRaises(TypeError, lambda: s_1 & s_4)

            self.assertEqual(sorted(s_1.intersection(s_5)), [2, 3])
Example #14
def request_cluster_config(dev_list, unsync_list=False):
    req_uri = '/admin/agentd_comm'
    conf_q = config.get('constants', 'REDIS_CONFIG_XML_QUEUE_KEY')
    mfc_count = len(dev_list)
    g_pool = gevent.pool.Pool(size=mfc_count)
    sync_flag = True
    if unsync_list:
        sync_flag = False

    LOG.debug("Creating Config request clients")
    conf_clients = []
    for device in dev_list:
        url = URL('http://' + device[2] + ':8080' + req_uri)
        conf_clients.append(
            HTTPClient.from_url(url, concurrency=1, headers_type=dict))

    LOG.debug("Starting to request Config from MFC")
    for i in xrange(mfc_count):
        g_pool.spawn(request_config_mfc_cb, conf_clients[i], dev_list[i],
                     conf_q)
    g_pool.join()
    LOG.debug("Finished collecting Config from MFC")

    for i in xrange(mfc_count):
        conf_clients[i].close()
    """Parse and store the config.

    mfc_uuid is a global hashmap(redis Dict) with ip as key and UUID as value
    parse_config_and_sync will update the sync_dev_list, mfc_uuid for each XML response.
    """
    LOG.debug("Parsing config request output and building the UUID hash.")
    q_len = r.llen(conf_q)
    g_pool = gevent.pool.Pool(size=q_len)
    for _ in xrange(q_len):
        data = r.blpop(conf_q)
        g_pool.spawn(parse_config_and_sync, data, sync_flag)
    g_pool.join()
    """Return list of MFCs which was able to communicate."""
    sync_list = List(key=config.get('constants', 'REDIS_SYNC_DEV_LIST_KEY'),
                     redis=r)
    return list(sync_list)
Example #15
    def test_eq_le_lt_issubset(self):
        for init in (
                self.create_set,
                set,
        ):
            s_1 = init([1, 2])
            s_2 = init([1, 2, 3, 4])
            s_3 = {1, 2, 3, 4}
            s_4 = {1, 2}
            s_5 = [1, 2, 3, 4]
            s_6 = self.create_set([1, 2, 3, 4])
            s_7 = List([1, 2, 3, 4], redis=self.redis)

            self.assertTrue(s_1.issubset(s_2))
            self.assertFalse(s_1 == s_2)
            self.assertTrue(s_1 <= s_2)
            self.assertTrue(s_1 < s_2)

            self.assertTrue(s_1.issubset(s_3))
            self.assertFalse(s_1 == s_3)
            self.assertTrue(s_1 <= s_3)
            self.assertTrue(s_1 < s_3)

            self.assertTrue(s_1.issubset(s_4))
            self.assertTrue(s_1 == s_4)
            self.assertTrue(s_1 <= s_4)
            self.assertFalse(s_1 < s_4)

            self.assertTrue(s_1.issubset(s_5))
            if PYTHON_VERSION >= (3, 4):
                self.assertRaises(TypeError, lambda: s_1 <= s_5)

            self.assertRaises(TypeError, s_1.issubset, None)

            self.assertNotEqual(s_1, s_6)
            self.assertTrue(s_2 == s_6)
            self.assertTrue(s_6 == s_6)

            self.assertTrue(s_2.issubset(s_7))
Example #16
def process_cluster_stats():
    def multi_dict_counter(level):
        if level < 1:
            return Counter()
        return defaultdict(lambda: multi_dict_counter(level - 1))

    """ Creating a 2D dictionary to hold the cluster wide counter

    counters from across MFCs will be aggregated based on the sample ID.
    cluster[<Sample ID>][<Counter Name>] = Counter(<Dict of counter values>)

    cluster['cumulative'][<Counter Name>] will be used to keep track of the cumulative of last sample
    Delta will be calculated using above counter.
    """
    cluster = multi_dict_counter(2)  # 2 Level dictionary of Counter

    tick = lambda x: time.time() - x
    item_cnt = 0
    cur_sample = None
    #mfc_hash = Dict(key=config.get('constants', 'REDIS_MFC_UUID_HASH_KEY'), redis=r)
    sync_list = List(key=config.get('constants', 'REDIS_SYNC_DEV_LIST_KEY'),
                     redis=r)
    cluster_sample_timeout = int(
        config.get('constants', 'CLUSTER_SAMPLE_TIMEOUT'))
    store_q = config.get('constants', 'REDIS_CLUSTER_STORE_QUEUE_KEY')
    req_interval = int(config.get('collector', 'MFC_REQUEST_FREQUENCY'))
    sample_q = []

    while True:
        data = r.blpop(config.get('constants', 'REDIS_PARSER_QUEUE_KEY'))
        counters = json.loads(data[1])

        # Check whether data exists for the parsed response; the agentd response can be empty
        if counters['data'] is not None:
            """Process each MFC counter."""
            process_mfc_counters.apply_async(args=[counters],
                                             queue='process',
                                             routing_key='process.stat')
            """Process Cluster wide cumulative data for same sample ID."""
            item_cnt += 1

            # Requests
            cluster[counters['sample_id']]['requests'].update(
                counters['data']['glbl']['requests'])

            # Cumulative bytes
            cluster[counters['sample_id']]['bytes'].update(
                counters['data']['glbl']['bytes'])

            # Timestamp
            cluster[counters['sample_id']]['timestamp'] = counters['data'][
                'timestamp']

            try:
                cluster[counters['sample_id']]['ip_list'].append(
                    counters['ip'])  # Preserve the IP
            except AttributeError:
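                # First access returns a Counter (which has no .append), so
                # the AttributeError path creates the list instead.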
                cluster[counters['sample_id']]['ip_list'] = list()
                cluster[counters['sample_id']]['ip_list'].append(
                    counters['ip'])

            if cur_sample is not None and cur_sample != counters['sample_id']:
                # new sample has arrived
                if item_cnt > len(sync_list) or tick(
                        init_sample_ts) >= cluster_sample_timeout:
                    # Case 1: records from all synced MFCs were received; store
                    # and remove the sample from the cluster DS.
                    # Case 2: some data is still outstanding, but the sample
                    # timed out.

                    # Calculate the cumulative delta.
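                    # (Counter subtraction keeps only positive counts, so any
                    # counter that decreased since the last sample is dropped.)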
                    cluster[cur_sample]['cur_thrpt'] = cluster[cur_sample][
                        'bytes'] - cluster['cumulative']['bytes']
                    cluster[cur_sample]['cur_thrpt']['total'] = sum(
                        cluster[cur_sample]['cur_thrpt'].values())
                    cluster[cur_sample]['cur_thrpt']['cache'] = cluster[cur_sample]['cur_thrpt']['ram'] + \
                                                                cluster[cur_sample]['cur_thrpt']['disk']

                    # Preserve the cumulative for the next sample set
                    cluster['cumulative']['bytes'] = cluster[cur_sample][
                        'bytes']

                    # Push the sample to the store queue
                    r.rpush(store_q, (cur_sample, dict(cluster[cur_sample])))

                    del cluster[cur_sample]
                    item_cnt = 1
                    cur_sample = sample_q.pop(0) if (
                        len(sample_q) > 0) else counters['sample_id']
                    init_sample_ts = time.time()
                else:
                    LOG.info(
                        "Got new sample ID: %s. Waiting for the current sample "
                        "(%s) to finish arriving before it is pushed out"
                        % (counters['sample_id'], cur_sample))
                    LOG.info("Adding sample ID to the waiting list.")
                    if counters['sample_id'] not in sample_q:
                        sample_q.append(counters['sample_id'])

            if cur_sample is None:
                cur_sample = counters['sample_id']
                init_sample_ts = time.time()
        else:
            LOG.critical(
                "Device: %s, %s IP: %s" %
                (counters['device_id'], counters['name'], counters['ip']))
            LOG.critical(
                "MFC response doesn't have any counter data. Skipping sample: %s"
                % (counters['sample_id']))
Example #17
    # Redis is a key/value store.  It provides a way to store data
    # structures in a service that is separate from the running
    # program.  Many of the data structures Redis can store are
    # nearly identical to standard Python built-in data types such
    # as lists and dictionaries.
    # We are using the RedisList object from the redis_collections
    # module to access Redis.  This provides us with a Python
    # object that works almost exactly like a normal Python list,
    # except the information is stored in Redis.
    # This means the information in the list can persist between
    # program runs or be stored to multiple Redis servers using
    # Redis replication.
    # Unlike SQL databases, the default Redis configuration will
    # do a single disk write operation every 15 minutes instead of
    # every time data is written.  This significantly decreases
    # wear on flash/sd-card.
    # We are going to create two lists in Redis to store our temps,
    # one for celsius and one for fahrenheit.
    redis_connection = redislite.StrictRedis(redisrdb)
    temp_c_list = RedisList(redis=redis_connection, key='temp_c')
    temp_f_list = RedisList(redis=redis_connection, key='temp_f')

    del temp_c_list[:]
    del temp_f_list[:]

    # Now loop through
    while True:
        temp_c_list.append(read_temp_c())
        temp_f_list.append(read_temp_f())
        print('Temp F:', read_temp_f(), 'Hourly Average Temp F:',
              sum(temp_f_list[-3600:]) / len(temp_f_list[-3600:]))
        time.sleep(frequency)
Example #18
    parser.add_argument('-t', '--host', help='Remote host address')
    parser.add_argument('-p', '--port', help='Remote port')

    args = parser.parse_args()

    if args.host:
        host = args.host

    if args.port:
        port = args.port

    slave = '{} {}'.format(host, port)
    print slave
    #rc = redislite.StrictRedis(serverconfig={'slaveof': slave})
    rc = redislite.StrictRedis(host=host, port=port)
    lst = List(redis=rc, key='speed')

    print "Keys in redis: ", rc.keys()
    print "No of items in redis['speed']: ", len(lst)
    print "Weekly speed\n", processor.average_speed_weekly(lst)

    settings = Dict(redis=rc, key='settings')
    if settings.get('last_test'):
        print 'Last test: ', settings['last_test']
    else:
        print 'No last run'

    settings['last_test'] = datetime.now()

    print "Last hour aggr: ", settings.get('last_hour') if settings.get(
        'last_hour') else 'not yet'
Example #19
    def __init__(self, database=None, name=None, data=None):
        super().__init__(database, name)
        self.data = List(key=name, data=data, redis=self.database.redis)
Example #20
from geventhttpclient import HTTPClient, URL

#redis.connection.socket = gevent.socket
r = redis.Redis(host=config.get('redis', 'db_host'),
                port=config.get('redis', 'db_port'),
                db=config.get('redis', 'db'))
r_keys = {
    'dev_list': config.get('constants', 'REDIS_DEV_LIST_KEY'),
    'sync_dev_list': config.get('constants', 'REDIS_SYNC_DEV_LIST_KEY'),
    'mfc_uuid': config.get('constants', 'REDIS_MFC_UUID_HASH_KEY'),
    'cur_thrpt': config.get('constants', 'REDIS_MFC_CUR_THRPT_KEY'),
    'unsync_dev_list': config.get('constants', 'REDIS_UNSYNC_DEV_LIST_KEY'),
    'new_found_dev_list': config.get('constants',
                                     'REDIS_NEW_FOUND_DEV_LIST_KEY')
}
mfa_dev_list = List(key=r_keys['dev_list'], redis=r)
sync_dev_list = List(key=r_keys['sync_dev_list'], redis=r)
mfc_uuid = Dict(key=r_keys['mfc_uuid'], redis=r)
"""Cluster Request Tasks"""


@celery.task(rate_limit=2)
def request_config_mfc(ip, data=None):
    if data is None:
        data = """<mfc-request><header><type>GET</type></header>
        <data>running-config mfc-cluster mfc</data></mfc-request>"""
    mfc_con = MfcHttpConnection(ip)
    resp = mfc_con.send_request(data)
    r.rpush(config.get('constants', 'REDIS_CONFIG_XML_QUEUE_KEY'),
            [ip, resp])
    return resp
Example #21
    load_w1_modules()
    
    # Redis is a key/value store.  It provides a way to store data
    # structures in a service that is separate from the running
    # program.  Many of the data structures Redis can store are
    # nearly identical to standard Python built-in data types such
    # as lists and dictionaries.
    # We are using the RedisList object from the redis_collections
    # module to access Redis.  This provides us with a Python
    # object that works almost exactly like a normal Python list,
    # except the information is stored in Redis.
    # This means the information in the list can persist between
    # program runs or be stored to multiple Redis servers using
    # Redis replication.
    # Unlike SQL databases, the default Redis configuration will
    # do a single disk write operation every 15 minutes instead of
    # every time data is written.  This significantly decreases
    # wear on flash/sd-card.
    # We are going to create two lists in Redis to store our temps,
    # one for celsius and one for fahrenheit.
    redis_connection = redislite.StrictRedis(redisrdb)
    temp_c_list = RedisList(redis=redis_connection, key='temp_c')
    temp_f_list = RedisList(redis=redis_connection, key='temp_f')

    # Now loop through 
    while True:
        temp_c_list.append(read_temp_c())
        temp_f_list.append(read_temp_f())
        print('Temp F:', read_temp_f(), 'Hourly Average Temp F:', sum(temp_f_list[-3600:])/len(temp_f_list[-3600:]))
        time.sleep(frequency)
Example #22
    def create_list(self, *args, **kwargs):
        kwargs['redis'] = self.redis
        return List(*args, **kwargs)
Example #23
def request_cluster_stats(sync_mfcs, interval=20):
    req_uri = '/admin/agentd_comm'
    xml_q = config.get('constants', 'REDIS_XML_QUEUE_KEY')
    new_dev_list_key = config.get('constants', 'REDIS_NEW_FOUND_DEV_LIST_KEY')
    new_sync_dev_list_key = config.get('constants',
                                       'REDIS_NEW_SYNC_DEV_LIST_KEY')
    sync_mfcs_key = config.get('constants', 'REDIS_SYNC_DEV_LIST_KEY')
    signal.signal(signal.SIGQUIT, gevent.kill)
    stat_clients = []
    """Request to synced MFCs

    will get the IP list from mfc_uuid
    """
    sync_mfcs_count = len(sync_mfcs)

    LOG.info("Synced MFCs: ")
    for device_id, name, ip in sync_mfcs:
        LOG.info("%s %s %s" % (device_id, name, ip))

    def create_stat_clients():
        LOG.info("Creating Stats request clients")
        for device_id, name, ip in sync_mfcs:
            url = URL('http://' + ip + ':8080' + req_uri)
            stat_clients.append(
                HTTPClient.from_url(url, concurrency=1, headers_type=dict))

    def close_stat_clients():
        for c in xrange(sync_mfcs_count):
            stat_clients[c].close()

    create_stat_clients()
    g_req_pool = gevent.pool.Pool(size=sync_mfcs_count)
    LOG.info("Starting to request stats from MFC")
    while True:
        # Commented out the following: the time-based check hogs CPU cycles.
        '''
        if tick(t1) >= interval or initial:
            t1 = time.time()
            initial = False
        '''
        sample_id = str(uuid.uuid1())
        for i in xrange(sync_mfcs_count):
            g_req_pool.spawn(request_stats_mfc_cb, stat_clients[i],
                             sync_mfcs[i], sample_id, xml_q)
        g_req_pool.join(timeout=interval)
        gevent.sleep(interval)

        if r.exists(new_sync_dev_list_key):
            LOG.info(
                "New MFCs added to the Sync list- updating stat request clients"
            )
            close_stat_clients()
            stat_clients = []
            LOG.info("Newly Synced MFCs: ")
            new_sync_mfcs = list(List(key=new_sync_dev_list_key, redis=r))
            for device_id, name, ip in new_sync_mfcs:
                LOG.info("%s %s %s" % (device_id, name, ip))
            r.delete(new_dev_list_key)
            # Get the current synced list and extend it with the newly synced list
            sync_mfcs = List(key=sync_mfcs_key, redis=r)
            sync_mfcs.extend(new_sync_mfcs)
            sync_mfcs = list(sync_mfcs)
            sync_mfcs_count = len(sync_mfcs)
            create_stat_clients()

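    # NOTE: unreachable as written; the `while True` loop above never breaks.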
    close_stat_clients()