def process_mfc_counters(counters=None, data=None):
    """Compute the cache-hit ratio (CHR) and per-interval throughput for a
    single MFC counter sample, then enqueue it for storage."""
    decimal.getcontext().prec = 6

    if counters is None:
        data = r.blpop(config.get('constants', 'REDIS_PARSER_QUEUE_KEY'))
        counters = json.loads(data[1])

    if counters['data'] is None:
        LOG.critical("Device: %s, %s IP: %s" %
                     (counters['device_id'], counters['name'], counters['ip']))
        LOG.critical(
            "MFC response doesn't have any counter data. Skipping sample: %s"
            % counters['sample_id'])
    else:
        gl_bytes = Counter(counters['data']['glbl']['bytes'])
        # MFC cache-hit ratio (CHR)
        tot_bytes = sum(gl_bytes.values())
        tot_cache = (counters['data']['glbl']['bytes']['ram'] +
                     counters['data']['glbl']['bytes']['disk'])
        # Handle the zero condition: the cumulative byte count can be 0.
        if tot_bytes == 0:
            counters['data']['chr'] = 0
        else:
            counters['data']['chr'] = float(
                (decimal.Decimal(tot_cache) / decimal.Decimal(tot_bytes)) *
                100)

        # Calculate the current (per-interval) throughput.
        mfcs_cur_thrpt = Dict(key=config.get('constants',
                                             'REDIS_MFC_CUR_THRPT_KEY'),
                              redis=r)
        try:
            # Counter subtraction yields the per-interval delta; keys whose
            # delta is not positive are dropped, and lookups on missing keys
            # safely return 0.
            counters['data']['cur_thrpt'] = (
                gl_bytes - mfcs_cur_thrpt[counters['device_id']])
            counters['data']['cur_thrpt']['total'] = sum(
                counters['data']['cur_thrpt'].values())
            counters['data']['cur_thrpt']['cache'] = \
                counters['data']['cur_thrpt']['ram'] + \
                counters['data']['cur_thrpt']['disk']
            mfcs_cur_thrpt[counters['device_id']] = gl_bytes
        except KeyError:
            # First sample from this device: no previous cumulative exists.
            LOG.debug("current throughput hashmap - initial update for " +
                      str(counters['device_id']))
            counters['data']['cur_thrpt'] = mfcs_cur_thrpt[
                counters['device_id']] = gl_bytes
            counters['data']['cur_thrpt']['total'] = counters['data'][
                'cur_thrpt']['cache'] = 0

        r.rpush(config.get('constants', 'REDIS_MFC_STORE_QUEUE_KEY'),
                json.dumps(counters))

    return counters
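
# --- Illustrative sketch (not part of the pipeline): how the per-interval
# delta above behaves. Counter subtraction keeps only positive counts, and a
# lookup on a missing key returns 0 instead of raising, so the 'ram' and
# 'disk' reads on the delta are always safe. Values here are hypothetical.
from collections import Counter

prev = Counter({'ram': 100, 'disk': 40, 'origin': 10})
cur = Counter({'ram': 160, 'disk': 40, 'origin': 25})

delta = cur - prev                             # the zero 'disk' delta is dropped
delta['total'] = sum(delta.values())           # 60 + 15 = 75
delta['cache'] = delta['ram'] + delta['disk']  # 60 + 0: missing 'disk' reads as 0
print(delta)

# The CHR computation above relies on Decimal with a precision of 6 to avoid
# float rounding in the division, e.g.:
import decimal
decimal.getcontext().prec = 6
print(float(decimal.Decimal(140) / decimal.Decimal(300) * 100))  # 46.6667
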
def send_request(self, data=None):
    self.build_req(data=data)
    if self.request is not None:
        try:
            LOG.info("sending request to MFC agentd - " + self.ip)
            req_open = urlopen(self.request)
        except HTTPError as e:
            LOG.critical("Error code: %s Error Message: %s" % (e.code, e.msg))
            raise
        except URLError as e:
            # HTTPError is handled above; a plain URLError may still carry a
            # code or reason attribute depending on the failure.
            if hasattr(e, 'code'):
                LOG.critical('ERROR code: %s' % e.code)
            elif hasattr(e, 'reason'):
                LOG.critical('URL ERROR: ' + str(e.reason))
            else:
                LOG.debug("No HTTP error attributes: " + str(e))
            raise
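
# --- Illustrative sketch: why HTTPError is handled before URLError above.
# HTTPError is a subclass of URLError, so the more specific handler must come
# first or it would never be reached. urllib2 names as used in this module;
# the target URL is hypothetical (an unreachable TEST-NET address).
import urllib2

try:
    urllib2.urlopen('http://192.0.2.1/', timeout=5)
except urllib2.HTTPError as e:  # the server answered with an error status
    print('HTTP %s: %s' % (e.code, e.msg))
except urllib2.URLError as e:   # connection refused, DNS failure, timeout
    print('URL error: %s' % e.reason)
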
def process_cluster_stats():
    """Aggregate cluster-wide counters keyed by sample ID.

    Counters from across MFCs are aggregated based on the sample ID:
        cluster[<Sample ID>][<Counter Name>] = Counter(<dict of counter values>)

    cluster['cumulative'][<Counter Name>] keeps the cumulative totals of the
    last sample; the per-interval delta is calculated against it.
    """

    def multi_dict_counter(level):
        """Return a `level`-deep defaultdict tree with Counter leaves."""
        if level < 1:
            return Counter()
        return defaultdict(lambda: multi_dict_counter(level - 1))

    cluster = multi_dict_counter(2)  # two-level dictionary of Counters

    tick = lambda x: time.time() - x  # seconds elapsed since timestamp x
    item_cnt = 0
    cur_sample = None
    #mfc_hash = Dict(key=config.get('constants', 'REDIS_MFC_UUID_HASH_KEY'), redis=r)
    sync_list = List(key=config.get('constants', 'REDIS_SYNC_DEV_LIST_KEY'),
                     redis=r)
    # config.get() returns a string; cast it so the timeout comparison below
    # is numeric rather than string-typed.
    cluster_sample_timeout = int(config.get('constants',
                                            'CLUSTER_SAMPLE_TIMEOUT'))
    store_q = config.get('constants', 'REDIS_CLUSTER_STORE_QUEUE_KEY')
    req_interval = int(config.get('collector', 'MFC_REQUEST_FREQUENCY'))
    sample_q = []

    while True:
        data = r.blpop(config.get('constants', 'REDIS_PARSER_QUEUE_KEY'))
        counters = json.loads(data[1])

        # Check whether data exists for the parsed response; the agentd
        # response can be empty.
        if counters['data'] is not None:
            # Process each MFC's counters individually.
            process_mfc_counters.apply_async(args=[counters],
                                             queue='process',
                                             routing_key='process.stat')
            # Process cluster-wide cumulative data for the same sample ID.
            item_cnt += 1

            # Requests
            cluster[counters['sample_id']]['requests'].update(
                counters['data']['glbl']['requests'])

            # Cumulative bytes
            cluster[counters['sample_id']]['bytes'].update(
                counters['data']['glbl']['bytes'])

            # Timestamp
            cluster[counters['sample_id']]['timestamp'] = counters['data'][
                'timestamp']

            # Preserve the reporting IPs. The defaultdict leaf is a Counter,
            # which has no append(); the first access per sample therefore
            # raises AttributeError and the leaf is replaced with a list.
            try:
                cluster[counters['sample_id']]['ip_list'].append(
                    counters['ip'])
            except AttributeError:
                cluster[counters['sample_id']]['ip_list'] = [counters['ip']]

            if cur_sample is not None and cur_sample != counters['sample_id']:
                # A new sample has arrived. Flush the current sample if either
                # records from all the sync'd MFCs have been received, or the
                # sample timed out with some data still outstanding.
                if item_cnt > len(sync_list) or tick(
                        init_sample_ts) >= cluster_sample_timeout:

                    # Calculate the cumulative delta.
                    cluster[cur_sample]['cur_thrpt'] = cluster[cur_sample][
                        'bytes'] - cluster['cumulative']['bytes']
                    cluster[cur_sample]['cur_thrpt']['total'] = sum(
                        cluster[cur_sample]['cur_thrpt'].values())
                    cluster[cur_sample]['cur_thrpt']['cache'] = \
                        cluster[cur_sample]['cur_thrpt']['ram'] + \
                        cluster[cur_sample]['cur_thrpt']['disk']

                    # Preserve the cumulative totals for the next sample set.
                    cluster['cumulative']['bytes'] = cluster[cur_sample][
                        'bytes']

                    # Serialize before pushing; redis cannot store a raw
                    # Python tuple.
                    r.rpush(store_q,
                            json.dumps((cur_sample, dict(cluster[cur_sample]))))

                    del cluster[cur_sample]
                    item_cnt = 1
                    cur_sample = sample_q.pop(0) if sample_q else counters[
                        'sample_id']
                    init_sample_ts = time.time()
                else:
                    LOG.info("Got new sample ID: %s. Waiting for the current "
                             "sample (%s) to finish arriving before it is "
                             "pushed out." %
                             (counters['sample_id'], cur_sample))
                    LOG.info("Adding the sample ID to the waiting list.")
                    if counters['sample_id'] not in sample_q:
                        sample_q.append(counters['sample_id'])

            if cur_sample is None:
                cur_sample = counters['sample_id']
                init_sample_ts = time.time()
        else:
            LOG.critical(
                "Device: %s, %s IP: %s" %
                (counters['device_id'], counters['name'], counters['ip']))
            LOG.critical(
                "MFC response doesn't have any counter data. Skipping sample: %s"
                % counters['sample_id'])
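
# --- Illustrative sketch: the two-level defaultdict-of-Counter layout that
# process_cluster_stats builds. Sample IDs and byte values are hypothetical.
from collections import Counter, defaultdict

def multi_dict_counter(level):
    if level < 1:
        return Counter()
    return defaultdict(lambda: multi_dict_counter(level - 1))

cluster = multi_dict_counter(2)
# Two MFCs report for the same sample ID; Counter.update() sums per key.
cluster['sample-1']['bytes'].update({'ram': 100, 'disk': 50})
cluster['sample-1']['bytes'].update({'ram': 30, 'disk': 20})
print(cluster['sample-1']['bytes'])  # Counter({'ram': 130, 'disk': 70})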