Example #1
0
    def execute_process(self, key, **kwargs):
        """ Generate and cache fundraiser-totals data (2010 vs. 2011 donation
            amounts, one series pair per ranked donor country plus 'Total')
            under the given cache key.

            @param key: cache key under which the computed dict is stored
        """
        logging.info('Commencing caching of fundraiser totals data at:  %s' %
                     self.CACHING_HOME)

        end_time = TP.timestamp_from_obj(datetime.datetime.utcnow(), 1, 3)
        """ DATA CONFIG """
        """ set the metrics to plot """
        lttdl = DL.LongTermTrendsLoader(db='db1025')

        start_of_2011_fundraiser = '20111116000000'
        countries = DL.CiviCRMLoader().get_ranked_donor_countries(
            start_of_2011_fundraiser)
        countries.append('Total')
        """ Dictionary object storing lists of regexes - each expression must pass for a label to persist """
        year_groups = dict()
        for country in countries:
            # Plain equality instead of cmp() (removed in Python 3).
            if country == 'Total':
                year_groups['2011 Total'] = ['2011.*']
                year_groups['2010 Total'] = ['2010.*']
            else:
                year_groups['2011 ' + country] = ['2011' + country]
                year_groups['2010 ' + country] = ['2010' + country]

        metrics = 'amount'
        weights = ''
        groups = year_groups
        group_metrics = ['year', 'country']

        metric_types = DL.LongTermTrendsLoader._MT_AMOUNT_

        include_totals = False
        include_others = False
        hours_back = 0
        time_unit = TP.DAY
        """ END CONFIG """
        """ For each metric use the LongTermTrendsLoader to generate the data to plot """

        dr = DR.DataReporting()

        times, counts = lttdl.run_fundrasing_totals(end_time, metric_name=metrics, metric_type=metric_types, groups=groups, group_metric=group_metrics, include_other=include_others, \
                                        include_total=include_totals, hours_back=hours_back, weight_name=weights, time_unit=time_unit)
        dict_param = dict()

        # Build one {country: combined-data} entry pairing the 2010 and 2011
        # series for that country.
        for country in countries:

            key_2011 = '2011 ' + country
            key_2010 = '2010 ' + country

            new_counts = dict()
            new_counts[key_2010] = counts[key_2010]
            new_counts[key_2011] = counts[key_2011]

            new_times = dict()
            new_times[key_2010] = times[key_2010]
            new_times[key_2011] = times[key_2011]

            dr._counts_ = new_counts
            dr._times_ = new_times

            # next(iter(...)) works on Python 2 and 3 alike; dict.keys() is a
            # non-indexable view on Python 3. Length of any one series is used
            # as the zero-fill template (series presumably share a time axis
            # -- TODO confirm against the loader).
            empty_data = [0] * len(new_times[next(iter(new_times))])
            data = list()
            data.append(dr.get_data_lists([''], empty_data))

            dict_param[country] = Hlp.combine_data_lists(data)

        self.clear_cached_data(key)
        self.cache_data(dict_param, key)

        logging.info('Caching complete.')
Example #2
0
    def execute_process(self, key, **kwargs):
        """ Generate and cache long-term-trends data: runs a configured list of
            metric queries (impressions, views, donations, amounts, rates, ...)
            over the last VIEW_DURATION_HRS hours and stores the combined plot
            data under the given cache key.

            @param key: cache key under which the computed dict is stored
        """
        logging.info('Commencing caching of long term trends data at:  %s' %
                     self.CACHING_HOME)

        end_time, start_time = TP.timestamps_for_interval(datetime.datetime.utcnow(), 1, \
                                                          hours=-self.VIEW_DURATION_HRS, resolution=1)
        """ DATA CONFIG """

        countries = DL.CiviCRMLoader().get_ranked_donor_countries(start_time)
        countries = countries[1:6]
        """ set the metrics to plot """
        lttdl = DL.LongTermTrendsLoader(db='storage3')
        """ Dictionary object storing lists of regexes - each expression must pass for a label to persist """
        # country_groups = {'US': ['(US)'], 'CA': ['(CA)'], 'JP': ['(JP)'], 'IN': ['(IN)'], 'NL': ['(NL)']}
        payment_groups = {'Credit Card': ['^cc$'], 'Paypal': ['^pp$']}
        currency_groups = {
            'USD': ['(USD)'],
            'CAD': ['(CAD)'],
            'JPY': ['(JPY)'],
            'EUR': ['(EUR)']
        }
        lang_cntry_groups = {
            'US': ['US..', '.{4}'],
            'EN': ['[^U^S]en', '.{4}']
        }

        top_cntry_groups = dict()
        for country in countries:
            top_cntry_groups[country] = [country, '.{2}']

        # To include click rate
        # groups = [ lang_cntry_groups] metrics = ['click_rate'] metrics_index = [3]
        # group_metrics = [DL.LongTermTrendsLoader._MT_RATE_] metric_types = ['country', 'language'] include_totals = [True] include_others = [True]

        # The lists below are parallel: element i of each configures query i.
        metrics = [
            'impressions', 'views', 'donations', 'donations', 'amount',
            'amount', 'diff_don', 'diff_don', 'donations', 'conversion_rate'
        ]
        weights = ['', '', '', '', '', '', 'donations', 'donations', '', '']
        metrics_index = [0, 1, 2, 2, 2, 4, 5, 5, 6, 6]
        groups = [lang_cntry_groups, lang_cntry_groups, lang_cntry_groups, top_cntry_groups, lang_cntry_groups, currency_groups, \
                  lang_cntry_groups, lang_cntry_groups, payment_groups, payment_groups]
        """  The metrics that are used to build a group string to be qualified via regex - the values of the list metrics are concatenated """
        group_metrics = [['country', 'language'], ['country', 'language'], ['country', 'language'], \
                         ['country', 'language'], ['country', 'language'], ['currency'], ['country', 'language'], \
                         ['country', 'language'], ['payment_method'], ['payment_method']]

        metric_types = [DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                        DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                        DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                        DL.LongTermTrendsLoader._MT_RATE_]

        include_totals = [
            True, True, True, False, True, True, False, False, False, True
        ]
        include_others = [
            True, True, True, False, True, True, True, True, True, False
        ]
        hours_back = [0, 0, 0, 0, 0, 0, 24, 168, 0, 0]
        time_unit = [
            TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR,
            TP.HOUR, TP.HOUR, TP.HOUR
        ]

        data = list()
        """ END CONFIG """
        """ For each metric use the LongTermTrendsLoader to generate the data to plot """
        for index in range(len(metrics)):

            dr = DR.DataReporting()

            times, counts = lttdl.run_query(start_time, end_time, metrics_index[index], metric_name=metrics[index], metric_type=metric_types[index], \
                                            groups=groups[index], group_metric=group_metrics[index], include_other=include_others[index], \
                                            include_total=include_totals[index], hours_back=hours_back[index], weight_name=weights[index], \
                                            time_unit=time_unit[index])

            times = TP.normalize_timestamps(times, False, time_unit[index])

            dr._counts_ = counts
            dr._times_ = times

            # next(iter(...)) works on Python 2 and 3 alike; dict.keys() is a
            # non-indexable view on Python 3. Any one series supplies the
            # zero-fill length.
            empty_data = [0] * len(times[next(iter(times))])
            data.append(dr.get_data_lists([''], empty_data))

        dict_param = Hlp.combine_data_lists(data)
        dict_param['interval'] = self.VIEW_DURATION_HRS
        dict_param['end_time'] = TP.timestamp_convert_format(end_time, 1, 2)

        self.clear_cached_data(key)
        self.cache_data(dict_param, key)

        logging.info('Caching complete.')
Example #3
0
def index(request):
    """ Django view for the live-results page.

        Reads optional POST filters ('campaign_regexp_filter', 'min_donation',
        'iso_filter'), retrieves cached results, filters and colour-codes the
        summary table by hypothesis-test confidence, builds the live plot data
        and renders the 'live_results/index.html' template.

        @param request: Django HttpRequest
    """
    """ 
        PROCESS POST DATA
        ================= 
        
        Escape all user input that can be entered in text fields 
        
    """
    try:
        campaign_regexp_filter = MySQLdb._mysql.escape_string(
            request.POST['campaign_regexp_filter'])

        # Plain equality instead of cmp() (removed in Python 3).
        if campaign_regexp_filter == '':
            campaign_regexp_filter = '^C_|^C11_'
    except Exception:  # field absent or not escapable -> default filter
        campaign_regexp_filter = '^C_|^C11_'

    try:
        min_donation = MySQLdb._mysql.escape_string(
            request.POST['min_donation'].strip())
        min_donation = int(min_donation)

    except Exception:  # field absent or non-numeric -> no minimum
        min_donation = 0

    # Filter on ISO codes to include matched countries
    try:
        iso_filter = MySQLdb._mysql.escape_string(
            request.POST['iso_filter'].strip())

    except Exception:  # field absent -> match any two-character ISO code
        iso_filter = '.{2}'
    """
        Call up cached results
    """

    cache = DC.LiveResults_DataCaching()
    dict_param = cache.retrieve_cached_data(view_keys.LIVE_RESULTS_DICT_KEY)

    measured_metrics_counts = dict_param['measured_metrics_counts']
    results = dict_param['results']
    column_names = dict_param['column_names']
    sampling_interval = dict_param['interval']
    duration_hrs = dict_param['duration']

    start_time = dict_param['start_time']
    end_time = dict_param['end_time']

    ir_cmpgn = DR.IntervalReporting(query_type=FDH._QTYPE_CAMPAIGN_ +
                                    FDH._QTYPE_TIME_,
                                    generate_plot=False)
    ir_banner = DR.IntervalReporting(query_type=FDH._QTYPE_BANNER_ +
                                     FDH._QTYPE_TIME_,
                                     generate_plot=False)
    ir_lp = DR.IntervalReporting(query_type=FDH._QTYPE_LP_ + FDH._QTYPE_TIME_,
                                 generate_plot=False)

    ir_cmpgn._counts_ = dict_param['ir_cmpgn_counts']
    ir_banner._counts_ = dict_param['ir_banner_counts']
    ir_lp._counts_ = dict_param['ir_lp_counts']

    ir_cmpgn._times_ = dict_param['ir_cmpgn_times']
    ir_banner._times_ = dict_param['ir_banner_times']
    ir_lp._times_ = dict_param['ir_lp_times']

    metric_legend_table = dict_param['metric_legend_table']
    conf_legend_table = dict_param['conf_legend_table']
    """ Filtering -- donations and artifacts """

    country_index = column_names.index('country')
    donations_index = column_names.index('donations')
    campaign_index = column_names.index('utm_campaign')
    new_results = list()

    # Keep rows above the donation minimum whose campaign and country match
    # the user-supplied regex filters.
    for row in results:
        try:
            if row[donations_index] > min_donation and re.search(
                    campaign_regexp_filter, row[campaign_index]) and re.search(
                        iso_filter, row[country_index]):
                new_results.append(list(row))
        except Exception:
            logging.error(
                'live_results/views.py -- Could not process row: %s' %
                str(row))

    results = new_results

    new_measured_metrics_counts = dict()
    for metric in measured_metrics_counts:
        new_measured_metrics_counts[metric] = dict()

        for artifact_key in measured_metrics_counts[metric]:
            if re.search(campaign_regexp_filter, artifact_key):
                new_measured_metrics_counts[metric][
                    artifact_key] = measured_metrics_counts[metric][
                        artifact_key]
    """ 
        Format results to encode html table cell markup in results        
    """

    ret = DR.ConfidenceReporting(
        query_type='', hyp_test='').get_confidence_on_time_range(
            None,
            None,
            None,
            measured_metrics_counts=new_measured_metrics_counts
        )  # first get color codes on confidence
    conf_colour_code = ret[0]

    for row_index in range(len(results)):
        # Artifact key is 'campaign-banner-lp' built from the first three
        # columns of the row.
        artifact_index = results[row_index][0] + '-' + results[row_index][
            1] + '-' + results[row_index][2]

        for col_index in range(len(column_names)):

            is_coloured_cell = False
            if column_names[col_index] in conf_colour_code.keys():
                if artifact_index in conf_colour_code[
                        column_names[col_index]].keys():
                    results[row_index][
                        col_index] = '<td style="background-color:' + conf_colour_code[
                            column_names[col_index]][
                                artifact_index] + ';">' + str(
                                    results[row_index][col_index]) + '</td>'
                    is_coloured_cell = True

            if not (is_coloured_cell):
                results[row_index][col_index] = '<td>' + str(
                    results[row_index][col_index]) + '</td>'

    if results:
        summary_table = DR.DataReporting()._write_html_table(
            results,
            column_names,
            use_standard_metric_names=True,
            omit_cell_markup=True)
    else:
        summary_table = '<p><font size="4">No data available.</font></p>'

    summary_table = '<h4><u>Metrics Legend:</u></h4><div class="spacer"></div>' + metric_legend_table + \
    '<div class="spacer"></div><h4><u>Confidence Legend for Hypothesis Testing:</u></h4><div class="spacer"></div>' + conf_legend_table + '<div class="spacer"></div><div class="spacer"></div>' + summary_table
    """ 
        Prepare Live Plots
    """
    """ compose a list of zero data """
    # Build each [time, value] pair as a distinct list: the previous
    # '[[1.0, 0.0]] * n' idiom aliased one shared inner list, so the per-row
    # mutations clobbered every row. '//' keeps the sample count an int on
    # Python 3 (identical result on Python 2).
    empty_data = [[1.0 * i * sampling_interval, 0.0]
                  for i in range(duration_hrs * 60 // sampling_interval + 1)]
    """ Extract data from interval reporting objects """
    cmpgn_data_dict = ir_cmpgn.get_data_lists(
        ['C_', 'C11_', campaign_regexp_filter], empty_data)
    cmpgn_banner_dict = ir_banner.get_data_lists(['B_', 'B11_'], empty_data)
    cmpgn_lp_dict = ir_lp.get_data_lists(['L11_', '^cc'], empty_data)
    """  
        Build template parameters
    """

    template_dict = Hlp.combine_data_lists(
        [cmpgn_data_dict, cmpgn_banner_dict,
         cmpgn_lp_dict])  # combine the separate data sets
    template_dict['summary_table'] = summary_table
    template_dict['latest_log_end_time'] = end_time
    template_dict['start_time'] = start_time

    return render_to_response('live_results/index.html',
                              template_dict,
                              context_instance=RequestContext(request))
Example #4
0
    def execute_process(self, key, **kwargs):
        """ Generate and cache fundraiser-totals data (2010 vs. 2011 donation
            amounts, one series pair per ranked donor country plus 'Total')
            under the given cache key.

            @param key: cache key under which the computed dict is stored
        """
        logging.info('Commencing caching of fundraiser totals data at:  %s' % self.CACHING_HOME)        
                
        end_time = TP.timestamp_from_obj(datetime.datetime.utcnow(), 1, 3)
        
        """ DATA CONFIG """
        
        """ set the metrics to plot """
        lttdl = DL.LongTermTrendsLoader(db='db1025')
        
        start_of_2011_fundraiser = '20111116000000'
        countries = DL.CiviCRMLoader().get_ranked_donor_countries(start_of_2011_fundraiser)
        countries.append('Total')
        
        """ Dictionary object storing lists of regexes - each expression must pass for a label to persist """
        year_groups = dict()
        for country in countries:
            # Plain equality instead of cmp() (removed in Python 3).
            if country == 'Total':
                year_groups['2011 Total'] = ['2011.*']
                year_groups['2010 Total'] = ['2010.*']
            else:                
                year_groups['2011 ' + country] = ['2011' + country]
                year_groups['2010 ' + country] = ['2010' + country]

        metrics = 'amount'
        weights = ''
        groups = year_groups
        group_metrics = ['year', 'country']
        
        metric_types = DL.LongTermTrendsLoader._MT_AMOUNT_
        
        include_totals = False
        include_others = False
        hours_back = 0
        time_unit = TP.DAY
                        
        """ END CONFIG """
        
        
        """ For each metric use the LongTermTrendsLoader to generate the data to plot """
            
        dr = DR.DataReporting()
        
        times, counts = lttdl.run_fundrasing_totals(end_time, metric_name=metrics, metric_type=metric_types, groups=groups, group_metric=group_metrics, include_other=include_others, \
                                        include_total=include_totals, hours_back=hours_back, weight_name=weights, time_unit=time_unit)
        dict_param = dict()
        
        # Build one {country: combined-data} entry pairing the 2010 and 2011
        # series for that country.
        for country in countries:
            
            key_2011 = '2011 ' +  country
            key_2010 = '2010 ' +  country
            
            new_counts = dict()
            new_counts[key_2010] = counts[key_2010]
            new_counts[key_2011] = counts[key_2011]
            
            new_times = dict()
            new_times[key_2010] = times[key_2010]
            new_times[key_2011] = times[key_2011]
            
            dr._counts_ = new_counts
            dr._times_ = new_times

            # next(iter(...)) works on Python 2 and 3 alike; dict.keys() is a
            # non-indexable view on Python 3.
            empty_data = [0] * len(new_times[next(iter(new_times))])
            data = list()
            data.append(dr.get_data_lists([''], empty_data))
            
            dict_param[country] = Hlp.combine_data_lists(data)
        
        self.clear_cached_data(key)
        self.cache_data(dict_param, key)
        
        logging.info('Caching complete.')
Example #5
0
 def execute_process(self, key, **kwargs):
     """ Generate and cache long-term-trends data: runs a configured list of
         metric queries (impressions, views, donations, amounts, rates, ...)
         over the last VIEW_DURATION_HRS hours and stores the combined plot
         data under the given cache key.

         @param key: cache key under which the computed dict is stored
     """
     logging.info('Commencing caching of long term trends data at:  %s' % self.CACHING_HOME)
     
     end_time, start_time = TP.timestamps_for_interval(datetime.datetime.utcnow(), 1, \
                                                       hours=-self.VIEW_DURATION_HRS, resolution=1)
     
     """ DATA CONFIG """
     
     countries = DL.CiviCRMLoader().get_ranked_donor_countries(start_time)
     countries = countries[1:6]
     
     """ set the metrics to plot """
     lttdl = DL.LongTermTrendsLoader(db='storage3')
             
     """ Dictionary object storing lists of regexes - each expression must pass for a label to persist """
     # country_groups = {'US': ['(US)'], 'CA': ['(CA)'], 'JP': ['(JP)'], 'IN': ['(IN)'], 'NL': ['(NL)']}
     payment_groups = {'Credit Card' : ['^cc$'], 'Paypal': ['^pp$']}
     currency_groups = {'USD' : ['(USD)'], 'CAD': ['(CAD)'], 'JPY': ['(JPY)'], 'EUR': ['(EUR)']}
     lang_cntry_groups = {'US': ['US..', '.{4}'], 'EN' : ['[^U^S]en', '.{4}']}
     
     top_cntry_groups = dict()
     for country in countries:
         top_cntry_groups[country] = [country, '.{2}']
     
     # To include click rate
     # groups = [ lang_cntry_groups] metrics = ['click_rate'] metrics_index = [3]
     # group_metrics = [DL.LongTermTrendsLoader._MT_RATE_] metric_types = ['country', 'language'] include_totals = [True] include_others = [True]
     
     # The lists below are parallel: element i of each configures query i.
     metrics = ['impressions', 'views', 'donations', 'donations', 'amount', 'amount', 'diff_don', 'diff_don', 'donations', 'conversion_rate']
     weights = ['', '', '', '', '', '', 'donations', 'donations', '', '']
     metrics_index = [0, 1, 2, 2, 2, 4, 5, 5, 6, 6]
     groups = [lang_cntry_groups, lang_cntry_groups, lang_cntry_groups, top_cntry_groups, lang_cntry_groups, currency_groups, \
               lang_cntry_groups, lang_cntry_groups, payment_groups, payment_groups]
     
     """  The metrics that are used to build a group string to be qualified via regex - the values of the list metrics are concatenated """ 
     group_metrics = [['country', 'language'], ['country', 'language'], ['country', 'language'], \
                      ['country', 'language'], ['country', 'language'], ['currency'], ['country', 'language'], \
                      ['country', 'language'], ['payment_method'], ['payment_method']]
     
     metric_types = [DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                     DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                     DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                     DL.LongTermTrendsLoader._MT_RATE_]
     
     include_totals = [True, True, True, False, True, True, False, False, False, True]
     include_others = [True, True, True, False, True, True, True, True, True, False]
     hours_back = [0, 0, 0, 0, 0, 0, 24, 168, 0, 0]
     time_unit = [TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR]
     
     data = list()
     
     """ END CONFIG """
     
     
     """ For each metric use the LongTermTrendsLoader to generate the data to plot """
     for index in range(len(metrics)):
         
         dr = DR.DataReporting()
         
         times, counts = lttdl.run_query(start_time, end_time, metrics_index[index], metric_name=metrics[index], metric_type=metric_types[index], \
                                         groups=groups[index], group_metric=group_metrics[index], include_other=include_others[index], \
                                         include_total=include_totals[index], hours_back=hours_back[index], weight_name=weights[index], \
                                         time_unit=time_unit[index])
         
         times = TP.normalize_timestamps(times, False, time_unit[index])
         
         dr._counts_ = counts
         dr._times_ = times
   
         # next(iter(...)) works on Python 2 and 3 alike; dict.keys() is a
         # non-indexable view on Python 3.
         empty_data = [0] * len(times[next(iter(times))])
         data.append(dr.get_data_lists([''], empty_data))
         
     dict_param = Hlp.combine_data_lists(data)
     dict_param['interval'] = self.VIEW_DURATION_HRS    
     dict_param['end_time'] = TP.timestamp_convert_format(end_time,1,2)
     
     self.clear_cached_data(key)
     self.cache_data(dict_param, key)
     
     logging.info('Caching complete.')
Example #6
0
def index(request):
    """ Django view for the live-results page.

        Reads optional POST filters ('campaign_regexp_filter', 'min_donation',
        'iso_filter'), retrieves cached results, filters and colour-codes the
        summary table by hypothesis-test confidence, builds the live plot data
        and renders the 'live_results/index.html' template.

        @param request: Django HttpRequest
    """
    """ 
        PROCESS POST DATA
        ================= 
        
        Escape all user input that can be entered in text fields 
        
    """
    try:
        campaign_regexp_filter = MySQLdb._mysql.escape_string(request.POST['campaign_regexp_filter'])
        
        # Plain equality instead of cmp() (removed in Python 3).
        if campaign_regexp_filter == '':
            campaign_regexp_filter = '^C_|^C11_'
    except Exception:  # field absent or not escapable -> default filter
        campaign_regexp_filter = '^C_|^C11_'

    try:
        min_donation = MySQLdb._mysql.escape_string(request.POST['min_donation'].strip())
        min_donation = int(min_donation)
    
    except Exception:  # field absent or non-numeric -> no minimum
        min_donation = 0
    
    # Filter on ISO codes to include matched countries
    try:
        iso_filter = MySQLdb._mysql.escape_string(request.POST['iso_filter'].strip())        
    
    except Exception:  # field absent -> match any two-character ISO code
        iso_filter = '.{2}'
        
    
    """
        Call up cached results
    """     
    
    cache = DC.LiveResults_DataCaching()
    dict_param = cache.retrieve_cached_data(view_keys.LIVE_RESULTS_DICT_KEY)
    
    measured_metrics_counts = dict_param['measured_metrics_counts']
    results = dict_param['results']
    column_names = dict_param['column_names']
    sampling_interval = dict_param['interval']    
    duration_hrs = dict_param['duration']
    
    start_time = dict_param['start_time']
    end_time = dict_param['end_time']
        
    ir_cmpgn = DR.IntervalReporting(query_type=FDH._QTYPE_CAMPAIGN_ + FDH._QTYPE_TIME_, generate_plot=False)
    ir_banner = DR.IntervalReporting(query_type=FDH._QTYPE_BANNER_ + FDH._QTYPE_TIME_, generate_plot=False)
    ir_lp = DR.IntervalReporting(query_type=FDH._QTYPE_LP_ + FDH._QTYPE_TIME_, generate_plot=False)
    
    ir_cmpgn._counts_ = dict_param['ir_cmpgn_counts']
    ir_banner._counts_ = dict_param['ir_banner_counts']
    ir_lp._counts_ = dict_param['ir_lp_counts']
    
    ir_cmpgn._times_ = dict_param['ir_cmpgn_times']
    ir_banner._times_ = dict_param['ir_banner_times']
    ir_lp._times_ = dict_param['ir_lp_times']

    metric_legend_table = dict_param['metric_legend_table']
    conf_legend_table = dict_param['conf_legend_table']

    
    """ Filtering -- donations and artifacts """
    
    country_index = column_names.index('country')
    donations_index = column_names.index('donations')
    campaign_index = column_names.index('utm_campaign')
    new_results = list()
    
    # Keep rows above the donation minimum whose campaign and country match
    # the user-supplied regex filters.
    for row in results:
        try:
            if row[donations_index] > min_donation and re.search(campaign_regexp_filter, row[campaign_index]) and re.search(iso_filter, row[country_index]):
                new_results.append(list(row))
        except Exception:
            logging.error('live_results/views.py -- Could not process row: %s' % str(row))

    results = new_results
    
    new_measured_metrics_counts = dict()
    for metric in measured_metrics_counts:        
        new_measured_metrics_counts[metric] = dict()
        
        for artifact_key in measured_metrics_counts[metric]:
            if re.search(campaign_regexp_filter, artifact_key):
                new_measured_metrics_counts[metric][artifact_key] = measured_metrics_counts[metric][artifact_key]
         
    """ 
        Format results to encode html table cell markup in results        
    """

    ret = DR.ConfidenceReporting(query_type='', hyp_test='').get_confidence_on_time_range(None, None, None, measured_metrics_counts=new_measured_metrics_counts) # first get color codes on confidence
    conf_colour_code = ret[0]
    
    for row_index in range(len(results)):
        # Artifact key is 'campaign-banner-lp' built from the first three
        # columns of the row.
        artifact_index = results[row_index][0] + '-' + results[row_index][1] + '-' + results[row_index][2]
        
        for col_index in range(len(column_names)):
            
            is_coloured_cell = False
            if column_names[col_index] in conf_colour_code.keys():
                if artifact_index in conf_colour_code[column_names[col_index]].keys():
                    results[row_index][col_index] = '<td style="background-color:' + conf_colour_code[column_names[col_index]][artifact_index] + ';">' + str(results[row_index][col_index]) + '</td>'
                    is_coloured_cell = True
                    
            if not(is_coloured_cell):
                results[row_index][col_index] = '<td>' + str(results[row_index][col_index]) + '</td>'
                
    if results:
        summary_table = DR.DataReporting()._write_html_table(results, column_names, use_standard_metric_names=True, omit_cell_markup=True)
    else:
        summary_table = '<p><font size="4">No data available.</font></p>'
        
    summary_table = '<h4><u>Metrics Legend:</u></h4><div class="spacer"></div>' + metric_legend_table + \
    '<div class="spacer"></div><h4><u>Confidence Legend for Hypothesis Testing:</u></h4><div class="spacer"></div>' + conf_legend_table + '<div class="spacer"></div><div class="spacer"></div>' + summary_table


        
    """ 
        Prepare Live Plots
    """
    
    """ compose a list of zero data """    
    # Build each [time, value] pair as a distinct list: the previous
    # '[[1.0, 0.0]] * n' idiom aliased one shared inner list, so the per-row
    # mutations clobbered every row. '//' keeps the sample count an int on
    # Python 3 (identical result on Python 2).
    empty_data = [[1.0 * i * sampling_interval, 0.0]
                  for i in range(duration_hrs * 60 // sampling_interval + 1)]
        
    """ Extract data from interval reporting objects """        
    cmpgn_data_dict = ir_cmpgn.get_data_lists(['C_', 'C11_', campaign_regexp_filter], empty_data)
    cmpgn_banner_dict = ir_banner.get_data_lists(['B_', 'B11_'], empty_data)
    cmpgn_lp_dict = ir_lp.get_data_lists(['L11_', '^cc'], empty_data)
        
        
    """  
        Build template parameters
    """
    
    template_dict = Hlp.combine_data_lists([cmpgn_data_dict, cmpgn_banner_dict, cmpgn_lp_dict]) # combine the separate data sets
    template_dict['summary_table'] = summary_table
    template_dict['latest_log_end_time'] = end_time
    template_dict['start_time'] = start_time
    
    return render_to_response('live_results/index.html', template_dict, context_instance=RequestContext(request))