Example #1
def process_filter_data(request):

    err_msg = ""

    time_curr = datetime.datetime.utcnow()
    time_dayback = time_curr + datetime.timedelta(hours=-4)

    _beginning_time_ = TP.timestamp_from_obj(time_dayback, 1, 3)
    _end_time_ = TP.timestamp_from_obj(time_curr, 1, 3)

    """ 
        PROCESS POST VARS 
        =================
    """

    try:

        latest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST["latest_utc_ts"].strip())

        # Default to the end of the interval before validating so an empty field is not flagged as invalid
        if latest_utc_ts_var == "":
            latest_utc_ts_var = _end_time_

        if not (TP.is_timestamp(latest_utc_ts_var, 1)) and not (TP.is_timestamp(latest_utc_ts_var, 2)):
            raise TypeError

        ts_format = TP.getTimestampFormat(latest_utc_ts_var)
        if ts_format == TP.TS_FORMAT_FORMAT1:
            latest_utc_ts_var = TP.timestamp_convert_format(latest_utc_ts_var, TP.TS_FORMAT_FORMAT1, TP.TS_FORMAT_FLAT)

    except KeyError:
        latest_utc_ts_var = _end_time_

    except TypeError:
        err_msg = "Please enter a valid end-timestamp."
        latest_utc_ts_var = _end_time_

    try:

        earliest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST["earliest_utc_ts"].strip())

        # Default to the start of the interval before validating so an empty field is not flagged as invalid
        if earliest_utc_ts_var == "":
            earliest_utc_ts_var = _beginning_time_

        if not (TP.is_timestamp(earliest_utc_ts_var, 1)) and not (TP.is_timestamp(earliest_utc_ts_var, 2)):
            raise TypeError

        ts_format = TP.getTimestampFormat(earliest_utc_ts_var)
        if ts_format == TP.TS_FORMAT_FORMAT1:
            earliest_utc_ts_var = TP.timestamp_convert_format(
                earliest_utc_ts_var, TP.TS_FORMAT_FORMAT1, TP.TS_FORMAT_FLAT
            )

    except KeyError:
        earliest_utc_ts_var = _beginning_time_

    except TypeError:
        err_msg = "Please enter a valid start-timestamp."
        earliest_utc_ts_var = _beginning_time_

    return err_msg, earliest_utc_ts_var, latest_utc_ts_var
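
The filter pattern above (default window, escape, validate, convert) can be sketched in isolation with only the standard library. The sketch below assumes the "flat" timestamp format handled by TP is YYYYMMDDHHMMSS; the names FLAT_FMT, _is_flat_timestamp and resolve_window are illustrative and are not part of the TP module.

import datetime

FLAT_FMT = '%Y%m%d%H%M%S'  # assumed layout of the flat timestamp format

def _is_flat_timestamp(value):
    """ Return True if value parses as a flat YYYYMMDDHHMMSS timestamp """
    try:
        datetime.datetime.strptime(value, FLAT_FMT)
        return True
    except ValueError:
        return False

def resolve_window(post, hours_back=4):
    """ Default to the last hours_back hours, overriding either bound with a valid POSTed timestamp """
    time_curr = datetime.datetime.utcnow()
    start = (time_curr + datetime.timedelta(hours=-hours_back)).strftime(FLAT_FMT)
    end = time_curr.strftime(FLAT_FMT)
    err_msg = ''

    posted_end = post.get('latest_utc_ts', '').strip()
    if posted_end:
        if _is_flat_timestamp(posted_end):
            end = posted_end
        else:
            err_msg = 'Please enter a valid end-timestamp.'

    posted_start = post.get('earliest_utc_ts', '').strip()
    if posted_start:
        if _is_flat_timestamp(posted_start):
            start = posted_start
        else:
            err_msg = 'Please enter a valid start-timestamp.'

    return err_msg, start, end

Calling err_msg, start, end = resolve_window(request.POST) would then mirror the tuple returned by process_filter_data, without the TP dependencies.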
Example #2
def impression_list(request):
    
    err_msg = ''
    where_clause = ''
    
    """ 
        Process times and POST
        =============
    """
    duration_hrs = 2
    end_time, start_time = TP.timestamps_for_interval(datetime.datetime.utcnow(), 1, hours=-duration_hrs)    
    
    if 'earliest_utc_ts' in request.POST:
        if cmp(request.POST['earliest_utc_ts'], '') != 0:
            earliest_utc_ts = MySQLdb._mysql.escape_string(request.POST['earliest_utc_ts'].strip())
            format = TP.getTimestampFormat(earliest_utc_ts)
            
            if format == 1:
                start_time = earliest_utc_ts
            elif format == 2:
                start_time = TP.timestamp_convert_format(earliest_utc_ts, 2, 1)
            elif format == -1:
                err_msg = err_msg + 'Start timestamp is formatted incorrectly\n'
    
    if 'latest_utc_ts' in request.POST:
        if cmp(request.POST['latest_utc_ts'], '') != 0:
            latest_utc_ts = MySQLdb._mysql.escape_string(request.POST['latest_utc_ts'].strip())
            format = TP.getTimestampFormat(latest_utc_ts)
            
            if format == 1:
                end_time = latest_utc_ts
            elif format == 2:
                end_time = TP.timestamp_convert_format(latest_utc_ts, 2, 1)
            elif format == -1:
                err_msg = err_msg + 'End timestamp is formatted incorrectly\n'
            
    if 'iso_code' in request.POST:
        if cmp(request.POST['iso_code'], '') != 0:
            iso_code = MySQLdb._mysql.escape_string(request.POST['iso_code'].strip())
            where_clause = "where bi.country regexp '%s' " % iso_code
                    
    """ 
        Format and execute query 
        ========================
    """
        
    query_name = 'report_country_impressions.sql'    
    
    sql_stmnt = Hlp.file_to_string(projSet.__sql_home__ + query_name)
    sql_stmnt = sql_stmnt % (start_time, end_time, start_time, end_time, start_time, end_time, where_clause)
    
    dl = DL.DataLoader()
    results = dl.execute_SQL(sql_stmnt)
    column_names = dl.get_column_names()

    imp_table = DR.DataReporting()._write_html_table(results, column_names)
    
    return render_to_response('live_results/impression_list.html', {'imp_table' : imp_table.decode("utf-8"), 'err_msg' : err_msg, 'start' : TP.timestamp_convert_format(start_time, 1, 2), 'end' : TP.timestamp_convert_format(end_time, 1, 2)},  context_instance=RequestContext(request))
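
The positional interpolation above repeats the (start_time, end_time) pair three times, so the argument order is easy to get wrong. If the SQL template were written with named placeholders instead (an assumption -- report_country_impressions.sql is not shown here), the fill could be driven by a dict and each placeholder could be reused any number of times inside the template:

# Sketch only: assumes placeholders such as %(start_time)s, %(end_time)s and
# %(where_clause)s in the .sql file, which is not necessarily how it is written.
params = {
    'start_time': start_time,
    'end_time': end_time,
    'where_clause': where_clause,
}
sql_stmnt = Hlp.file_to_string(projSet.__sql_home__ + query_name) % params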
Example #3
def index(request):
    
    err_msg, earliest_utc_ts_var, latest_utc_ts_var = process_filter_data(request)
    
    sltl = DL.SquidLogTableLoader()
    
    """ Show the squid log table """
    squid_table = sltl.get_all_rows_unique_start_time()
    filtered_squid_table = list()
    
    for row in squid_table:
         
        log_start_time = sltl.get_squid_log_record_field(row, 'start_time')
        
        """ Ensure the timestamp is properly formatted """
        if TP.is_timestamp(log_start_time, 2):
            log_start_time = TP.timestamp_convert_format(log_start_time, 2, 1)
            
        if int(log_start_time) > int(earliest_utc_ts_var) and int(log_start_time) < int(latest_utc_ts_var):
            filtered_squid_table.append(row)
    
    squid_table = filtered_squid_table
    squid_table.reverse()
    column_names = sltl.get_column_names()
    new_column_names = list()
    
    for name in column_names:
        new_column_names.append(sltl.get_verbose_column(name))
        
    squid_table = DR.DataReporting()._write_html_table(squid_table, new_column_names)
    
    """ Show the latest log that has been or is loading and its progress """
    completion_rate = sltl.get_completion_rate_of_latest_log()
    
    return render_to_response('LML/index.html', {'err_msg' : err_msg, 'squid_table' : squid_table, 'completion_rate' : completion_rate},  context_instance=RequestContext(request))
Example #4
def process_post_vars(request):

    end_time, start_time = TP.timestamps_for_interval(
        datetime.datetime.utcnow(), 1, hours=-24)

    # POST: minimum donations for records
    try:
        min_donations_var = MySQLdb._mysql.escape_string(
            request.POST['min_donations'].strip())
        min_donations_var = int(min_donations_var)

    except:
        min_donations_var = 0

    # POST Start Timestamp for records
    try:

        earliest_utc_ts_var = MySQLdb._mysql.escape_string(
            request.POST['utc_ts'].strip())
        """ If the user timestamp is earlier than the default start time run the query for the earlier start time  """
        ts_format = TP.getTimestampFormat(earliest_utc_ts_var)
        """ Ensure the validity of the timestamp input """
        if ts_format == TP.TS_FORMAT_FORMAT1:
            start_time = TP.timestamp_convert_format(earliest_utc_ts_var,
                                                     TP.TS_FORMAT_FORMAT1,
                                                     TP.TS_FORMAT_FLAT)
        elif ts_format == TP.TS_FORMAT_FLAT:
            start_time = earliest_utc_ts_var

    except Exception:  # If the timestamp was incorrectly formatted, silently fall back to the default start time
        pass

    # POST: minimum donations for records
    try:
        view_order = MySQLdb._mysql.escape_string(
            request.POST['view_order'].strip())

        if cmp(view_order, 'campaign') == 0:
            view_order_str = 'order by utm_campaign, country, language, landing_page desc'
        elif cmp(view_order, 'country') == 0:
            view_order_str = 'order by country, language, utm_campaign, landing_page desc'
        else:
            # Unrecognised ordering -- fall back to the default so view_order_str is always defined
            view_order_str = 'order by utm_campaign, country, language, landing_page desc'

    except:
        view_order_str = 'order by utm_campaign, country, language, landing_page desc'

    return start_time, end_time, min_donations_var, view_order_str
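
The cmp() branching on view_order is effectively a whitelist that maps a POSTed key onto a safe ORDER BY clause. The same idea can be written as a dict lookup with a default; the helper below is an illustrative sketch, not part of the original module.

_VIEW_ORDERS = {
    'campaign': 'order by utm_campaign, country, language, landing_page desc',
    'country': 'order by country, language, utm_campaign, landing_page desc',
}

def resolve_view_order(post):
    """ Map a POSTed view_order value onto a safe ORDER BY clause, defaulting to the campaign ordering """
    view_order = post.get('view_order', '').strip()
    return _VIEW_ORDERS.get(view_order, _VIEW_ORDERS['campaign'])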
Example #5
def process_post_vars(request):
    
    end_time, start_time = TP.timestamps_for_interval(datetime.datetime.utcnow(), 1, hours=-24)
    
    # POST: minimum donations for records
    try:
        min_donations_var = MySQLdb._mysql.escape_string(request.POST['min_donations'].strip())
        min_donations_var = int(min_donations_var)
        
    except:
        min_donations_var = 0
    
    # POST Start Timestamp for records
    try:
        
        earliest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST['utc_ts'].strip())
        
        """ If the user timestamp is earlier than the default start time run the query for the earlier start time  """
        ts_format = TP.getTimestampFormat(earliest_utc_ts_var)
    
        """ Ensure the validity of the timestamp input """
        if ts_format == TP.TS_FORMAT_FORMAT1:
            start_time = TP.timestamp_convert_format(earliest_utc_ts_var, TP.TS_FORMAT_FORMAT1, TP.TS_FORMAT_FLAT)
        elif ts_format == TP.TS_FORMAT_FLAT:
            start_time = earliest_utc_ts_var
                
    except Exception: # If the timestamp was incorrectly formatted, silently fall back to the default start time
        pass
    
    # POST: minimum donations for records
    try:
        view_order = MySQLdb._mysql.escape_string(request.POST['view_order'].strip())
        
        if cmp(view_order, 'campaign') == 0:
            view_order_str = 'order by utm_campaign, country, language, landing_page desc'
        elif cmp(view_order, 'country') == 0:
            view_order_str = 'order by country, language, utm_campaign, landing_page desc'
        else:
            # Unrecognised ordering -- fall back to the default so view_order_str is always defined
            view_order_str = 'order by utm_campaign, country, language, landing_page desc'
        
    except:
        view_order_str = 'order by utm_campaign, country, language, landing_page desc'
    
    return start_time, end_time, min_donations_var, view_order_str
Example #6
def index(request):

    err_msg, earliest_utc_ts_var, latest_utc_ts_var = process_filter_data(request)

    sltl = DL.SquidLogTableLoader()

    """ Show the squid log table """
    squid_table = sltl.get_all_rows_unique_start_time()
    filtered_squid_table = list()

    for row in squid_table:

        log_start_time = sltl.get_squid_log_record_field(row, "start_time")

        """ Ensure the timestamp is properly formatted """
        if TP.is_timestamp(log_start_time, 2):
            log_start_time = TP.timestamp_convert_format(log_start_time, 2, 1)

        if int(log_start_time) > int(earliest_utc_ts_var) and int(log_start_time) < int(latest_utc_ts_var):
            filtered_squid_table.append(row)

    squid_table = filtered_squid_table
    squid_table.reverse()
    column_names = sltl.get_column_names()
    new_column_names = list()

    for name in column_names:
        new_column_names.append(sltl.get_verbose_column(name))

    squid_table = DR.DataReporting()._write_html_table(squid_table, new_column_names)

    """ Show the latest log that has been or is loading and its progress """
    completion_rate = sltl.get_completion_rate_of_latest_log()

    return render_to_response(
        "LML/index.html",
        {"err_msg": err_msg, "squid_table": squid_table, "completion_rate": completion_rate},
        context_instance=RequestContext(request),
    )
Example #7
0
    def _clear_squid_records(self, start, request_type):

        """ Ensure that the range is correct; otherwise abort - critical that outside records are not deleted """
        timestamp = TP.timestamp_convert_format(start, 1, 2)

        try:
            if request_type == self._BANNER_REQUEST_:
                self._DL_impressions_.delete_row(timestamp)
            elif request_type == self._LP_REQUEST_:
                self._DL_LPrequests_.delete_row(timestamp)

            logging.info("Executed delete for start time " + timestamp)

        except Exception as inst:

            logging.error("Could not execute delete for start time " + timestamp)

            logging.error(type(inst))  # the exception instance
            logging.error(inst.args)  # arguments stored in .args
            logging.error(inst)  # __str__ allows args to printed directly

            """ Die if the records cannot be removed """
            sys.exit()
Example #8
def index(request, **kwargs):
    """ 
        PROCESS POST DATA
        ================= 
    """

    if 'err_msg' in kwargs:
        err_msg = kwargs['err_msg']
    else:
        err_msg = ''

    try:

        latest_utc_ts_var = MySQLdb._mysql.escape_string(
            request.POST['latest_utc_ts'].strip())
        earliest_utc_ts_var = MySQLdb._mysql.escape_string(
            request.POST['earliest_utc_ts'].strip())

        # Default to the end of the interval before validating so an empty field is not flagged as invalid
        if latest_utc_ts_var == '':
            latest_utc_ts_var = _end_time_

        # Both bounds must be valid timestamps
        if not TP.is_timestamp(earliest_utc_ts_var, 1) or not TP.is_timestamp(
                latest_utc_ts_var, 1):
            raise TypeError

    except KeyError:

        earliest_utc_ts_var = _beginning_time_
        latest_utc_ts_var = _end_time_

    except TypeError:

        err_msg = 'Please enter a valid timestamp.'

        earliest_utc_ts_var = _beginning_time_
        latest_utc_ts_var = _end_time_

    ttl = DL.TestTableLoader()
    columns = ttl.get_column_names()
    test_rows = ttl.get_all_test_rows()
    """ Build a list of tests -- apply filters """
    l = []

    utm_campaign_index = ttl.get_test_index('utm_campaign')
    html_report_index = ttl.get_test_index('html_report')

    for i in test_rows:
        test_start_time = ttl.get_test_field(i, 'start_time')
        new_row = list(i)
        """ Ensure the timestamp is properly formatted """
        if TP.is_timestamp(test_start_time, 2):
            test_start_time = TP.timestamp_convert_format(
                test_start_time, 2, 1)

        new_row[
            html_report_index] = '<a href="/tests/report/%s">view</a>' % new_row[
                utm_campaign_index]

        if int(test_start_time) > int(earliest_utc_ts_var) and int(
                test_start_time) < int(latest_utc_ts_var):
            l.append(new_row)

    l.reverse()

    test_table = DR.DataReporting()._write_html_table(
        l, columns, use_standard_metric_names=True)

    return render_to_response('tests/index.html', {
        'err_msg': err_msg,
        'test_table': test_table
    },
                              context_instance=RequestContext(request))
Example #9
def impression_list(request):

    err_msg = ''
    where_clause = ''
    """ 
        Process times and POST
        =============
    """
    duration_hrs = 2
    end_time, start_time = TP.timestamps_for_interval(
        datetime.datetime.utcnow(), 1, hours=-duration_hrs)

    if 'earliest_utc_ts' in request.POST:
        if cmp(request.POST['earliest_utc_ts'], '') != 0:
            earliest_utc_ts = MySQLdb._mysql.escape_string(
                request.POST['earliest_utc_ts'].strip())
            format = TP.getTimestampFormat(earliest_utc_ts)

            if format == 1:
                start_time = earliest_utc_ts
            elif format == 2:
                start_time = TP.timestamp_convert_format(earliest_utc_ts, 2, 1)
            elif format == -1:
                err_msg = err_msg + 'Start timestamp is formatted incorrectly\n'

    if 'latest_utc_ts' in request.POST:
        if cmp(request.POST['latest_utc_ts'], '') != 0:
            latest_utc_ts = MySQLdb._mysql.escape_string(
                request.POST['latest_utc_ts'].strip())
            format = TP.getTimestampFormat(latest_utc_ts)

            if format == 1:
                end_time = latest_utc_ts
            elif format == 2:
                end_time = TP.timestamp_convert_format(latest_utc_ts, 2, 1)
            elif format == -1:
                err_msg = err_msg + 'End timestamp is formatted incorrectly\n'

    if 'iso_code' in request.POST:
        if cmp(request.POST['iso_code'], '') != 0:
            iso_code = MySQLdb._mysql.escape_string(
                request.POST['iso_code'].strip())
            where_clause = "where bi.country regexp '%s' " % iso_code
    """ 
        Format and execute query 
        ========================
    """

    query_name = 'report_country_impressions.sql'

    sql_stmnt = Hlp.file_to_string(projSet.__sql_home__ + query_name)
    sql_stmnt = sql_stmnt % (start_time, end_time, start_time, end_time,
                             start_time, end_time, where_clause)

    dl = DL.DataLoader()
    results = dl.execute_SQL(sql_stmnt)
    column_names = dl.get_column_names()

    imp_table = DR.DataReporting()._write_html_table(results, column_names)

    return render_to_response(
        'live_results/impression_list.html', {
            'imp_table': imp_table.decode("utf-8"),
            'err_msg': err_msg,
            'start': TP.timestamp_convert_format(start_time, 1, 2),
            'end': TP.timestamp_convert_format(end_time, 1, 2)
        },
        context_instance=RequestContext(request))
Example #10
def daily_totals(request):

    err_msg = ''

    start_day_ts = TP.timestamp_from_obj(
        datetime.datetime.utcnow() + datetime.timedelta(days=-1), 1, 0)
    end_day_ts = TP.timestamp_from_obj(datetime.datetime.utcnow(), 1, 0)
    country = '.{2}'
    min_donation = 0
    order_str = 'order by 1 desc,3 desc'
    """
        PROCESS POST
    """

    if 'start_day_ts' in request.POST:
        if cmp(request.POST['start_day_ts'], '') != 0:
            start_day_ts = MySQLdb._mysql.escape_string(
                request.POST['start_day_ts'].strip())
            format = TP.getTimestampFormat(start_day_ts)

            if format == 2:
                start_day_ts = TP.timestamp_convert_format(start_day_ts, 2, 1)
                # start_day_ts = start_day_ts[:8] + '000000'
            elif format == -1:
                err_msg = err_msg + 'Start timestamp is formatted incorrectly\n'

    if 'end_day_ts' in request.POST:
        if cmp(request.POST['end_day_ts'], '') != 0:
            end_day_ts = MySQLdb._mysql.escape_string(
                request.POST['end_day_ts'].strip())
            format = TP.getTimestampFormat(end_day_ts)

            if format == 2:
                end_day_ts = TP.timestamp_convert_format(end_day_ts, 2, 1)
                # end_day_ts = end_day_ts[:8] + '000000'
            elif format == -1:
                err_msg = err_msg + 'End timestamp is formatted incorrectly\n'

    if 'country' in request.POST:
        if cmp(request.POST['country'], '') != 0:
            country = MySQLdb._mysql.escape_string(request.POST['country'])

    if 'min_donation' in request.POST:
        if cmp(request.POST['min_donation'], '') != 0:
            try:
                min_donation = int(
                    MySQLdb._mysql.escape_string(
                        request.POST['min_donation'].strip()))
            except:
                logging.error(
                    'live_results/daily_totals -- Could not process minimum donation for "%s" '
                    % request.POST['min_donation'].strip())
                min_donation = 0

    if 'order_metric' in request.POST:
        if cmp(request.POST['order_metric'], 'Date') == 0:
            order_str = 'order by 1 desc,3 desc'
        elif cmp(request.POST['order_metric'], 'Country') == 0:
            order_str = 'order by 2 asc,1 desc'
    """
        === END POST ===
    """

    query_name = 'report_daily_totals_by_country'
    filename = projSet.__sql_home__ + query_name + '.sql'
    sql_stmnt = Hlp.file_to_string(filename)
    sql_stmnt = QD.format_query(query_name,
                                sql_stmnt, [start_day_ts, end_day_ts],
                                country=country,
                                min_donation=min_donation,
                                order_str=order_str)

    dl = DL.DataLoader()
    results = dl.execute_SQL(sql_stmnt)
    html_table = DR.DataReporting()._write_html_table(
        results, dl.get_column_names(), use_standard_metric_names=True)

    return render_to_response('live_results/daily_totals.html', \
                              {'html_table' : html_table, 'start_time' : TP.timestamp_convert_format(start_day_ts, 1, 2), 'end_time' : TP.timestamp_convert_format(end_day_ts, 1, 2)}, \
                              context_instance=RequestContext(request))
Example #11
 def execute_process(self, key, **kwargs):
     
     logging.info('Commencing caching of long term trends data at:  %s' % self.CACHING_HOME)
     
     end_time, start_time = TP.timestamps_for_interval(datetime.datetime.utcnow(), 1, \
                                                       hours=-self.VIEW_DURATION_HRS, resolution=1)
     
     """ DATA CONFIG """
     
     countries = DL.CiviCRMLoader().get_ranked_donor_countries(start_time)
     countries = countries[1:6]
     
     """ set the metrics to plot """
     lttdl = DL.LongTermTrendsLoader(db='storage3')
             
     """ Dictionary object storing lists of regexes - each expression must pass for a label to persist """
     # country_groups = {'US': ['(US)'], 'CA': ['(CA)'], 'JP': ['(JP)'], 'IN': ['(IN)'], 'NL': ['(NL)']}
     payment_groups = {'Credit Card' : ['^cc$'], 'Paypal': ['^pp$']}
     currency_groups = {'USD' : ['(USD)'], 'CAD': ['(CAD)'], 'JPY': ['(JPY)'], 'EUR': ['(EUR)']}
     lang_cntry_groups = {'US': ['US..', '.{4}'], 'EN' : ['[^U^S]en', '.{4}']}
     
     top_cntry_groups = dict()
     for country in countries:
         top_cntry_groups[country] = [country, '.{2}']
     
     # To include click rate
     # groups = [ lang_cntry_groups] metrics = ['click_rate'] metrics_index = [3]
     # group_metrics = [DL.LongTermTrendsLoader._MT_RATE_] metric_types = ['country', 'language'] include_totals = [True] include_others = [True]
     
     metrics = ['impressions', 'views', 'donations', 'donations', 'amount', 'amount', 'diff_don', 'diff_don', 'donations', 'conversion_rate']
     weights = ['', '', '', '', '', '', 'donations', 'donations', '', '']
     metrics_index = [0, 1, 2, 2, 2, 4, 5, 5, 6, 6]
     groups = [lang_cntry_groups, lang_cntry_groups, lang_cntry_groups, top_cntry_groups, lang_cntry_groups, currency_groups, \
               lang_cntry_groups, lang_cntry_groups, payment_groups, payment_groups]
     
     """  The metrics that are used to build a group string to be qualified via regex - the values of the list metrics are concatenated """ 
     group_metrics = [['country', 'language'], ['country', 'language'], ['country', 'language'], \
                      ['country', 'language'], ['country', 'language'], ['currency'], ['country', 'language'], \
                      ['country', 'language'], ['payment_method'], ['payment_method']]
     
     metric_types = [DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                     DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                     DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                     DL.LongTermTrendsLoader._MT_RATE_]
     
     include_totals = [True, True, True, False, True, True, False, False, False, True]
     include_others = [True, True, True, False, True, True, True, True, True, False]
     hours_back = [0, 0, 0, 0, 0, 0, 24, 168, 0, 0]
     time_unit = [TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR]
     
     data = list()
     
     """ END CONFIG """
     
     
     """ For each metric use the LongTermTrendsLoader to generate the data to plot """
     for index in range(len(metrics)):
         
         dr = DR.DataReporting()
         
         times, counts = lttdl.run_query(start_time, end_time, metrics_index[index], metric_name=metrics[index], metric_type=metric_types[index], \
                                         groups=groups[index], group_metric=group_metrics[index], include_other=include_others[index], \
                                         include_total=include_totals[index], hours_back=hours_back[index], weight_name=weights[index], \
                                         time_unit=time_unit[index])
         
         times = TP.normalize_timestamps(times, False, time_unit[index])
         
         dr._counts_ = counts
         dr._times_ = times
   
         empty_data = [0] * len(times[times.keys()[0]])
         data.append(dr.get_data_lists([''], empty_data))
         
     dict_param = Hlp.combine_data_lists(data)
     dict_param['interval'] = self.VIEW_DURATION_HRS    
     dict_param['end_time'] = TP.timestamp_convert_format(end_time,1,2)
     
     self.clear_cached_data(key)
     self.cache_data(dict_param, key)
     
     logging.info('Caching complete.')
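
The job above configures its ten queries as parallel lists (metrics, weights, metrics_index, groups, and so on) that are indexed together inside the loop. zip() can bundle the same configuration so each iteration receives one tuple per query; the sketch below is a possible restructuring of that loop, reusing only names already defined in this example.

for (metric, weight, m_index, group, group_metric, m_type,
     inc_total, inc_other, hrs_back, unit) in zip(metrics, weights, metrics_index, groups, group_metrics,
                                                  metric_types, include_totals, include_others, hours_back, time_unit):

    dr = DR.DataReporting()

    # One query per bundled configuration tuple
    times, counts = lttdl.run_query(start_time, end_time, m_index, metric_name=metric, metric_type=m_type,
                                    groups=group, group_metric=group_metric, include_other=inc_other,
                                    include_total=inc_total, hours_back=hrs_back, weight_name=weight,
                                    time_unit=unit)

    times = TP.normalize_timestamps(times, False, unit)

    dr._counts_ = counts
    dr._times_ = times

    empty_data = [0] * len(times[times.keys()[0]])
    data.append(dr.get_data_lists([''], empty_data))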
Example #12
    def execute_process(self, key, **kwargs):
        
        logging.info('Commencing caching of live results data at:  %s' % self.CACHING_HOME)
        shelve_key = key
        
        """ Find the earliest and latest page views for a given campaign  """
        lptl = DL.LandingPageTableLoader(db='db1025')
            
        query_name = 'report_summary_results_country.sql'
        query_name_1S = 'report_summary_results_country_1S.sql'                    
        campaign_regexp_filter = '^C_|^C11_'
                
        dl = DL.DataLoader(db='db1025')
        end_time, start_time = TP.timestamps_for_interval(datetime.datetime.utcnow(), 1, hours=-self.DURATION_HRS)
        
        """ Should a one-step query be used? """        
        use_one_step = lptl.is_one_step(start_time, end_time, 'C11')  # Assume it is a one step test if there are no impressions for this campaign in the landing page table
        
        """ 
            Retrieve the latest time for which impressions have been loaded
            ===============================================================
        """
        
        sql_stmnt = 'select max(end_time) as latest_ts from squid_log_record where log_completion_pct = 100.00'
        
        results = dl.execute_SQL(sql_stmnt)
        latest_timestamp = results[0][0]
        latest_timestamp = TP.timestamp_from_obj(latest_timestamp, 2, 3)
        latest_timestamp_flat = TP.timestamp_convert_format(latest_timestamp, 2, 1)
    
        ret = DR.ConfidenceReporting(query_type='', hyp_test='', db='db1025').get_confidence_on_time_range(start_time, end_time, campaign_regexp_filter, one_step=use_one_step)
        measured_metrics_counts = ret[1]
        
        """ Prepare Summary results """
        
        sql_stmnt = Hlp.file_to_string(projSet.__sql_home__ + query_name)
        sql_stmnt = sql_stmnt % (start_time, latest_timestamp_flat, start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, \
                                 start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, \
                                 start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, campaign_regexp_filter)        
        
        logging.info('Executing report_summary_results ...')
        
        results = dl.execute_SQL(sql_stmnt)
        column_names = dl.get_column_names()
        
        if use_one_step:
            
            logging.info('... including one step artifacts ...')
            
            sql_stmnt_1S = Hlp.file_to_string(projSet.__sql_home__ + query_name_1S)
            sql_stmnt_1S = sql_stmnt_1S % (start_time, latest_timestamp_flat, start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, \
                                     start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, \
                                     start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, campaign_regexp_filter)
            
            results = list(results)        
            results_1S = dl.execute_SQL(sql_stmnt_1S)
            
            """ Ensure that the results are unique """
            one_step_keys = list()
            for row in results_1S:
                one_step_keys.append(str(row[0]) + str(row[1]) + str(row[2]))
            
            new_results = list()
            for row in results:
                key = str(row[0]) + str(row[1]) + str(row[2])
                if not(key in one_step_keys):
                    new_results.append(row)
            results = new_results
                
            results.extend(list(results_1S))
            
        metric_legend_table = DR.DataReporting().get_standard_metrics_legend()
        conf_legend_table = DR.ConfidenceReporting(query_type='bannerlp', hyp_test='TTest').get_confidence_legend_table()

        """ Create a interval loader objects """
        
        sampling_interval = 5 # 5 minute sampling interval for donation plots
        
        ir_cmpgn = DR.IntervalReporting(query_type=FDH._QTYPE_CAMPAIGN_ + FDH._QTYPE_TIME_, generate_plot=False, db='db1025')
        ir_banner = DR.IntervalReporting(query_type=FDH._QTYPE_BANNER_ + FDH._QTYPE_TIME_, generate_plot=False, db='db1025')
        ir_lp = DR.IntervalReporting(query_type=FDH._QTYPE_LP_ + FDH._QTYPE_TIME_, generate_plot=False, db='db1025')
            
        """ Execute queries """        
        ir_cmpgn.run(start_time, end_time, sampling_interval, 'donations', '',{})
        ir_banner.run(start_time, end_time, sampling_interval, 'donations', '',{})
        ir_lp.run(start_time, end_time, sampling_interval, 'donations', '',{})
        
        
        """ Prepare serialized objects """
        
        dict_param = dict()

        dict_param['metric_legend_table'] = metric_legend_table
        dict_param['conf_legend_table'] = conf_legend_table
        
        dict_param['measured_metrics_counts'] = measured_metrics_counts
        dict_param['results'] = results
        dict_param['column_names'] = column_names

        dict_param['interval'] = sampling_interval
        dict_param['duration'] = self.DURATION_HRS    
        
        dict_param['start_time'] = TP.timestamp_convert_format(start_time,1,2)
        dict_param['end_time'] = TP.timestamp_convert_format(end_time,1,2)
        
        dict_param['ir_cmpgn_counts'] = ir_cmpgn._counts_
        dict_param['ir_banner_counts'] = ir_banner._counts_
        dict_param['ir_lp_counts'] = ir_lp._counts_
        
        dict_param['ir_cmpgn_times'] = ir_cmpgn._times_
        dict_param['ir_banner_times'] = ir_banner._times_
        dict_param['ir_lp_times'] = ir_lp._times_
        
        self.clear_cached_data(shelve_key)
        self.cache_data(dict_param, shelve_key)
        
        logging.info('Caching complete.')
        
Example #13
def index(request, **kwargs):
    
    """ 
        PROCESS POST DATA
        ================= 
    """
    
    if 'err_msg' in kwargs:
        err_msg = kwargs['err_msg']
    else:        
        err_msg = ''

    try:
        
        latest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST['latest_utc_ts'].strip())
        earliest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST['earliest_utc_ts'].strip())
        
        # Default to the end of the interval before validating so an empty field is not flagged as invalid
        if latest_utc_ts_var == '':
            latest_utc_ts_var = _end_time_

        # Both bounds must be valid timestamps
        if not TP.is_timestamp(earliest_utc_ts_var, 1) or not TP.is_timestamp(latest_utc_ts_var, 1):
            raise TypeError
            
    except KeyError:
        
        earliest_utc_ts_var = _beginning_time_
        latest_utc_ts_var = _end_time_
    
    except TypeError:
        
        err_msg = 'Please enter a valid timestamp.'
        
        earliest_utc_ts_var = _beginning_time_
        latest_utc_ts_var = _end_time_
            
    ttl = DL.TestTableLoader()
    columns = ttl.get_column_names()
    test_rows = ttl.get_all_test_rows()
    
    """ Build a list of tests -- apply filters """
    l = []
    
    utm_campaign_index = ttl.get_test_index('utm_campaign')
    html_report_index = ttl.get_test_index('html_report')
    
    for i in test_rows:
        test_start_time = ttl.get_test_field(i, 'start_time')
        new_row = list(i)
                
        """ Ensure the timestamp is properly formatted """
        if TP.is_timestamp(test_start_time, 2):
            test_start_time = TP.timestamp_convert_format(test_start_time, 2, 1)
        
        new_row[html_report_index] = '<a href="/tests/report/%s">view</a>' % new_row[utm_campaign_index]
        
        if int(test_start_time) > int(earliest_utc_ts_var) and int(test_start_time) < int(latest_utc_ts_var):
            l.append(new_row)
        
    l.reverse()
    
    test_table = DR.DataReporting()._write_html_table(l, columns, use_standard_metric_names=True)
    
    return render_to_response('tests/index.html', {'err_msg' : err_msg, 'test_table' : test_table},  context_instance=RequestContext(request))
Example #14
def generate_summary(request):
    
    try:
        
        err_msg = ''
        
        """ 
            PROCESS POST DATA
            ================= 
            
            Escape all user input that can be entered in text fields 
            
        """
        if 'utm_campaign' in request.POST:
            utm_campaign = MySQLdb._mysql.escape_string(request.POST['utm_campaign'])
        
        if 'start_time' in request.POST:
            start_time = MySQLdb._mysql.escape_string(request.POST['start_time'].strip())
            
            if not(TP.is_timestamp(start_time, 1)) and not(TP.is_timestamp(start_time, 2)):            
                err_msg = 'Incorrectly formatted start timestamp.'
                raise Exception()
            
        if 'end_time' in request.POST:
            end_time = MySQLdb._mysql.escape_string(request.POST['end_time'].strip())
            
            if not(TP.is_timestamp(end_time, 1)) and not(TP.is_timestamp(end_time, 2)):            
                err_msg = 'Incorrectly formatted end timestamp.'
                raise Exception()
        
        if 'iso_filter' in request.POST:
            country = MySQLdb._mysql.escape_string(request.POST['iso_filter'])
        else:
            country = '.{2}'
            
        if 'measure_confidence' in request.POST:
            if cmp(request.POST['measure_confidence'], 'yes') == 0:
                measure_confidence = True 
            else:
                measure_confidence = False
        else:
            measure_confidence = False
        
        if 'one_step' in request.POST:
            if cmp(request.POST['one_step'], 'yes') == 0:
                use_one_step = True 
            else:
                use_one_step = False
        else:
            use_one_step = False
            
        if 'donations_only' in request.POST:
            if cmp(request.POST['donations_only'], 'yes') == 0:
                donations_only = True 
            else:
                donations_only = False
        else:
            donations_only = False

            
        """ Convert timestamp format if necessary """
        
        if TP.is_timestamp(start_time, 2):
            start_time = TP.timestamp_convert_format(start_time, 2, 1)
        if TP.is_timestamp(end_time, 2):
            end_time = TP.timestamp_convert_format(end_time, 2, 1)
        
    
        """ =============================================== """
        
        """ 
            GENERATE A REPORT SUMMARY TABLE
            ===============================
        """
        
        if donations_only:
            srl = DL.SummaryReportingLoader(query_type=FDH._TESTTYPE_DONATIONS_)
        else:
            srl = DL.SummaryReportingLoader(query_type=FDH._TESTTYPE_BANNER_LP_)
            
        srl.run_query(start_time, end_time, utm_campaign, min_views=-1, country=country)            
        
        column_names = srl.get_column_names()
        summary_results = srl.get_results()
         
        if not(summary_results):
            html_table = '<h3>No artifact summary data available for %s.</h3>' % utm_campaign
            
        else:
            summary_results_list = list()
            for row in summary_results:
                summary_results_list.append(list(row))
            summary_results = summary_results_list
            
            """ 
                Format results to encode html table cell markup in results        
            """
            if measure_confidence:
                
                ret = DR.ConfidenceReporting(query_type='', hyp_test='').get_confidence_on_time_range(start_time, end_time, utm_campaign, one_step=use_one_step, country=country) # first get color codes on confidence
                conf_colour_code = ret[0]
                
                for row_index in range(len(summary_results)):
                    
                    artifact_index = summary_results[row_index][0] + '-' + summary_results[row_index][1] + '-' + summary_results[row_index][2]
                    
                    for col_index in range(len(column_names)):
                        
                        is_coloured_cell = False
                        if column_names[col_index] in conf_colour_code.keys():
                            if artifact_index in conf_colour_code[column_names[col_index]].keys():
                                summary_results[row_index][col_index] = '<td style="background-color:' + conf_colour_code[column_names[col_index]][artifact_index] + ';">' + str(summary_results[row_index][col_index]) + '</td>'
                                is_coloured_cell = True
                                
                        if not(is_coloured_cell):
                            summary_results[row_index][col_index] = '<td>' + str(summary_results[row_index][col_index]) + '</td>'
            
                html_table = DR.DataReporting()._write_html_table(summary_results, column_names, use_standard_metric_names=True, omit_cell_markup=True)
                
            else:
                
                html_table = DR.DataReporting()._write_html_table(summary_results, column_names, use_standard_metric_names=True)    
        
        """ Generate totals only if it's a non-donation-only query """
        
        if donations_only:
            srl = DL.SummaryReportingLoader(query_type=FDH._QTYPE_TOTAL_DONATIONS_)
        else:
            srl = DL.SummaryReportingLoader(query_type=FDH._QTYPE_TOTAL_)
            
        srl.run_query(start_time, end_time, utm_campaign, min_views=-1, country=country)
        
        total_summary_results = srl.get_results()
        
        if not(total_summary_results):
            html_table = html_table + '<div class="spacer"></div><div class="spacer"></div><h3>No data available for %s Totals.</h3>' % utm_campaign
        
        else: 
            html_table = html_table + '<div class="spacer"></div><div class="spacer"></div>' + DR.DataReporting()._write_html_table(total_summary_results, srl.get_column_names(), use_standard_metric_names=True)
        
        metric_legend_table = DR.DataReporting().get_standard_metrics_legend()
        conf_legend_table = DR.ConfidenceReporting(query_type='bannerlp', hyp_test='TTest').get_confidence_legend_table()
        
        html_table = '<h4><u>Metrics Legend:</u></h4><div class="spacer"></div>' + metric_legend_table + \
        '<div class="spacer"></div><h4><u>Confidence Legend for Hypothesis Testing:</u></h4><div class="spacer"></div>' + conf_legend_table + '<div class="spacer"></div><div class="spacer"></div>' + html_table
        
        """ 
            DETERMINE PAYMENT METHODS 
            =========================
        """
        
        ccl = DL.CiviCRMLoader()
        pm_data_counts, pm_data_conversions  = ccl.get_payment_methods(utm_campaign, start_time, end_time, country=country)

        html_table_pm_counts = DR.IntervalReporting().write_html_table_from_rowlists(pm_data_counts, ['Payment Method', 'Portion of Donations (%)'], 'Landing Page')
        html_table_pm_conversions = DR.IntervalReporting().write_html_table_from_rowlists(pm_data_conversions, ['Payment Method', 'Visits', 'Conversions', 'Conversion Rate (%)', 'Amount', 'Amount 25'], 'Landing Page')
        
        html_table = html_table + '<div class="spacer"></div><h4><u>Payment Methods Breakdown:</u></h4><div class="spacer"></div>' + html_table_pm_counts + \
        '<div class="spacer"></div><div class="spacer"></div>' + html_table_pm_conversions + '<div class="spacer"></div><div class="spacer"></div>'
        
        return render_to_response('tests/table_summary.html', {'html_table' : html_table, 'utm_campaign' : utm_campaign}, context_instance=RequestContext(request))

    except Exception as inst:
        
        if cmp(err_msg, '') == 0:
            err_msg = 'Could not generate campaign tabular results.'
        
        return index(request, err_msg=err_msg)
Example #15
def daily_totals(request):    

    err_msg = ''
    
    start_day_ts = TP.timestamp_from_obj(datetime.datetime.utcnow() + datetime.timedelta(days=-1), 1, 0)
    end_day_ts = TP.timestamp_from_obj(datetime.datetime.utcnow(), 1, 0)
    country = '.{2}'
    min_donation = 0
    order_str = 'order by 1 desc,3 desc'
    
    """
        PROCESS POST
    """
    
    if 'start_day_ts' in request.POST:
        if cmp(request.POST['start_day_ts'], '') != 0:
            start_day_ts = MySQLdb._mysql.escape_string(request.POST['start_day_ts'].strip())
            format = TP.getTimestampFormat(start_day_ts)
            
            if format == 2:
                start_day_ts = TP.timestamp_convert_format(start_day_ts, 2, 1)
                # start_day_ts = start_day_ts[:8] + '000000'
            elif format == -1:
                err_msg = err_msg + 'Start timestamp is formatted incorrectly\n'

    if 'end_day_ts' in request.POST:
        if cmp(request.POST['end_day_ts'], '') != 0:
            end_day_ts = MySQLdb._mysql.escape_string(request.POST['end_day_ts'].strip())
            format = TP.getTimestampFormat(end_day_ts)
            
            if format == 2:
                end_day_ts = TP.timestamp_convert_format(end_day_ts, 2, 1)
                # end_day_ts = end_day_ts[:8] + '000000'
            elif format == -1:
                err_msg = err_msg + 'End timestamp is formatted incorrectly\n'
            
    if 'country' in request.POST:
        if cmp(request.POST['country'], '') != 0:
            country = MySQLdb._mysql.escape_string(request.POST['country'])

    if 'min_donation' in request.POST:
        if cmp(request.POST['min_donation'], '') != 0:
            try:                
                min_donation = int(MySQLdb._mysql.escape_string(request.POST['min_donation'].strip()))
            except:
                logging.error('live_results/daily_totals -- Could not process minimum donation for "%s" ' % request.POST['min_donation'].strip())
                min_donation = 0
    
    if 'order_metric' in request.POST:
        if cmp(request.POST['order_metric'], 'Date') == 0:
            order_str = 'order by 1 desc,3 desc'
        elif cmp(request.POST['order_metric'], 'Country') == 0:
            order_str = 'order by 2 asc,1 desc'
            
    """
        === END POST ===
    """
    
    query_name = 'report_daily_totals_by_country'
    filename = projSet.__sql_home__+ query_name + '.sql'
    sql_stmnt = Hlp.file_to_string(filename)
    sql_stmnt = QD.format_query(query_name, sql_stmnt, [start_day_ts, end_day_ts], country=country, min_donation=min_donation, order_str=order_str)
    
    dl = DL.DataLoader()    
    results = dl.execute_SQL(sql_stmnt)
    html_table = DR.DataReporting()._write_html_table(results, dl.get_column_names(), use_standard_metric_names=True)
    
    return render_to_response('live_results/daily_totals.html', \
                              {'html_table' : html_table, 'start_time' : TP.timestamp_convert_format(start_day_ts, 1, 2), 'end_time' : TP.timestamp_convert_format(end_day_ts, 1, 2)}, \
                              context_instance=RequestContext(request))
Example #16
    def execute_process(self, key, **kwargs):

        logging.info('Commencing caching of long term trends data at:  %s' %
                     self.CACHING_HOME)

        end_time, start_time = TP.timestamps_for_interval(datetime.datetime.utcnow(), 1, \
                                                          hours=-self.VIEW_DURATION_HRS, resolution=1)
        """ DATA CONFIG """

        countries = DL.CiviCRMLoader().get_ranked_donor_countries(start_time)
        countries = countries[1:6]
        """ set the metrics to plot """
        lttdl = DL.LongTermTrendsLoader(db='storage3')
        """ Dictionary object storing lists of regexes - each expression must pass for a label to persist """
        # country_groups = {'US': ['(US)'], 'CA': ['(CA)'], 'JP': ['(JP)'], 'IN': ['(IN)'], 'NL': ['(NL)']}
        payment_groups = {'Credit Card': ['^cc$'], 'Paypal': ['^pp$']}
        currency_groups = {
            'USD': ['(USD)'],
            'CAD': ['(CAD)'],
            'JPY': ['(JPY)'],
            'EUR': ['(EUR)']
        }
        lang_cntry_groups = {
            'US': ['US..', '.{4}'],
            'EN': ['[^U^S]en', '.{4}']
        }

        top_cntry_groups = dict()
        for country in countries:
            top_cntry_groups[country] = [country, '.{2}']

        # To include click rate
        # groups = [ lang_cntry_groups] metrics = ['click_rate'] metrics_index = [3]
        # group_metrics = [DL.LongTermTrendsLoader._MT_RATE_] metric_types = ['country', 'language'] include_totals = [True] include_others = [True]

        metrics = [
            'impressions', 'views', 'donations', 'donations', 'amount',
            'amount', 'diff_don', 'diff_don', 'donations', 'conversion_rate'
        ]
        weights = ['', '', '', '', '', '', 'donations', 'donations', '', '']
        metrics_index = [0, 1, 2, 2, 2, 4, 5, 5, 6, 6]
        groups = [lang_cntry_groups, lang_cntry_groups, lang_cntry_groups, top_cntry_groups, lang_cntry_groups, currency_groups, \
                  lang_cntry_groups, lang_cntry_groups, payment_groups, payment_groups]
        """  The metrics that are used to build a group string to be qualified via regex - the values of the list metrics are concatenated """
        group_metrics = [['country', 'language'], ['country', 'language'], ['country', 'language'], \
                         ['country', 'language'], ['country', 'language'], ['currency'], ['country', 'language'], \
                         ['country', 'language'], ['payment_method'], ['payment_method']]

        metric_types = [DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                        DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                        DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_RATE_WEIGHTED_, DL.LongTermTrendsLoader._MT_AMOUNT_, \
                        DL.LongTermTrendsLoader._MT_RATE_]

        include_totals = [
            True, True, True, False, True, True, False, False, False, True
        ]
        include_others = [
            True, True, True, False, True, True, True, True, True, False
        ]
        hours_back = [0, 0, 0, 0, 0, 0, 24, 168, 0, 0]
        time_unit = [
            TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR, TP.HOUR,
            TP.HOUR, TP.HOUR, TP.HOUR
        ]

        data = list()
        """ END CONFIG """
        """ For each metric use the LongTermTrendsLoader to generate the data to plot """
        for index in range(len(metrics)):

            dr = DR.DataReporting()

            times, counts = lttdl.run_query(start_time, end_time, metrics_index[index], metric_name=metrics[index], metric_type=metric_types[index], \
                                            groups=groups[index], group_metric=group_metrics[index], include_other=include_others[index], \
                                            include_total=include_totals[index], hours_back=hours_back[index], weight_name=weights[index], \
                                            time_unit=time_unit[index])

            times = TP.normalize_timestamps(times, False, time_unit[index])

            dr._counts_ = counts
            dr._times_ = times

            empty_data = [0] * len(times[times.keys()[0]])
            data.append(dr.get_data_lists([''], empty_data))

        dict_param = Hlp.combine_data_lists(data)
        dict_param['interval'] = self.VIEW_DURATION_HRS
        dict_param['end_time'] = TP.timestamp_convert_format(end_time, 1, 2)

        self.clear_cached_data(key)
        self.cache_data(dict_param, key)

        logging.info('Caching complete.')
Example #17
def index(request, **kwargs):

    crl = DL.CampaignReportingLoader(query_type='totals')
    filter_data = True
    """ Determine the start and end times for the query """
    start_time_obj = datetime.datetime.utcnow() + datetime.timedelta(days=-1)
    end_time = TP.timestamp_from_obj(datetime.datetime.utcnow(), 1, 3)
    start_time = TP.timestamp_from_obj(start_time_obj, 1, 3)
    """ 
        PROCESS POST KWARGS 
        ===================
    """

    err_msg = ''
    try:
        err_msg = str(kwargs['kwargs']['err_msg'])
    except:
        pass
    """ 
        PROCESS POST VARS 
        =================                
    """
    """ Process error message """
    try:
        err_msg = MySQLdb._mysql.escape_string(request.POST['err_msg'])
    except KeyError:
        pass
    """ If the filter form was submitted extract the POST vars  """
    try:
        min_donations_var = MySQLdb._mysql.escape_string(
            request.POST['min_donations'].strip())
        earliest_utc_ts_var = MySQLdb._mysql.escape_string(
            request.POST['utc_ts'].strip())
        """ If the user timestamp is earlier than the default start time run the query for the earlier start time  """
        ts_format = TP.getTimestampFormat(earliest_utc_ts_var)
        """ Ensure the validity of the timestamp input """
        if ts_format == TP.TS_FORMAT_FORMAT1:
            start_time = TP.timestamp_convert_format(earliest_utc_ts_var,
                                                     TP.TS_FORMAT_FORMAT1,
                                                     TP.TS_FORMAT_FLAT)
        elif ts_format == TP.TS_FORMAT_FLAT:
            start_time = earliest_utc_ts_var
        elif cmp(earliest_utc_ts_var, '') == 0:
            start_time = TP.timestamp_from_obj(start_time_obj, 1, 3)
        else:
            raise Exception()

        if cmp(min_donations_var, '') == 0:
            min_donations_var = -1
        else:
            min_donations_var = int(min_donations_var)

    except KeyError:  # In the case the form was not submitted set minimum donations and retain the default start time

        min_donations_var = -1
        pass

    except Exception:  # In the case the form was incorrectly formatted notify the user

        min_donations_var = -1
        start_time = TP.timestamp_from_obj(start_time_obj, 1, 3)
        err_msg = 'Filter fields are incorrect.'
    """ 
        GENERATE CAMPAIGN DATA 
        ======================
        
    """
    campaigns, all_data = crl.run_query({
        'metric_name': 'earliest_timestamp',
        'start_time': start_time,
        'end_time': end_time
    })
    """ Sort campaigns by earliest access """
    sorted_campaigns = sorted(campaigns.iteritems(),
                              key=operator.itemgetter(1))
    sorted_campaigns.reverse()
    """ 
        FILTER CAMPAIGN DATA
        ====================
        
    """

    new_sorted_campaigns = list()
    for campaign in sorted_campaigns:
        key = campaign[0]

        if campaign[1] > 0:
            name = all_data[key][0]
            if name is None:
                name = 'none'

            timestamp = TP.timestamp_convert_format(all_data[key][3], 1, 2)

            if filter_data:
                if all_data[key][2] > min_donations_var:
                    new_sorted_campaigns.append([
                        campaign[0], campaign[1], name, timestamp,
                        all_data[key][2], all_data[key][4]
                    ])
            else:
                new_sorted_campaigns.append([
                    campaign[0], campaign[1], name, timestamp,
                    all_data[key][2], all_data[key][4]
                ])

    sorted_campaigns = new_sorted_campaigns

    return render_to_response('campaigns/index.html', {
        'campaigns': sorted_campaigns,
        'err_msg': err_msg
    },
                              context_instance=RequestContext(request))
Example #18
def test(request):

    try:
        """ 
            PROCESS POST DATA
            ================= 
            
            Escape all user input that can be entered in text fields 
            
        """
        test_name_var = MySQLdb._mysql.escape_string(
            request.POST['test_name'].strip())
        utm_campaign_var = MySQLdb._mysql.escape_string(
            request.POST['utm_campaign'].strip())
        start_time_var = MySQLdb._mysql.escape_string(
            request.POST['start_time'].strip())
        end_time_var = MySQLdb._mysql.escape_string(
            request.POST['end_time'].strip())
        one_step_var = MySQLdb._mysql.escape_string(
            request.POST['one_step'].strip())
        country = MySQLdb._mysql.escape_string(request.POST['iso_filter'])
        """ Convert timestamp format if necessary """
        if TP.is_timestamp(start_time_var, 2):
            start_time_var = TP.timestamp_convert_format(start_time_var, 2, 1)
        if TP.is_timestamp(end_time_var, 2):
            end_time_var = TP.timestamp_convert_format(end_time_var, 2, 1)

        if cmp(one_step_var, 'True') == 0:
            one_step_var = True
        else:
            one_step_var = False

        try:
            test_type_var = MySQLdb._mysql.escape_string(
                request.POST['test_type'])
            labels = request.POST['artifacts']

        except KeyError:

            test_type_var, labels = FDH.get_test_type(
                utm_campaign_var, start_time_var, end_time_var,
                DL.CampaignReportingLoader(
                    query_type=''))  # submit an empty query type
            labels = labels.__str__()

        label_dict = dict()
        label_dict_full = dict()

        labels = labels[1:-1].split(',')
        """ Parse the labels """
        for i in range(len(labels)):
            label = labels[i].split('\'')[1]
            label = label.strip()
            pieces = label.split(' ')
            label = pieces[0]
            for j in range(len(pieces) - 1):
                label = label + '_' + pieces[j + 1]
            """ Escape the label parameters """
            label = MySQLdb._mysql.escape_string(label)
            label_dict_full[label] = label
        """ Look at the artifact names and map them into a dict() - Determine if artifacts were chosen by the user """

        if request.POST.__contains__('artifacts_chosen'):

            artifacts_chosen = request.POST.getlist('artifacts_chosen')
            """ Ensure that only two items are selected """
            if len(artifacts_chosen) > 2:
                raise Exception(
                    'Please select (checkboxes) exactly two items to test')

            for elem in artifacts_chosen:
                esc_elem = MySQLdb._mysql.escape_string(str(elem))
                label_dict[esc_elem] = esc_elem
        else:
            label_dict = label_dict_full
        """ Parse the added labels IF they are not empty """
        for key in label_dict.keys():

            try:
                if not (request.POST[key] == ''):

                    label_dict[key] = MySQLdb._mysql.escape_string(
                        str(request.POST[key]))
                else:
                    label_dict[key] = key
            except:
                logging.error('Could not find %s in the POST QueryDict.' % key)

        for key in label_dict_full.keys():
            try:
                if not (request.POST[key] == ''):
                    label_dict_full[key] = MySQLdb._mysql.escape_string(
                        str(request.POST[key]))
                else:
                    label_dict_full[key] = key
            except:
                logging.error('Could not find %s in the POST QueryDict.' % key)
        """ 
            EXECUTE REPORT GENERATION
            =========================
        
            setup time parameters
            determine test metrics
            execute queries
        """

        sample_interval = 1

        start_time_obj = TP.timestamp_to_obj(start_time_var, 1)
        end_time_obj = TP.timestamp_to_obj(end_time_var, 1)

        time_diff = end_time_obj - start_time_obj
        time_diff_min = time_diff.seconds / 60.0
        test_interval = int(math.floor(time_diff_min /
                                       sample_interval))  # number of sampling slices in the test window

        metric_types = FDH.get_test_type_metrics(test_type_var)
        metric_types_full = dict()
        """ Get the full (descriptive) version of the metric names 
            !! FIXME / TODO -- order these properly !! """

        for i in range(len(metric_types)):
            metric_types_full[metric_types[i]] = QD.get_metric_full_name(
                metric_types[i])
        """ USE generate_reporting_objects() TO GENERATE THE REPORT DATA - dependent on test type """

        measured_metric, winner, loser, percent_win, confidence, html_table_pm_banner, html_table_pm_lp, html_table_language, html_table \
        =  generate_reporting_objects(test_name_var, start_time_var, end_time_var, utm_campaign_var, label_dict, label_dict_full, \
                                      sample_interval, test_interval, test_type_var, metric_types, one_step_var, country)

        winner_var = winner[0]

        results = list()
        for index in range(len(winner)):
            results.append({
                'metric': measured_metric[index],
                'winner': winner[index],
                'loser': loser[index],
                'percent_win': percent_win[index],
                'confidence': confidence[index]
            })

        template_var_dict = {
            'results': results, 'utm_campaign': utm_campaign_var,
            'metric_names_full': metric_types_full, 'summary_table': html_table,
            'sample_interval': sample_interval, 'banner_pm_table': html_table_pm_banner,
            'lp_pm_table': html_table_pm_lp, 'html_table_language': html_table_language,
            'start_time': TP.timestamp_convert_format(start_time_var, 1, 2),
            'end_time': TP.timestamp_convert_format(end_time_var, 1, 2)
        }

        html = render_to_response('tests/results_' + test_type_var + '.html',
                                  template_var_dict,
                                  context_instance=RequestContext(request))
        """ 
            WRITE TO TEST TABLE
            =================== 
        
        """

        ttl = DL.TestTableLoader()
        """ Format the html string """
        html_string = html.__str__()
        html_string = html_string.replace('"', '\\"')
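        # escape double quotes so the rendered report markup can be written to the test table below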

        if ttl.record_exists(utm_campaign=utm_campaign_var):
            ttl.update_test_row(test_name=test_name_var,
                                test_type=test_type_var,
                                utm_campaign=utm_campaign_var,
                                start_time=start_time_var,
                                end_time=end_time_var,
                                html_report=html_string,
                                winner=winner_var)
        else:
            ttl.insert_row(test_name=test_name_var,
                           test_type=test_type_var,
                           utm_campaign=utm_campaign_var,
                           start_time=start_time_var,
                           end_time=end_time_var,
                           html_report=html_string,
                           winner=winner_var)

        return html

    except Exception as inst:

        logging.error('Failed to correctly generate test report.')
        logging.error(type(inst))
        logging.error(inst.args)
        logging.error(inst)
        """ Return to the index page with an error """
        try:
            err_msg = 'Test Generation failed for: %s.  Check the fields submitted for generation. <br><br>ERROR:<br><br>%s' % (
                utm_campaign_var, inst.__str__())
        except:
            err_msg = 'Test Generation failed.  Check the fields submitted for generation. <br><br>ERROR:<br><br>%s' % inst.__str__()
            return campaigns_index(request, kwargs={'err_msg': err_msg})

        return show_campaigns(request,
                              utm_campaign_var,
                              kwargs={'err_msg': err_msg})
Example #19
0
def process_filter_data(request):
        
    err_msg = ''
    
    time_curr = datetime.datetime.utcnow()
    time_dayback = time_curr + datetime.timedelta(hours = -4)
     
    _beginning_time_ = TP.timestamp_from_obj(time_dayback, 1, 3)
    _end_time_ = TP.timestamp_from_obj(time_curr, 1, 3)
    
    
    """ 
        PROCESS POST VARS 
        =================
    """
    
    try:
        
        latest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST['latest_utc_ts'].strip())
        
        if not(TP.is_timestamp(latest_utc_ts_var, 1)) and not(TP.is_timestamp(latest_utc_ts_var, 2)):
            raise TypeError

        if latest_utc_ts_var == '':
            latest_utc_ts_var = _end_time_
        
        ts_format = TP.getTimestampFormat(latest_utc_ts_var)
        if ts_format == TP.TS_FORMAT_FORMAT1:
            latest_utc_ts_var = TP.timestamp_convert_format(latest_utc_ts_var, TP.TS_FORMAT_FORMAT1, TP.TS_FORMAT_FLAT)
            
    except KeyError:        
        latest_utc_ts_var = _end_time_
    
    except TypeError:        
        err_msg = 'Please enter a valid end-timestamp.'        
        latest_utc_ts_var = _end_time_


    try:
        
        earliest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST['earliest_utc_ts'].strip())
        
        if not(TP.is_timestamp(earliest_utc_ts_var, 1)) and not(TP.is_timestamp(earliest_utc_ts_var, 2)):
            raise TypeError

        if earliest_utc_ts_var == '':
            earliest_utc_ts_var = _beginning_time_
        
        ts_format = TP.getTimestampFormat(earliest_utc_ts_var)
        if ts_format == TP.TS_FORMAT_FORMAT1:
            earliest_utc_ts_var = TP.timestamp_convert_format(earliest_utc_ts_var, TP.TS_FORMAT_FORMAT1, TP.TS_FORMAT_FLAT)
            
    except KeyError:
        earliest_utc_ts_var = _beginning_time_
    
    except TypeError:        
        err_msg = 'Please enter a valid start-timestamp.'        
        earliest_utc_ts_var = _beginning_time_
        
        
    return err_msg, earliest_utc_ts_var, latest_utc_ts_var
Example #20
0
def generate_summary(request):

    try:

        err_msg = ''
        """ 
            PROCESS POST DATA
            ================= 
            
            Escape all user input that can be entered in text fields 
            
        """
        if 'utm_campaign' in request.POST:
            utm_campaign = MySQLdb._mysql.escape_string(
                request.POST['utm_campaign'])

        if 'start_time' in request.POST:
            start_time = MySQLdb._mysql.escape_string(
                request.POST['start_time'].strip())

            if not (TP.is_timestamp(start_time, 1)) and not (TP.is_timestamp(
                    start_time, 2)):
                err_msg = 'Incorrectly formatted start timestamp.'
                raise Exception()

        if 'end_time' in request.POST:
            end_time = MySQLdb._mysql.escape_string(
                request.POST['end_time'].strip())

            if not (TP.is_timestamp(end_time, 1)) and not (TP.is_timestamp(
                    end_time, 2)):
                err_msg = 'Incorrectly formatted end timestamp.'
                raise Exception()

        if 'iso_filter' in request.POST:
            country = MySQLdb._mysql.escape_string(request.POST['iso_filter'])
        else:
            country = '.{2}'
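            # default regexp: match any two-character ISO country code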

        if 'measure_confidence' in request.POST:
            if cmp(request.POST['measure_confidence'], 'yes') == 0:
                measure_confidence = True
            else:
                measure_confidence = False
        else:
            measure_confidence = False

        if 'one_step' in request.POST:
            if cmp(request.POST['one_step'], 'yes') == 0:
                use_one_step = True
            else:
                use_one_step = False
        else:
            use_one_step = False

        if 'donations_only' in request.POST:
            if cmp(request.POST['donations_only'], 'yes') == 0:
                donations_only = True
            else:
                donations_only = False
        else:
            donations_only = False
        """ Convert timestamp format if necessary """

        if TP.is_timestamp(start_time, 2):
            start_time = TP.timestamp_convert_format(start_time, 2, 1)
        if TP.is_timestamp(end_time, 2):
            end_time = TP.timestamp_convert_format(end_time, 2, 1)
        """ =============================================== """
        """ 
            GENERATE A REPORT SUMMARY TABLE
            ===============================
        """

        if donations_only:
            srl = DL.SummaryReportingLoader(
                query_type=FDH._TESTTYPE_DONATIONS_)
        else:
            srl = DL.SummaryReportingLoader(
                query_type=FDH._TESTTYPE_BANNER_LP_)

        srl.run_query(start_time,
                      end_time,
                      utm_campaign,
                      min_views=-1,
                      country=country)

        column_names = srl.get_column_names()
        summary_results = srl.get_results()

        if not (summary_results):
            html_table = '<h3>No artifact summary data available for %s.</h3>' % utm_campaign

        else:
            summary_results_list = list()
            for row in summary_results:
                summary_results_list.append(list(row))
            summary_results = summary_results_list
            """ 
                Format results to encode html table cell markup in results        
            """
            if measure_confidence:

                ret = DR.ConfidenceReporting(
                    query_type='', hyp_test='').get_confidence_on_time_range(
                        start_time,
                        end_time,
                        utm_campaign,
                        one_step=use_one_step,
                        country=country)  # first get color codes on confidence
                conf_colour_code = ret[0]
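                # conf_colour_code maps each metric column name to a dict keyed by artifact
                # index (the first three result columns joined with '-'), giving the cell colour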

                for row_index in range(len(summary_results)):

                    row = summary_results[row_index]
                    artifact_index = row[0] + '-' + row[1] + '-' + row[2]

                    for col_index in range(len(column_names)):

                        is_coloured_cell = False
                        col_name = column_names[col_index]
                        if col_name in conf_colour_code.keys():
                            if artifact_index in conf_colour_code[col_name].keys():
                                colour = conf_colour_code[col_name][artifact_index]
                                row[col_index] = '<td style="background-color:' + colour + ';">' + str(row[col_index]) + '</td>'
                                is_coloured_cell = True

                        if not is_coloured_cell:
                            row[col_index] = '<td>' + str(row[col_index]) + '</td>'

                html_table = DR.DataReporting()._write_html_table(
                    summary_results,
                    column_names,
                    use_standard_metric_names=True,
                    omit_cell_markup=True)

            else:

                html_table = DR.DataReporting()._write_html_table(
                    summary_results,
                    column_names,
                    use_standard_metric_names=True)
        """ Generate totals only if it's a non-donation-only query """

        if donations_only:
            srl = DL.SummaryReportingLoader(
                query_type=FDH._QTYPE_TOTAL_DONATIONS_)
        else:
            srl = DL.SummaryReportingLoader(query_type=FDH._QTYPE_TOTAL_)

        srl.run_query(start_time,
                      end_time,
                      utm_campaign,
                      min_views=-1,
                      country=country)

        total_summary_results = srl.get_results()

        if not (total_summary_results):
            html_table = html_table + '<div class="spacer"></div><div class="spacer"></div><h3>No data available for %s Totals.</h3>' % utm_campaign

        else:
            html_table = html_table + '<div class="spacer"></div><div class="spacer"></div>' + \
                DR.DataReporting()._write_html_table(total_summary_results,
                                                     srl.get_column_names(),
                                                     use_standard_metric_names=True)

        metric_legend_table = DR.DataReporting().get_standard_metrics_legend()
        conf_legend_table = DR.ConfidenceReporting(
            query_type='bannerlp',
            hyp_test='TTest').get_confidence_legend_table()

        html_table = '<h4><u>Metrics Legend:</u></h4><div class="spacer"></div>' + metric_legend_table + \
        '<div class="spacer"></div><h4><u>Confidence Legend for Hypothesis Testing:</u></h4><div class="spacer"></div>' + conf_legend_table + '<div class="spacer"></div><div class="spacer"></div>' + html_table
        """ 
            DETERMINE PAYMENT METHODS 
            =========================
        """

        ccl = DL.CiviCRMLoader()
        pm_data_counts, pm_data_conversions = ccl.get_payment_methods(
            utm_campaign, start_time, end_time, country=country)

        html_table_pm_counts = DR.IntervalReporting().write_html_table_from_rowlists(
            pm_data_counts,
            ['Payment Method', 'Portion of Donations (%)'],
            'Landing Page')
        html_table_pm_conversions = DR.IntervalReporting().write_html_table_from_rowlists(
            pm_data_conversions,
            ['Payment Method', 'Visits', 'Conversions', 'Conversion Rate (%)', 'Amount', 'Amount 25'],
            'Landing Page')

        html_table = html_table + '<div class="spacer"></div><h4><u>Payment Methods Breakdown:</u></h4><div class="spacer"></div>' + html_table_pm_counts + \
        '<div class="spacer"></div><div class="spacer"></div>' + html_table_pm_conversions + '<div class="spacer"></div><div class="spacer"></div>'

        return render_to_response('tests/table_summary.html', {
            'html_table': html_table,
            'utm_campaign': utm_campaign
        },
                                  context_instance=RequestContext(request))

    except Exception as inst:

        if cmp(err_msg, '') == 0:
            err_msg = 'Could not generate campaign tabular results.'

        return index(request, err_msg=err_msg)
Example #21
0
def test(request):
    
    try:
                
        """ 
            PROCESS POST DATA
            ================= 
            
            Escape all user input that can be entered in text fields 
            
        """
        test_name_var = MySQLdb._mysql.escape_string(request.POST['test_name'].strip())
        utm_campaign_var = MySQLdb._mysql.escape_string(request.POST['utm_campaign'].strip())
        start_time_var = MySQLdb._mysql.escape_string(request.POST['start_time'].strip())
        end_time_var = MySQLdb._mysql.escape_string(request.POST['end_time'].strip())    
        one_step_var = MySQLdb._mysql.escape_string(request.POST['one_step'].strip())        
        country = MySQLdb._mysql.escape_string(request.POST['iso_filter'])
                
        
        """ Convert timestamp format if necessary """
        if TP.is_timestamp(start_time_var, 2):
            start_time_var = TP.timestamp_convert_format(start_time_var, 2, 1)
        if TP.is_timestamp(end_time_var, 2):
            end_time_var = TP.timestamp_convert_format(end_time_var, 2, 1)
                
        if cmp(one_step_var, 'True') == 0:
            one_step_var = True
        else:
            one_step_var = False
            
        try: 
            test_type_var = MySQLdb._mysql.escape_string(request.POST['test_type'])
            labels = request.POST['artifacts']
                            
        except KeyError:
    
            test_type_var, labels = FDH.get_test_type(utm_campaign_var, start_time_var, end_time_var, DL.CampaignReportingLoader(query_type=''))  # submit an empty query type           
            labels = labels.__str__() 
        
        label_dict = dict()
        label_dict_full = dict()
        
        labels = labels[1:-1].split(',')        
                                    
        """ Parse the labels """     
        for i in range(len(labels)):
            labels[i] = labels[i]
            label = labels[i].split('\'')[1]
            label = label.strip()            
            pieces = label.split(' ')
            label = pieces[0]
            for j in range(len(pieces) - 1):
                label = label + '_' + pieces[j+1]
            
            """ Escape the label parameters """
            label = MySQLdb._mysql.escape_string(label)
            label_dict_full[label] = label
                
        """ Look at the artifact names and map them into a dict() - Determine if artifacts were chosen by the user """
        
        if request.POST.__contains__('artifacts_chosen'):
            
            artifacts_chosen =  request.POST.getlist('artifacts_chosen')
            
            """ Ensure that only two items are selected """
            if len(artifacts_chosen) > 2:
                raise Exception('Please select (checkboxes) exactly two items to test')
            
            for elem in artifacts_chosen:
                esc_elem = MySQLdb._mysql.escape_string(str(elem))
                label_dict[esc_elem] = esc_elem
        else:
            label_dict = label_dict_full
    
        
        """ Parse the added labels IF they are not empty """
        for key in label_dict.keys():
            
            try:
                if not(request.POST[key] == ''):
                    
                    label_dict[key] = MySQLdb._mysql.escape_string(str(request.POST[key]))
                else:
                    label_dict[key] = key
            except:
                logging.error('Could not find %s in the POST QueryDict.' % key)
        
        for key in label_dict_full.keys():
            try:
                if not(request.POST[key] == ''):
                    label_dict_full[key] = MySQLdb._mysql.escape_string(str(request.POST[key]))
                else:
                    label_dict_full[key] = key
            except:
                logging.error('Could not find %s in the POST QueryDict.' % key)
    
        
        """ 
            EXECUTE REPORT GENERATION
            =========================
        
            setup time parameters
            determine test metrics
            execute queries
        """
        
        sample_interval = 1
        
        start_time_obj = TP.timestamp_to_obj(start_time_var, 1)
        end_time_obj = TP.timestamp_to_obj(end_time_var, 1)
        
        time_diff = end_time_obj - start_time_obj
        time_diff_min = time_diff.seconds / 60.0
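        # note: timedelta.seconds ignores whole days, so windows longer than 24 hours would need total_seconds()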
        test_interval = int(math.floor(time_diff_min / sample_interval))  # number of sampling slices in the test window
            
        metric_types = FDH.get_test_type_metrics(test_type_var)
        metric_types_full = dict()
        
        
        """ Get the full (descriptive) version of the metric names 
            !! FIXME / TODO -- order these properly !! """
        
        for i in range(len(metric_types)):
            metric_types_full[metric_types[i]] = QD.get_metric_full_name(metric_types[i])
        
        
        """ USE generate_reporting_objects() TO GENERATE THE REPORT DATA - dependent on test type """
        
        measured_metric, winner, loser, percent_win, confidence, html_table_pm_banner, html_table_pm_lp, html_table_language, html_table \
        =  generate_reporting_objects(test_name_var, start_time_var, end_time_var, utm_campaign_var, label_dict, label_dict_full, \
                                      sample_interval, test_interval, test_type_var, metric_types, one_step_var, country)
        
        winner_var = winner[0]
        
        results = list()
        for index in range(len(winner)):
            results.append({'metric' : measured_metric[index], 'winner' : winner[index], 'loser': loser[index], 'percent_win' : percent_win[index], 'confidence' : confidence[index]})
            
        template_var_dict = {'results' : results,  \
                  'utm_campaign' : utm_campaign_var, 'metric_names_full' : metric_types_full, \
                  'summary_table': html_table, 'sample_interval' : sample_interval, \
                  'banner_pm_table' : html_table_pm_banner, 'lp_pm_table' : html_table_pm_lp, 'html_table_language' : html_table_language, \
                  'start_time' : TP.timestamp_convert_format(start_time_var, 1, 2) , 'end_time' : TP.timestamp_convert_format(end_time_var, 1, 2)}
        
        html = render_to_response('tests/results_' + test_type_var + '.html', template_var_dict, context_instance=RequestContext(request))
    
        """ 
            WRITE TO TEST TABLE
            =================== 
        
        """
        
        ttl = DL.TestTableLoader()
        
        """ Format the html string """
        html_string = html.__str__()
        html_string = html_string.replace('"', '\\"')
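        # escape double quotes so the rendered report markup can be written to the test table below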
    
        if ttl.record_exists(utm_campaign=utm_campaign_var):
            ttl.update_test_row(test_name=test_name_var,test_type=test_type_var,utm_campaign=utm_campaign_var,start_time=start_time_var,end_time=end_time_var,html_report=html_string, winner=winner_var)
        else:
            ttl.insert_row(test_name=test_name_var,test_type=test_type_var,utm_campaign=utm_campaign_var,start_time=start_time_var,end_time=end_time_var,html_report=html_string, winner=winner_var)
        
        return html

    except Exception as inst:
        
        logging.error('Failed to correctly generate test report.')
        logging.error(type(inst))
        logging.error(inst.args)
        logging.error(inst)
    
        """ Return to the index page with an error """
        try:
            err_msg = 'Test Generation failed for: %s.  Check the fields submitted for generation. <br><br>ERROR:<br><br>%s' % (utm_campaign_var, inst.__str__())
        except:
            err_msg = 'Test Generation failed.  Check the fields submitted for generation. <br><br>ERROR:<br><br>%s' % inst.__str__()
            return campaigns_index(request, kwargs={'err_msg' : err_msg})
        
        return show_campaigns(request, utm_campaign_var, kwargs={'err_msg' : err_msg})
Example #22
0
    def execute_process(self, key, **kwargs):

        logging.info('Commencing caching of live results data at:  %s' %
                     self.CACHING_HOME)
        shelve_key = key
        """ Find the earliest and latest page views for a given campaign  """
        lptl = DL.LandingPageTableLoader(db='db1025')

        query_name = 'report_summary_results_country.sql'
        query_name_1S = 'report_summary_results_country_1S.sql'
        campaign_regexp_filter = '^C_|^C11_'
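        # regexp restricting the report to campaign names beginning with 'C_' or 'C11_'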

        dl = DL.DataLoader(db='db1025')
        end_time, start_time = TP.timestamps_for_interval(
            datetime.datetime.utcnow(), 1, hours=-self.DURATION_HRS)
        """ Should a one-step query be used? """
        use_one_step = lptl.is_one_step(
            start_time, end_time, 'C11'
        )  # Assume it is a one step test if there are no impressions for this campaign in the landing page table
        """ 
            Retrieve the latest time for which impressions have been loaded
            ===============================================================
        """

        sql_stmnt = 'select max(end_time) as latest_ts from squid_log_record where log_completion_pct = 100.00'

        results = dl.execute_SQL(sql_stmnt)
        latest_timestamp = results[0][0]
        latest_timestamp = TP.timestamp_from_obj(latest_timestamp, 2, 3)
        latest_timestamp_flat = TP.timestamp_convert_format(
            latest_timestamp, 2, 1)

        ret = DR.ConfidenceReporting(query_type='', hyp_test='',
                                     db='db1025').get_confidence_on_time_range(
                                         start_time,
                                         end_time,
                                         campaign_regexp_filter,
                                         one_step=use_one_step)
        measured_metrics_counts = ret[1]
        """ Prepare Summary results """

        sql_stmnt = Hlp.file_to_string(projSet.__sql_home__ + query_name)
        sql_stmnt = sql_stmnt % (start_time, latest_timestamp_flat, start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, \
                                 start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, \
                                 start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, campaign_regexp_filter)

        logging.info('Executing report_summary_results ...')

        results = dl.execute_SQL(sql_stmnt)
        column_names = dl.get_column_names()

        if use_one_step:

            logging.info('... including one step artifacts ...')

            sql_stmnt_1S = Hlp.file_to_string(projSet.__sql_home__ +
                                              query_name_1S)
            sql_stmnt_1S = sql_stmnt_1S % (start_time, latest_timestamp_flat, start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, \
                                     start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, start_time, end_time, campaign_regexp_filter, \
                                     start_time, latest_timestamp_flat, campaign_regexp_filter, start_time, latest_timestamp_flat, campaign_regexp_filter)

            results = list(results)
            results_1S = dl.execute_SQL(sql_stmnt_1S)
            """ Ensure that the results are unique """
            one_step_keys = list()
            for row in results_1S:
                one_step_keys.append(str(row[0]) + str(row[1]) + str(row[2]))

            new_results = list()
            for row in results:
                row_key = str(row[0]) + str(row[1]) + str(row[2])  # avoid shadowing the shelve key argument
                if not (row_key in one_step_keys):
                    new_results.append(row)
            results = new_results

            results.extend(list(results_1S))

        metric_legend_table = DR.DataReporting().get_standard_metrics_legend()
        conf_legend_table = DR.ConfidenceReporting(
            query_type='bannerlp',
            hyp_test='TTest').get_confidence_legend_table()
        """ Create a interval loader objects """

        sampling_interval = 5  # 5 minute sampling interval for donation plots

        ir_cmpgn = DR.IntervalReporting(query_type=FDH._QTYPE_CAMPAIGN_ +
                                        FDH._QTYPE_TIME_,
                                        generate_plot=False,
                                        db='db1025')
        ir_banner = DR.IntervalReporting(query_type=FDH._QTYPE_BANNER_ +
                                         FDH._QTYPE_TIME_,
                                         generate_plot=False,
                                         db='db1025')
        ir_lp = DR.IntervalReporting(query_type=FDH._QTYPE_LP_ +
                                     FDH._QTYPE_TIME_,
                                     generate_plot=False,
                                     db='db1025')
        """ Execute queries """
        ir_cmpgn.run(start_time, end_time, sampling_interval, 'donations', '',
                     {})
        ir_banner.run(start_time, end_time, sampling_interval, 'donations', '',
                      {})
        ir_lp.run(start_time, end_time, sampling_interval, 'donations', '', {})
        """ Prepare serialized objects """

        dict_param = dict()

        dict_param['metric_legend_table'] = metric_legend_table
        dict_param['conf_legend_table'] = conf_legend_table

        dict_param['measured_metrics_counts'] = measured_metrics_counts
        dict_param['results'] = results
        dict_param['column_names'] = column_names

        dict_param['interval'] = sampling_interval
        dict_param['duration'] = self.DURATION_HRS

        dict_param['start_time'] = TP.timestamp_convert_format(
            start_time, 1, 2)
        dict_param['end_time'] = TP.timestamp_convert_format(end_time, 1, 2)

        dict_param['ir_cmpgn_counts'] = ir_cmpgn._counts_
        dict_param['ir_banner_counts'] = ir_banner._counts_
        dict_param['ir_lp_counts'] = ir_lp._counts_

        dict_param['ir_cmpgn_times'] = ir_cmpgn._times_
        dict_param['ir_banner_times'] = ir_banner._times_
        dict_param['ir_lp_times'] = ir_lp._times_

        self.clear_cached_data(shelve_key)
        self.cache_data(dict_param, shelve_key)

        logging.info('Caching complete.')
Example #23
0
def index(request, **kwargs):
    
    crl = DL.CampaignReportingLoader(query_type='totals')
    filter_data = True
            
    """ Determine the start and end times for the query """ 
    start_time_obj =  datetime.datetime.utcnow() + datetime.timedelta(days=-1)
    end_time = TP.timestamp_from_obj(datetime.datetime.utcnow(),1,3)    
    start_time = TP.timestamp_from_obj(start_time_obj,1,3)
    
    """ 
        PROCESS POST KWARGS 
        ===================
    """
    
    err_msg = ''
    try:
        err_msg = str(kwargs['kwargs']['err_msg'])
    except:
        pass
    
    """ 
        PROCESS POST VARS 
        =================                
    """
    
    """ Process error message """
    try:
        err_msg = MySQLdb._mysql.escape_string(request.POST['err_msg'])
    except KeyError:
        pass

    """ If the filter form was submitted extract the POST vars  """
    try:
        min_donations_var = MySQLdb._mysql.escape_string(request.POST['min_donations'].strip())
        earliest_utc_ts_var = MySQLdb._mysql.escape_string(request.POST['utc_ts'].strip())
        
        """ If the user timestamp is earlier than the default start time run the query for the earlier start time  """
        ts_format = TP.getTimestampFormat(earliest_utc_ts_var)
    
        """ Ensure the validity of the timestamp input """
        if ts_format == TP.TS_FORMAT_FORMAT1:
            start_time = TP.timestamp_convert_format(earliest_utc_ts_var, TP.TS_FORMAT_FORMAT1, TP.TS_FORMAT_FLAT)
        elif ts_format == TP.TS_FORMAT_FLAT:
            start_time = earliest_utc_ts_var
        elif cmp(earliest_utc_ts_var, '') == 0:
            start_time = TP.timestamp_from_obj(start_time_obj,1,3)
        else:
            raise Exception()
        
        if cmp(min_donations_var, '') == 0:
            min_donations_var = -1
        else:
            min_donations_var = int(min_donations_var)
    
    except KeyError: # If the filter form was not submitted, use the default minimum donations and retain the default start time
        
        min_donations_var = -1
    
    except Exception: # If the form was incorrectly formatted, notify the user
        
        min_donations_var = -1
        start_time = TP.timestamp_from_obj(start_time_obj,1,3)
        err_msg = 'Filter fields are incorrect.'
    


    """ 
        GENERATE CAMPAIGN DATA 
        ======================
        
    """
    campaigns, all_data = crl.run_query({'metric_name' : 'earliest_timestamp', 'start_time' : start_time, 'end_time' : end_time})

    """ Sort campaigns by earliest access """    
    sorted_campaigns = sorted(campaigns.iteritems(), key=operator.itemgetter(1))
    sorted_campaigns.reverse()
    
    """ 
        FILTER CAMPAIGN DATA
        ====================
        
    """

    new_sorted_campaigns = list()
    for campaign in sorted_campaigns:
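        # campaign is a (key, sort value) pair from iteritems(); all_data[key] carries the
        # summary fields used below (name, totals, earliest timestamp)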
        key = campaign[0]
        
        if campaign[1] > 0:
            name = all_data[key][0]
            if name is None:
                name = 'none'
            
            timestamp = TP.timestamp_convert_format(all_data[key][3], 1, 2)
            
            if filter_data: 
                if all_data[key][2] > min_donations_var:
                    new_sorted_campaigns.append([campaign[0], campaign[1], name, timestamp, all_data[key][2], all_data[key][4]])
            else:
                new_sorted_campaigns.append([campaign[0], campaign[1], name, timestamp, all_data[key][2], all_data[key][4]])
    
    sorted_campaigns = new_sorted_campaigns

    return render_to_response('campaigns/index.html', {'campaigns' : sorted_campaigns, 'err_msg' : err_msg}, context_instance=RequestContext(request))