Example no. 1
def all_time_top_programs():
    channel_dict = channleID_to_channelName()
    spark_io = SparkParquetIO()
    ucis = spark_io.get_all_interactions()
    top_programs = top_programs_by_viewtime(ucis, row_limit=20)
    table = Table()
    table.title = "all time top programs by viewing time"
    table.subtitle = "programs most watched"

    def row_dict(x):
        return {
            "content": {
                "names": [{
                    "value": x[0]
                }]
            },
            "source": {
                "ID": x[1],
                "name": channel_dict[x[1]]
            },
            "values": [{
                "name": "minutes",
                "data": x[2]
            }]
        }

    table_rows = [TableRow(**row_dict(x)) for x in top_programs]
    table.rows = table_rows
    return table.as_ui_dict()
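Table and TableRow are not defined anywhere in these examples; the following is a minimal, hypothetical sketch of the interface the snippet above assumes, with names and fields inferred purely from the call sites.

class TableRow(object):
    # hypothetical: one row holding the keyword arguments built by row_dict
    def __init__(self, content=None, source=None, values=None):
        self.content = content
        self.source = source
        self.values = values


class Table(object):
    # hypothetical: a titled collection of rows serializable for the UI
    def __init__(self):
        self.title = None
        self.subtitle = None
        self.rows = []

    def as_ui_dict(self):
        return {
            "title": self.title,
            "subtitle": self.subtitle,
            "rows": [vars(row) for row in self.rows],
        }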
Example no. 2
def channel_overview_trigger(dt):
    spark_io = SparkParquetIO()
    week_ucis = spark_io.get_weekly_interactions(dt)
    # for channel_id in channel_ids:
    for channel_id in ['eid25']:
        channel_ucis = channel_overview(
            dt, week_ucis.filter(week_ucis.channelID == channel_id),
            channel_id)
Example no. 3
def content_overview_trigger(dt):
    spark_io = SparkParquetIO()
    week_ucis = spark_io.get_weekly_interactions(dt)
    for title in all_titles:
        content_ucis = content_overview(
            dt,
            week_ucis.filter(week_ucis.title == title),
            all_titles[title],
            title
        )
Example no. 4
def content_trigger(dt):
    spark_io = SparkParquetIO()
    week_ucis = spark_io.get_weekly_interactions(dt)
    for title in all_titles:
        print title
        run_weekly_kpis(
            dt,
            week_ucis.filter(week_ucis.title == title),
            all_titles[title],
            title
        )
Example no. 5
def service_overview(dt):
    spark_io = SparkParquetIO()
    week_ucis = spark_io.get_weekly_interactions(dt)
    total_views = week_ucis.count()
    total_viewtime = week_ucis.groupBy().sum(
        'duration').collect()[0]['sum(duration)'] / 60
    hour_of_day = normalize(
        view_count_by_hour_of_day(week_ucis), total_views)
    day_of_week = normalize(
        view_count_by_day_of_week(week_ucis), total_views)
    device_overview = normalize(
        top_tag_by_view_count(week_ucis, 'device'), total_views)
    device_completion_ratio = top_tag_by_completion_ratio(week_ucis, 'device')
    location_overview = normalize(
        top_tag_by_view_count(week_ucis, 'city'), total_views)
    ###################
    # this first one should be wrapped with genre title count as well
    genre_by_views = normalize(
        top_tag_by_view_count(week_ucis, 'category', row_limit=20), total_views)
    ###################
    genre_by_viewtime = normalize(
        top_tag_by_total_viewtime(week_ucis, 'category', row_limit=20), total_viewtime)
    genre_by_completion_ratio = top_tag_by_completion_ratio(week_ucis, 'category', row_limit=20)
    genre_by_user_viewtime = top_tag_by_user_viewtime(week_ucis, 'category', row_limit=20)
    # about users
    user_complete_view = user_by_complete_views(week_ucis)
    user_viewtime = user_by_viewtime(week_ucis, 7)
    action_type_overview = top_tag_by_total_viewtime(week_ucis, 'actionType')
    action_type_overview = normalize(
        action_type_overview, sum([x[1] for x in action_type_overview])
    )
    res = [
        {"id": "hour-of-day", "data": hour_of_day},
        {"id": "day-of-week", "data": day_of_week},
        {"id": "genre-by-started-views", "data": genre_by_views},
        {"id": "genre-by-viewtime", "data": genre_by_viewtime},
        {"id": "genre-by-user-viewtime", "data": genre_by_user_viewtime},
        {"id": "genre-by-completion-ratio", "data": genre_by_completion_ratio},
        {"id": "device-by-started-views", "data": device_overview},
        {"id": "device-by-completion-ratio", "data": device_conpletion_ratio},
        {"id": "location-by-started-views", "data": location_overview},
        {"id": "genre-by-completion-ratio", "data": genre_by_completion_ratio},
        {"id": "user-by-complete-views", "data": user_complete_view},
        {"id": "user-by-viewtime", "data": user_viewtime},
        {"id": "action-type-overview", "data": action_type_overview}
    ]

    for x in res:
        print x
    r.db('gettv_insight_api').table('overview').insert(res, conflict='replace').run()
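normalize is imported from diagnostic.calculation.utils but never shown in these examples. Judging from calls like normalize(counts, total_views), it plausibly rescales each (key, count) pair to a share of the given total; a minimal sketch under that assumption:

def normalize(data, total):
    # assumed behavior: turn absolute counts into fractions of `total`
    return [(key, float(value) / total) for key, value in data]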
Example no. 6
        "data": top_program
    }, {
        "title": 'top-provider',
        "id": 'top-provider',
        "data": top_channel
    }, {
        "title": 'hour-of-day',
        "id": 'hour-of-day',
        "data": hour_of_day
    }, {
        "title": 'day-of-week',
        "id": 'day-of-week',
        "data": day_of_week
    }, {
        "title": 'package-overview',
        "id": 'package-overview',
        "data": package_overview
    }]
    r.db('telenortv_insight_api').table(view_type).insert(
        res, conflict='replace').run()


if __name__ == '__main__':
    dt_start = datetime(2016, 12, 10)
    dt_end = datetime(2017, 6, 27)
    view_type = 'svod'
    spark_io = SparkParquetIO()
    vod_ucis = spark_io.get_all_interactions()
    vod_ucis = vod_ucis.filter(vod_ucis.actionType == view_type)
    run_vod_kpis(vod_ucis, view_type)
Example no. 7
from diagnostic.data_interface.input_data import SparkParquetIO
from diagnostic.calculation.calculation import *
from diagnostic.calculation.user import *
from diagnostic.calculation.utils import normalize
from datetime import datetime, timedelta
import rethinkdb as r
import pytz
import os

RDB_HOST = os.getenv('RDB_HOST', 'localhost')
RDB_PORT = int(os.getenv('RDB_PORT', 28015))  # env values are strings; coerce
r.connect(host=RDB_HOST, port=RDB_PORT).repl()
spark_io = SparkParquetIO()


def calculate_sum(data):
    return sum([x[1] for x in data])


def run_daily_kpis(timestamp, users):
    daily_ucis = spark_io.get_daily_interactions(timestamp)
    date_string = timestamp.strftime('%Y-%m-%d')
    started_views = view_count(daily_ucis)
    daily_active_user = user_number(daily_ucis)
    user_viewtime = avg_user_viewtime(daily_ucis)
    views_by_action = normalize(action_type_view_count(daily_ucis),
                                started_views)
    complete_views = avg_finished_program_by_user(daily_ucis)
    completion_ratio = avg_completion_ratio(daily_ucis)
    daily_top_program = top_programs_by_view_count(daily_ucis, 10)
    genre = top_tag_by_view_count(daily_ucis, 'category', 10)
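run_daily_kpis is cut off after the genre computation; the other examples suggest it would end by assembling id/data rows and inserting them into RethinkDB. A hedged sketch of that tail, where the table name and row ids are assumptions:

    res = [
        {"id": "started-views", "date": date_string, "data": started_views},
        {"id": "daily-active-user", "date": date_string, "data": daily_active_user},
        {"id": "user-viewtime", "date": date_string, "data": user_viewtime},
        {"id": "views-by-action", "date": date_string, "data": views_by_action},
        {"id": "complete-views", "date": date_string, "data": complete_views},
        {"id": "completion-ratio", "date": date_string, "data": completion_ratio},
        {"id": "top-program", "date": date_string, "data": daily_top_program},
        {"id": "genre", "date": date_string, "data": genre},
    ]
    r.db('telenortv_insight_api').table('daily').insert(
        res, conflict='replace').run()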
Example no. 8
def service_overview(dt):
    spark_io = SparkParquetIO()
    week_ucis = spark_io.get_weekly_interactions(dt)
    total_views = week_ucis.count()
    total_viewtime = week_ucis.groupBy().sum(
        'duration').collect()[0]['sum(duration)'] / 60
    hour_of_day = normalize(view_count_by_hour_of_day(week_ucis), total_views)
    day_of_week = normalize(view_count_by_day_of_week(week_ucis), total_views)
    # about channels
    channel_by_views = normalize(
        top_tag_by_view_count(week_ucis, 'channelName', row_limit=20),
        total_views)
    channel_by_viewtime = normalize(
        top_tag_by_total_viewtime(week_ucis, 'channelName', row_limit=20),
        total_viewtime)
    channel_by_completion_ratio = top_tag_by_completion_ratio(week_ucis,
                                                              'channelName',
                                                              row_limit=20)
    channel_by_user_viewtime = top_tag_by_user_viewtime(week_ucis,
                                                        'channelName',
                                                        row_limit=20)
    # about genres
    ###################
    # this first one should be wrapped with genre title count as well
    genre_by_views = normalize(
        top_tag_by_view_count(week_ucis, 'category', row_limit=20),
        total_views)
    genre_by_title_count = normalize([(x[0], title_count_for_genre(x[0]))
                                      for x in genre_by_views],
                                     total_title_count())
    genre_overview = {"views": genre_by_views, "titles": genre_by_title_count}
    ###################
    genre_by_viewtime = normalize(
        top_tag_by_total_viewtime(week_ucis, 'category', row_limit=20),
        total_viewtime)
    genre_by_completion_ratio = top_tag_by_completion_ratio(week_ucis,
                                                            'category',
                                                            row_limit=20)
    genre_by_user_viewtime = top_tag_by_user_viewtime(week_ucis,
                                                      'category',
                                                      row_limit=20)
    # about users
    user_complete_view = user_by_complete_views(week_ucis)
    user_viewtime = user_by_viewtime(week_ucis, 7)
    action_type_overview = top_tag_by_total_viewtime(week_ucis, 'actionType')
    action_type_overview = normalize(action_type_overview,
                                     sum([x[1] for x in action_type_overview]))
    basic_package_by_viewing_time = basic_package_user_viewing_time(week_ucis)
    premium_package_by_viewing_time = primeium_package_user_viewing_time(
        week_ucis)
    package_overview = basic_additional_package_overview()
    res = [{
        "id": "user-package-overview",
        "data": package_overview
    }, {
        "id": "basic-package-user-viewing-time",
        "data": basic_package_by_viewing_time
    }, {
        "id": "premium-package-user-viewing-time",
        "data": primeium_package_by_viewing_time
    }, {
        "id": "hour-of-day",
        "data": hour_of_day
    }, {
        "id": "day-of-week",
        "data": day_of_week
    }, {
        "id": "channel-by-completion-ratio",
        "data": channel_by_completion_ratio
    }, {
        "id": "channel-by-views",
        "data": channel_by_views
    }, {
        "id": "channel-by-viewtime",
        "data": channel_by_viewtime
    }, {
        "id": "channel-by-user-viewtime",
        "data": channel_by_user_viewtime
    }, {
        "id": "genre-overview",
        "data": genre_overview
    }, {
        "id": "genre-by-viewtime",
        "data": genre_by_viewtime
    }, {
        "id": "genre-by-user-viewtime",
        "data": genre_by_user_viewtime
    }, {
        "id": "genre-by-completion-ratio",
        "data": genre_by_completion_ratio
    }, {
        "id": "user-by-complete-views",
        "data": user_complete_view
    }, {
        "id": "user-by-viewtime",
        "data": user_viewtime
    }, {
        "id": "action-type-overview",
        "data": action_type_overview
    }]

    print res
    r.db('telenortv_insight_api').table('overview').insert(
        res, conflict='replace').run()
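Because every row carries an "id" primary key, conflict='replace' makes the weekly job idempotent: re-running it overwrites last week's rows instead of duplicating them. A single KPI can then be read back by key, e.g.:

# look up one overview row by its primary key
r.db('telenortv_insight_api').table('overview').get('hour-of-day').run()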
Example no. 9
    table_rows = [TableRow(**row_dict(x)) for x in top_programs]
    table.rows = table_rows
    return table.as_ui_dict()


def run_discovery(week_ucis, month_ucis):
    res = [
        all_time_top_programs(),
        popular_uncompleted_channels(),
        unpopular_completed_channels(),
        midnight_favorites(week_ucis),
        genre_user_viewtime(week_ucis),
        genre_completion(week_ucis),
        channel_completion(week_ucis),
        channel_user_viewtime(week_ucis)
    ]
    res = res + run_gerne_discovery(month_ucis)
    print res
    r.db('telenortv_insight_api').table('discovery').insert(
        res, conflict='replace').run()
    print "#" * 10


if __name__ == '__main__':
    dt = datetime(2017, 6, 27)
    spark_io = SparkParquetIO()
    week_ucis = spark_io.get_weekly_interactions(dt)
    month_ucis = spark_io.get_interactions(dt - timedelta(days=30), dt)
    run_discovery(week_ucis, month_ucis)
Example no. 10
from diagnostic.data_interface.input_data import SparkParquetIO
from diagnostic.calculation.utils import normalize
from pyspark.sql import functions as func
from pyspark.sql.types import BooleanType, StringType
from datetime import datetime, timedelta
from functional import seq


# a Scala-like functional processing package (seq) is used

# __all__ is defined for easy explicit import
__all__ = [
    'weekly_new_user', 'unique_user',
    'users_package_overview', 'basic_additional_package_overview',
    'basic_package_user_viewing_time', 'primeium_package_user_viewing_time'
]
spark_io = SparkParquetIO()

def transform_name(name_list):
    if len(name_list) == 0:
        return 'None'
    else:
        return ' & '.join(name_list)

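# new users: firstActivity falls inside the 7-day window ending on dt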
def weekly_new_user(df, dt):
    return df\
        .filter(
            (df.firstActivity>dt-timedelta(days=6)) & (df.firstActivity<dt+timedelta(days=1))
        )\
        .count()

def unique_user(df, dt):
    # body truncated in the original snippet; presumably a distinct-user count
    pass
Example no. 11
def channel_trigger(dt):
    spark_io = SparkParquetIO()
    week_ucis = spark_io.get_weekly_interactions(dt)
    for channel_id in channel_ids:
        run_daily_kpis(dt, week_ucis.filter(week_ucis.channelID == channel_id),
                       channel_id)
Example no. 12
    def test_load_interactions(self):
        dt = datetime(2017, 1, 1)
        spark = SparkParquetIO()
        interactions = spark.get_daily_interactions(dt)
        print interactions.count()
from datetime import datetime, timedelta
import rethinkdb as r
import pytz
import os

RDB_HOST = os.getenv('RDB_HOST', 'localhost')
RDB_PORT = int(os.getenv('RDB_PORT', 28015))  # env values are strings; coerce
r.connect(host=RDB_HOST, port=RDB_PORT).repl()


def library_overview(ucis):
    channel_num = len(channel_ids)
    genre_num = genre_number(ucis)
    print channel_num, genre_num
    res = [{
        "channel-number": channel_num,
        "id": "channel-number"
    }, {
        "genre-number": genre_num,
        "id": "genre-number"
    }]
    r.db('telenortv_insight_api')\
        .table('overview')\
        .insert(res, conflict='replace').run()


if __name__ == '__main__':
    spark_io = SparkParquetIO()
    ucis = spark_io.get_all_interactions(cols=['category'])
    library_overview(ucis)
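Taken together, the examples exercise a small reader interface on SparkParquetIO. The sketch below lists the signatures inferred from the call sites above; it is not the real implementation, which lives in diagnostic.data_interface.input_data.

class SparkParquetIO(object):
    # inferred interface only; each call site above maps to one method

    def get_daily_interactions(self, dt):
        raise NotImplementedError

    def get_weekly_interactions(self, dt):
        raise NotImplementedError

    def get_interactions(self, start, end):
        raise NotImplementedError

    def get_all_interactions(self, cols=None):
        raise NotImplementedError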