Example #1
def save_playback_history(media_type, playback_percentage):
    # Record in JSON when things were played, to help predict which widgets will change after playback
    # if playback_percentage < 0.7:
    #    return
    history = utils.read_json(_playback_history_path, default={})
    plays = history.setdefault("plays", [])
    plays.append((time.time(), media_type))
    utils.write_json(_playback_history_path, history)
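For reference, a minimal sketch of the JSON this produces (timestamps and media types are illustrative; JSON stores each play tuple as a two-element array):

example_playback_history = {
    "plays": [
        [1690000000.0, "movie"],    # (time.time() when recorded, media type)
        [1690003600.0, "episode"],
    ]
}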
Example #2
def widgets_for_path(path):
    hash = path2hash(path)
    history_path = os.path.join(_addon_data, "{}.history".format(hash))
    cache_data = utils.read_json(history_path) if os.path.exists(history_path) else None
    if cache_data is None:
        cache_data = {}
    widgets = cache_data.setdefault("widgets", [])
    return set(widgets)
Example #3
def widgets_changed_by_watching(media_type):
    # Predict which widgets the skin might have that could have changed after
    # recently finishing watching something

    all_cache = filter(
        os.path.isfile, glob.glob(os.path.join(_addon_data, "*.history"))
    )

    # Simple version. Anything updated recently (since startup?)
    # priority = sorted(all_cache, key=os.path.getmtime)
    # Sort by chance of it updating
    plays = utils.read_json(_playback_history_path, default={}).setdefault("plays", [])
    plays_for_type = [(play_time, t) for play_time, t in plays if t == media_type]
    priority = sorted(
        [
            (
                chance_playback_updates_widget(path, plays_for_type),
                utils.read_json(path).get("path", ""),
                path,
            )
            for path in all_cache
        ],
        reverse=True,
    )

    for chance, path, history_path in priority:
        hash = path2hash(path)
        last_update = os.path.getmtime(history_path) - _startup_time
        if last_update < 0:
            utils.log(
                "widget not updated since startup {} {}".format(last_update, hash[:5]),
                "notice",
            )
        # elif chance < 0.3:
        #     log("chance widget changed after play {}% {}".format(chance, hash[:5]), 'notice')
        else:
            utils.log(
                "chance widget changed after play {}% {}".format(chance, hash[:5]),
                "notice",
            )
            yield hash, path
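A hedged usage sketch: widgets_changed_by_watching is a generator yielding (hash, path) pairs in descending order of predicted change, so a service callback (the hook name here is hypothetical) might queue refreshes like this:

def on_playback_ended(media_type):
    # Hypothetical hook: record the play, then queue the widget paths most
    # likely to have changed, highest predicted chance first.
    save_playback_history(media_type, 1.0)
    for hash, path in widgets_changed_by_watching(media_type):
        push_cache_queue(path)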
Example #4
def get_group_by_id(group_id):
    if not group_id:
        return

    filename = '{}.group'.format(group_id)
    path = os.path.join(_addon_path, filename)

    try:
        group_def = utils.read_json(path)
    except ValueError:
        utils.log('Unable to parse: {}'.format(path))
        return None

    return group_def
Example #5
def chance_playback_updates_widget(history_path, plays, cutoff_time=60 * 5):
    cache_data = utils.read_json(history_path)
    history = cache_data.setdefault("history", [])
    # Complex version
    # - for each widget
    #    - come up with chance it will update after a playback
    #    - for each pair of updates, is there a playback in between, and was the update within X min after it?
    #    - num playbacks with change / num playbacks with no change
    changes, non_changes, unrelated_changes = 0, 0, 0
    update = ""
    time_since_play = 0
    for play_time, media_type in plays:
        while True:
            last_update = update
            if not history:
                break
            update_time, update = history.pop(0)
            time_since_play = update_time - play_time
            # log("{} {} {} {}".format(update[:5],last_update[:5], unrelated_changes, time_since_play), 'notice')
            if time_since_play > 0:
                break
            elif update != last_update:
                unrelated_changes += 1

        if update == last_update:
            non_changes += 1
        elif (
            time_since_play > cutoff_time
        ):  # update too long after playback to be related
            pass
        else:
            changes += 1
        # TODO: what if the previous update was a long time before playback?

    # There is probably a more statistically correct way of doing this, but the idea is that
    # with few datapoints we should tend towards 0.5 probability, and as we get more datapoints
    # the error goes down and we rely on the actual changes vs non-changes.
    # We do a simple weighted average with 0.5 to simulate this.
    # TODO: currently random widgets score higher than recently played widgets; need to score them
    # lower as they are less relevant
    utils.log(
        "changes={}, non_changes={}, unrelated_changes={}".format(
            changes, non_changes, unrelated_changes
        ),
        "debug",
    )
    datapoints = float(changes + non_changes)
    observations = float(changes + non_changes + unrelated_changes)
    # With no observations at all, fall back to the 0.5 prior rather than divide by zero
    prob = changes / observations if observations else 0.5
    unknown_weight = 4
    prob = (prob * datapoints + 0.5 * unknown_weight) / (datapoints + unknown_weight)
    return prob
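To see what the weighted average does, a quick worked example with made-up counts: three changes and one non-change give a raw probability of 0.75, but with only four datapoints the unknown_weight of 4 pulls the estimate exactly halfway back towards the 0.5 prior:

changes, non_changes, unrelated_changes = 3, 1, 0
datapoints = float(changes + non_changes)                          # 4.0
prob = changes / float(changes + non_changes + unrelated_changes)  # 0.75
unknown_weight = 4
prob = (prob * datapoints + 0.5 * unknown_weight) / (datapoints + unknown_weight)
print(prob)  # 0.625 -- stays near 0.5 until more datapoints accumulate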
Example #6
def read_history(path, create_if_missing=True):
    hash = path2hash(path)
    history_path = os.path.join(_addon_data, "{}.history".format(hash))
    if not os.path.exists(history_path):
        if create_if_missing:
            # Seed a new history file with empty history and widget lists
            cache_data = {"history": [], "widgets": []}
            utils.write_json(history_path, cache_data)
        else:
            cache_data = None
    else:
        cache_data = utils.read_json(history_path)
    return cache_data
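Putting Examples #5, #6, and #15 together, a populated .history file presumably ends up shaped like this (the path, hash, and timestamp values are illustrative):

example_history_file = {
    "path": "plugin://plugin.video.example/?mode=recent",  # hypothetical widget path
    "history": [
        [1690000000.0, "d4f5a9c"],  # (update time, hash of the cached content)
    ],
    "widgets": ["widget-1234"],     # ids of widgets known to use this path
}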
Example #7
def get_group_by_id(group_id):
    if not group_id:
        return {}

    filename = "{}.group".format(group_id)
    path = os.path.join(_addon_data, filename)

    try:
        group_def = utils.read_json(path)
    except ValueError:
        utils.log("Unable to parse: {}".format(path))
        return

    return group_def
Example #8
def find_defined_widgets(group_id=None):
    addon_files = os.listdir(utils._addon_path)
    widgets = []

    widget_files = [x for x in addon_files if x.endswith(".widget")]
    for widget_file in widget_files:
        widget_def = utils.read_json(os.path.join(utils._addon_path, widget_file))

        if widget_def:
            if not group_id:
                widgets.append(widget_def)
            elif group_id == widget_def["group"]:
                widgets.append(widget_def)

    return widgets
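For context, the only key this relies on is "group"; a .widget file presumably carries at least that (the id is hypothetical and other fields are elided):

example_widget_def = {
    "group": "group-abc123",  # links the widget back to its .group file
}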
Example #9
def find_defined_paths(group_id=None):
    if group_id:
        filename = "{}.group".format(group_id)
        path = os.path.join(utils._addon_path, filename)

        group_def = utils.read_json(path)
        if group_def:
            return group_def.get("paths", [])
        return []
    else:
        paths = []
        for group in find_defined_groups():
            group_paths = find_defined_paths(group_id=group.get("id"))
            for path in group_paths:
                paths.append(path)
        return paths
Example #10
def find_defined_groups(_type=""):
    groups = []

    for filename in [x for x in os.listdir(utils._addon_path) if x.endswith(".group")]:
        path = os.path.join(utils._addon_path, filename)

        group_def = utils.read_json(path)
        if group_def:
            if _type:
                if group_def["type"] == _type:
                    groups.append(group_def)
            else:
                groups.append(group_def)

    return groups
Example #11
def next_cache_queue():
    # Simple queue by creating a .queue file
    # TODO: use watchdog to use less resources
    for path in iter_queue():
        # TODO: sort by path instead so we load plugins at the same time
        hash = path2hash(path)
        queue_path = os.path.join(_addon_data, "{}.queue".format(hash))
        if not os.path.exists(queue_path):
            # a widget update has already taken care of updating this path
            continue
        # We will let the update operation remove the item from the queue

        # TODO: need to work out if a blocking write happened while it was queued or is happening right now.
        # probably need a .lock file to ensure foreground calls can get priority.
        cache_data = read_history(path, create_if_missing=True)
        widget_id = utils.read_json(queue_path).get("widget_id", None)
        yield path, cache_data, widget_id
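A sketch of a background consumer for this queue, assuming a hypothetical fetch_files helper that actually queries the plugin path; cache_expiry with add= (Example #15) is what writes the fresh result:

for path, cache_data, widget_id in next_cache_queue():
    new_contents = fetch_files(path)  # hypothetical: fetch the path's directory listing
    expiry, contents, changed = cache_expiry(
        path, widget_id, add=new_contents, background=True
    )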
Example #12
def find_defined_paths(group_id=None):
    paths = []
    if group_id:
        filename = '{}.group'.format(group_id)
        path = os.path.join(_addon_path, filename)

        try:
            group_def = utils.read_json(path)
        except ValueError:
            utils.log('Unable to parse: {}'.format(path))
            group_def = None

        if group_def:
            return group_def.get('paths', [])
    else:
        for group in find_defined_groups():
            paths.extend(find_defined_paths(group_id=group.get('id')))

    return paths
Example #13
def find_defined_groups(_type=''):
    groups = []

    for filename in [
            x for x in os.listdir(_addon_path) if x.endswith('.group')
    ]:
        path = os.path.join(_addon_path, filename)

        try:
            group_def = utils.read_json(path)
        except ValueError:
            utils.log('Unable to parse: {}'.format(path))
            continue

        if group_def:
            if _type:
                if group_def['type'] == _type:
                    groups.append(group_def)
            else:
                groups.append(group_def)

    return groups
Example #14
def iter_queue():
    queued = filter(os.path.isfile, glob.glob(os.path.join(_addon_data, "*.queue")))
    # TODO: sort by path instead so we load plugins at the same time
    for path in sorted(queued, key=os.path.getmtime):
        queue_data = utils.read_json(path)
        yield queue_data.get("path", "")
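Based on the keys read here and in next_cache_queue, a .queue file is presumably a small JSON blob like this (both values illustrative):

example_queue_entry = {
    "path": "plugin://plugin.video.example/?mode=recent",  # hypothetical queued path
    "widget_id": "widget-1234",                            # hypothetical requesting widget
}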
Example #15
def cache_expiry(path, widget_id, add=None, background=True):
    # Predict how long to cache for with a min of 5min so updates don't go in a loop
    # TODO: find a better way to prevent loops so that users trying to manually refresh can do so
    # TODO: manage the cache files to remove any too old or no longer used
    # TODO: update paths on autowidget refresh based on predicted update frequency. e.g. plugins with random paths should
    # update when autowidget updates.
    hash = path2hash(path)
    cache_path = os.path.join(_addon_data, "{}.cache".format(hash))

    # Read file every time as we might be called from multiple processes
    history_path = os.path.join(_addon_data, "{}.history".format(hash))
    cache_data = utils.read_json(history_path) if os.path.exists(history_path) else None
    if cache_data is None:
        cache_data = {}
        since_read = 0
    else:
        since_read = time.time() - last_read(hash)

    history = cache_data.setdefault("history", [])
    widgets = cache_data.setdefault("widgets", [])
    if widget_id not in widgets:
        widgets.append(widget_id)

    expiry = time.time() - 20
    contents = None
    changed = True
    size = 0

    if add is not None:
        cache_json = json.dumps(add)
        if not add or not cache_json.strip():
            result = "Invalid Write"

        elif "error" in add or not add.get("result", {}).get("files"):
            # In this case we don't want to cache a bad result
            result = "Error"
            # TODO: do we schedule a new update? or put dummy content up even if we have
            # good cached content?
        else:
            utils.write_json(cache_path, add)
            contents = add
            size = len(cache_json)
            content_hash = path2hash(cache_json)
            changed = history[-1][1] != content_hash if history else True
            history.append((time.time(), content_hash))
            if cache_data.get("path") != path:
                cache_data["path"] = path
            utils.write_json(history_path, cache_data)
            # expiry = history[-1][0] + DEFAULT_CACHE_TIME
            pred_dur = predict_update_frequency(history)
            expiry = (
                history[-1][0] + pred_dur * 0.75
            )  # less than prediction to ensure pred keeps up to date
            result = "Wrote"
    else:
        # write any updated widget_ids so we know what to update when we dequeue
        # Also important as we use last modified of .history as accessed time
        utils.write_json(history_path, cache_data)
        if not os.path.exists(cache_path):
            result = "Empty"
            if background:
                contents = utils.make_holding_path(utils.get_string(30145), "refresh")
                push_cache_queue(path)
        else:
            contents = utils.read_json(cache_path, log_file=True)
            if contents is None:
                result = "Invalid Read"
                if background:
                    contents = utils.make_holding_path(
                        utils.get_string(30139).format(hash), "alert"
                    )
                    push_cache_queue(path)
            else:
                # write any updated widget_ids so we know what to update when we dequeue
                # Also important as we use last modified of .history as accessed time
                utils.write_json(history_path, cache_data)
                size = len(json.dumps(contents))
                if history:
                    expiry = history[-1][0] + predict_update_frequency(history)

                # queue_len = len(list(iter_queue()))
                if expiry > time.time():
                    result = "Read"
                elif not background:
                    result = "Skip already updated"
                # elif queue_len > 3:
                #     # Try to give system more breathing space by returning empty cache but ensuring refresh
                #     # better way is to just do this the first X accessed after startup.
                #     # or how many accessed in the last 30s?
                #     push_cache_queue(hash)
                #     result = "Skip (queue={})".format(queue_len)
                #     contents = dict(result=dict(files=[]))
                else:
                    push_cache_queue(path)
                    result = "Read and queue"
    # TODO: some metric that tells us how long until the first and last widgets become visible and then get updated.
    # But how do we measure the time delay from when the cache is read until it appears on screen?
    # Is the first cache read always the top visible widget?
    utils.log(
        "{} cache {}B (exp:{}, last:{}): {} {}".format(
            result,
            size,
            utils.ft(expiry - time.time()),
            utils.ft(since_read),
            hash[:5],
            widgets,
        ),
        "notice",
    )
    return expiry, contents, changed
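Finally, a hedged sketch of a foreground read: a caller rendering a widget passes no new content and uses whatever comes back (render_files is hypothetical; the result/files shape matches the check on the add= branch above):

expiry, contents, changed = cache_expiry(path, widget_id, background=True)
if contents is not None:
    # Fresh cache, stale cache queued for refresh, or a holding path
    render_files(contents.get("result", {}).get("files", []))  # hypothetical renderer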