Example #1
    def get_articles(self,
                     after_date=None,
                     most_recent_first=False,
                     easiest_first=False):
        from zeeguu_core.model import Article

        # The cache lives on the class: a dict mapping language id -> list of article ids.
        if not hasattr(Language, "cached_articles"):
            Language.cached_articles = {}

        cached_ids = Language.cached_articles.get(self.id)
        if cached_ids:
            zeeguu_core.logp(
                f"found {len(cached_ids)} cached articles for {self.name}"
            )
            return Article.query.filter(Article.id.in_(cached_ids)).all()

        zeeguu_core.logp("computing and caching the articles for language: " +
                         self.name)
        Language.cached_articles[self.id] = [
            each.id for each in self._get_articles(
                after_date, most_recent_first, easiest_first)
        ]

        all_ids = Language.cached_articles[self.id]
        return Article.query.filter(Article.id.in_(all_ids)).all()
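
This get_articles implements a simple memoization pattern: the first call computes a list of article ids, stores it in a dict hung off the Language class keyed by self.id, and every later call re-fetches the rows by id. Note that the cache key is only self.id, so a second call with a different after_date or different sort flags still returns the first call's result. A minimal usage sketch, assuming a populated Zeeguu database and a Language.find("de") lookup helper (the helper is an assumption, not shown above):

    from zeeguu_core.model import Language

    german = Language.find("de")  # assumed lookup helper

    # First call computes the id list and caches it on the Language class.
    articles = german.get_articles(most_recent_first=True)

    # Second call is served from Language.cached_articles, even though
    # the arguments differ: the cache key is only the language id.
    same_articles = german.get_articles(easiest_first=True)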
Example #2
    def all_articles(self, limit=2000):

        from zeeguu_core.model import Article

        # Same class-level cache pattern as above: topic id -> list of article ids.
        if not hasattr(Topic, 'cached_articles'):
            Topic.cached_articles = {}

        cached_ids = Topic.cached_articles.get(self.id)
        if cached_ids:
            zeeguu_core.logp(f"Topic: getting the cached articles for topic: {self.title}")
            return Article.query.filter(Article.id.in_(cached_ids)).all()

        zeeguu_core.logp("computing and caching the articles for topic: " + self.title)
        Topic.cached_articles[self.id] = [
            each.id
            for each in Article.query
                .order_by(Article.published_time.desc())
                .filter(Article.topics.any(id=self.id))
                .limit(limit)
        ]

        all_ids = Topic.cached_articles[self.id]
        return Article.query.filter(Article.id.in_(all_ids)).all()
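
A consequence of caching on the class is that entries live for the life of the process and are never invalidated when new articles arrive; only a restart clears them. A sketch of an invalidation helper one could add to Topic (hypothetical, not part of zeeguu_core):

    def invalidate_cached_articles(self):
        # Hypothetical helper: drop this topic's entry so the next
        # all_articles() call recomputes it. Only affects the current
        # process; other workers keep their own copy of the class dict.
        if hasattr(Topic, 'cached_articles'):
            Topic.cached_articles.pop(self.id, None)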
def recompute_for_users():
    """

        recomputes only those caches that are already in the table
        and belong to a user. if multiple users have the same preferences
        the computation is done only for the first because this is how
        _recompute_recommender_cache_if_needed does.

        To think about:
        - what happens when this script is triggered simultaneously
        with triggering _recompute_recommender_cache_if_needed from
        the UI? will there end up be duplicated recommendations?
        should we add a uninque constraint on (hash x article)?

        Note:

        in theory, the recomputing should be doable independent of users
        in practice, the _recompute_recommender_cache takes the user as input.
        for that function to become independent of the user we need to be
        able to recover the ids of the languages, topics, searchers, etc. from the
        content_hash
        to do this their ids would need to be comma separated

        OTOH, in the future we might still want to have a per-user cache
        because the recommendations might be different for each user
        since every user has different language levels!!!

    :param existing_hashes:
    :return:
    """
    already_done = []
    for user_id in User.all_recent_user_ids():
        try:
            user = User.find_by_id(user_id)
            reading_pref_hash = _reading_preferences_hash(user)
            if reading_pref_hash not in already_done:
                _recompute_recommender_cache_if_needed(user, session)
                zeeguu_core.logp(f"Success for {reading_pref_hash} and {user}")
                already_done.append(reading_pref_hash)
            else:
                zeeguu_core.logp(
                    f"nno need to do for {user}. hash {reading_pref_hash} already done"
                )
        except Exception as e:
            # Log the id we iterated over: if User.find_by_id itself raised,
            # `user` would be unbound in this handler.
            zeeguu_core.logp(f"Failed for user id {user_id}: {e}")