Example No. 1
    def test_authorize_anonymous(self):
        random_uuid = str(uuid.uuid4())
        new_password = self.faker.password()
        anonymous_user = User.create_anonymous(random_uuid, new_password)
        self.db.session.add(anonymous_user)
        self.db.session.commit()

        result = User.authorize_anonymous(random_uuid, new_password)

        assert result is not None and result == anonymous_user
Example No. 2
    def calculate_hash(user, topics, filters, searches, search_filters, user_languages):
        """
        Calculates the hash over all the content filters.
        It simply adds a letter for the type of content and the ids
        of all the content that has been added.
        """

        def _join_ids(a_list: list):
            return ",".join([str(each.id) for each in a_list])

        result = "lan: "
        from zeeguu.core.model import User

        for each in user_languages:
            result += f"{each.code} " + str(User.levels_for(user, each))

        return (
            result
            + " top: "
            + _join_ids(topics)
            + " sear: "
            + _join_ids(searches)
            + " filt: "
            + _join_ids(filters)
            + " sear-filt: "
            + _join_ids(search_filters)
        )
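For illustration, a minimal sketch of the string this produces, using hypothetical stand-in objects instead of the real ORM models (only an .id attribute is needed, and the user-language prefix is left empty):

from collections import namedtuple

# hypothetical stand-ins; the real method receives ORM objects with an .id attribute
Item = namedtuple("Item", "id")
topics, searches = [Item(3), Item(7)], [Item(12)]
filters, search_filters = [], []

def _join_ids(a_list):
    return ",".join(str(each.id) for each in a_list)

# with no user languages the "lan:" prefix stays empty, so the hash is just the ids
print("lan: " + " top: " + _join_ids(topics)
      + " sear: " + _join_ids(searches)
      + " filt: " + _join_ids(filters)
      + " sear-filt: " + _join_ids(search_filters))
# -> lan:  top: 3,7 sear: 12 filt:  sear-filt: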
Example No. 3
        def _upgrade_to_teacher(email):
            from zeeguu.core.model import User, Teacher

            db = zeeguu.core.db

            u = User.find(email)
            db.session.add(Teacher(u))
            db.session.commit()
Example No. 4
def recompute_for_users():
    """
    Recomputes only those caches that are already in the table
    and belong to a user. If multiple users have the same preferences,
    the computation is done only for the first, because this is how
    _recompute_recommender_cache_if_needed works.

    To think about:
    - What happens when this script is triggered simultaneously
      with triggering _recompute_recommender_cache_if_needed from
      the UI? Will there end up being duplicated recommendations?
      Should we add a unique constraint on (hash x article)?

    Note:

    In theory, the recomputing should be doable independently of users;
    in practice, _recompute_recommender_cache takes the user as input.
    For that function to become independent of the user, we need to be
    able to recover the ids of the languages, topics, searches, etc. from
    the content_hash; to do this, their ids would need to be comma separated.

    OTOH, in the future we might still want to have a per-user cache,
    because the recommendations might be different for each user,
    since every user has different language levels!
    """
    already_done = []
    for user_id in User.all_recent_user_ids():
        try:
            user = User.find_by_id(user_id)
            reading_pref_hash = _reading_preferences_hash(user)
            if reading_pref_hash not in already_done:
                _recompute_recommender_cache_if_needed(user, session)
                zeeguu.core.logp(f"Success for {reading_pref_hash} and {user}")
                already_done.append(reading_pref_hash)
            else:
                zeeguu.core.logp(
                    f"no need to do for {user}. hash {reading_pref_hash} already done"
                )
        except Exception as e:
            zeeguu.core.logp(f"Failed for user {user}: {e}")
Example No. 5
def print_liked_articles():
    user = User.find_by_id(USER_ID)

    for user_article in user.liked_articles():
        article = user_article.article
        if article.language.code != LANG:
            continue

        print(
            f"{article.word_count}, {article.fk_difficulty}, {article.title}")
Example No. 6
def remove_user_from_cohort(user_id):

    check_permission_for_user(user_id)

    u = User.find_by_id(user_id)
    u.cohort_id = None
    db.session.add(u)
    db.session.commit()

    return "OK"
Example No. 7
def fix_bookmark_priorities(USER_ID):
    print(f"fixing for user {USER_ID}")
    user = User.find_by_id(USER_ID)

    all_users_bookmarks = user.all_bookmarks()
    for each in all_users_bookmarks:
        each.update_fit_for_study()
    db.session.commit()

    BookmarkPriorityUpdater.update_bookmark_priority(db, user)
    print(f"... OK for {len(all_users_bookmarks)} bookmarks")
Example No. 8
def bookmarks_to_study_for_article(article_id):

    user = User.find_by_id(flask.g.user.id)
    article = Article.query.filter_by(id=article_id).one()

    bookmarks = user.bookmarks_for_article(article_id,
                                           with_context=True,
                                           with_title=True,
                                           good_for_study=True)

    return json_result(dict(bookmarks=bookmarks, article_title=article.title))
Example No. 9
    def _link_teacher_cohort(user_id, cohort_id):
        """
        Takes user_id and cohort_id and links them together in teacher_cohort_map table.
        """
        from zeeguu.core.model import TeacherCohortMap

        user = User.find_by_id(user_id)
        cohort = Cohort.find(cohort_id)
        db.session.add(TeacherCohortMap(user, cohort))
        db.session.commit()
        return "added teacher_cohort relationship"
Example No. 10
def analyze_user(user_id, language, only_liked=ANALYZE_ONLY_LIKED):
    user = User.find_by_id(user_id)
    language_id = Language.find(language).id

    macro_sessions = extract_macro_sessions_from_db(user, language_id)
    macro_sessions = filter_sessions(macro_sessions, only_liked)

    if PRINT_DETAIL:
        for macro_session in macro_sessions:
            macro_session.print_details()
            input("<Enter to continue>")

    summarize_yearly_reading_speed(macro_sessions)
Example No. 11
def add_colleague_to_cohort():

    cohort_id = request.form.get("cohort_id")
    colleague_email = request.form.get("colleague_email")

    check_permission_for_cohort(cohort_id)

    colleague = User.find(colleague_email)
    cohort = Cohort.find(cohort_id)
    db.session.add(TeacherCohortMap(colleague, cohort))
    db.session.commit()

    return "OK"
Example No. 12
def print_sorted_activity():
    all_users = User.find_all()

    user_activity = {}
    for user in all_users:
        print(f"({user.id}) Analyzing {user.name}... ")
        events = UserActivityData.find(user)
        user_activity[user] = events

    sorted_activity = sorted(user_activity.items(),
                             key=lambda item: len(item[1]))
    for user, activity in sorted_activity:
        print(f"{user.id}, {user.name}, {len(activity)}")
Example No. 13
def users_correlations():
    users_df = pd.DataFrame(columns=[
        "id", "reading_lang", "native_lang", "opened", "translated", "spoken",
        "liked", "closed"
    ])
    all_users = User.find_all()
    print(len(all_users))
    #for reading_language in languages_to_analyze:
    #	print("\nLANGUAGE:", reading_language)
    #	language_id = Language.find(reading_language).id
    for user in tqdm(all_users):
        #if user.learned_language_id == language_id:

        df = {
            "id": user.id,
            "reading_lang": str(user.learned_language),
            "native_lang": str(user.native_language),
            "opened": 0,
            "translated": 0,
            "spoken": 0,
            "liked": 0,
            "closed": 0
        }
        users_df = users_df.append(df, ignore_index=True)

        # todo: check all possible events
        events = UserActivityData.find(user)
        for event in events:
            article_id = event.article_id
            if article_id:
                if event.event == "UMR - OPEN ARTICLE":
                    users_df.loc[users_df.id == user.id, 'opened'] += 1
                if event.event == "UMR - TRANSLATE TEXT":
                    users_df.loc[users_df.id == user.id, 'translated'] += 1
                if event.event == "UMR - SPEAK TEXT":
                    users_df.loc[users_df.id == user.id, 'spoken'] += 1
                if event.event == "UMR - LIKE ARTICLE":
                    users_df.loc[users_df.id == user.id, 'liked'] += 1
                if event.event == "UMR - ARTICLE CLOSED":
                    users_df.loc[users_df.id == user.id, 'closed'] += 1

    # keep only users that opened at least 1 article
    users_df.drop(users_df[users_df.opened < 1].index, inplace=True)

    print("Users:", len(users_df))

    print(users_df['native_lang'].value_counts())
    print("---")
    print(users_df['reading_lang'].value_counts())
Example No. 14
    def setUp(self, mock_invite_code):
        # idea from here:
        # https://docs.pytest.org/en/latest/example/simple.html#detect-if-running-from-within-a-pytest-run
        # allows the api translate_and_Bookmark to know that it's being called from the unit test
        # and use the reverse translator instead of the real translators

        app.testing = True
        self.app = app.test_client()
        zeeguu.core.db.create_all()

        response = self.app.post(f"/add_user/{TEST_EMAIL}",
                                 data=test_user_data)

        self.session = str(int(response.data))
        self.user = User.find(TEST_EMAIL)
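The comment above refers to the pytest documentation's "detect if running from within a pytest run" recipe; for reference, that pattern (taken from the linked pytest docs page, not necessarily verbatim what zeeguu does) sets a flag in conftest.py that application code can check:

# conftest.py -- pattern from the pytest docs page referenced above
import sys

def pytest_configure(config):
    # application code can test for hasattr(sys, "_called_from_test")
    sys._called_from_test = True

def pytest_unconfigure(config):
    del sys._called_from_test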
Example No. 15
    def test_create_anonymous(self):
        self.user = UserRule().user
        new_password = self.faker.password()
        self.user.update_password(new_password)

        user_to_check = User.create_anonymous(
            str(self.user.id),
            new_password,
            self.user.learned_language.code,
            self.user.native_language.code,
        )

        assert user_to_check.email == str(
            self.user.id) + User.ANONYMOUS_EMAIL_DOMAIN
        assert user_to_check.name == str(self.user.id)
        assert user_to_check.learned_language == self.user.learned_language
        assert user_to_check.native_language == self.user.native_language
Example No. 16
def bookmarks_for_article(article_id, user_id):
    """
    Returns the bookmarks of this user organized by date. Based on the
    POST arguments, it can also return the context of the bookmark, as
    well as only the bookmarks after a given date.

    :param (POST) with_context: If this parameter is "true", the endpoint
    also returns the text where the bookmark was found.

    :param (POST) after_date: the date after which to start retrieving
     the bookmarks. if no date is specified, all the bookmarks are returned.
     The date format is: %Y-%m-%dT%H:%M:%S. E.g. 2001-01-01T01:55:00

    """

    user = User.find_by_id(user_id)
    article = Article.query.filter_by(id=article_id).one()

    bookmarks = user.bookmarks_for_article(article_id,
                                           with_context=True,
                                           with_title=True)

    return json_result(dict(bookmarks=bookmarks, article_title=article.title))
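A hedged sketch of exercising this endpoint from a test, reusing the test client from Example No. 14; the URL rule below is hypothetical, since the route registration is not part of this excerpt:

import json

# hypothetical route; adapt to the actual route registration of the API
response = self.app.post(
    f"/bookmarks_for_article/{article_id}/{user_id}",
    data=dict(
        with_context="true",               # also return the surrounding text
        after_date="2021-01-01T00:00:00",  # %Y-%m-%dT%H:%M:%S, as documented above
    ),
)
payload = json.loads(response.data)
print(payload["article_title"], len(payload["bookmarks"]))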
Example No. 17
    def test_validate_name(self):
        random_name = self.faker.name()
        assert User.validate_name("", random_name)
Example No. 18
from zeeguu.core.model import User

user = User.find_by_id(534)
print(user.name)
Example No. 19
    def test_exists(self):
        assert User.exists(self.user)
Example No. 20
    def test_validate_password(self):
        random_password = self.faker.password()
        assert User.validate_password("", random_password)
Example No. 21
    def test_authorize(self):
        new_password = self.faker.password()
        self.user.update_password(new_password)
        result = User.authorize(self.user.email, new_password)

        assert result is not None and result == self.user
Example No. 22
def articles_correlations():
    articles_df = pd.DataFrame(columns=[
        "id", "lang", "difficulty", "word_count", "title_length", "opened",
        "translated", "spoken", "liked", "closed"
    ])
    all_users = User.find_all()
    print(len(all_users))
    for reading_language in languages_to_analyze:
        print("\nLANGUAGE:", reading_language)
        language_id = Language.find(reading_language).id
        for user in tqdm(all_users):
            if user.learned_language_id == language_id:
                events = UserActivityData.find(user)
                for event in events:
                    article_id = event.article_id
                    if article_id:
                        article_data = Article.find_by_id(article_id)
                        if article_data.language_id == language_id:
                            if not (articles_df['id'] == article_id).any():
                                title_len = len(article_data.title.split())
                                df = {
                                    "id": article_id,
                                    "lang": article_data.language_id,
                                    "difficulty": article_data.fk_difficulty,
                                    "word_count": article_data.word_count,
                                    "title_length": title_len,
                                    "opened": 0,
                                    "translated": 0,
                                    "spoken": 0,
                                    "liked": 0,
                                    "closed": 0
                                }
                                articles_df = articles_df.append(
                                    df, ignore_index=True)
                            if event.event == "UMR - OPEN ARTICLE":
                                articles_df.loc[articles_df.id == article_id,
                                                'opened'] += 1
                            if event.event == "UMR - TRANSLATE TEXT":
                                articles_df.loc[articles_df.id == article_id,
                                                'translated'] += 1
                            if event.event == "UMR - SPEAK TEXT":
                                articles_df.loc[articles_df.id == article_id,
                                                'spoken'] += 1
                            if event.event == "UMR - LIKE ARTICLE":
                                articles_df.loc[articles_df.id == article_id,
                                                'liked'] += 1
                            if event.event == "UMR - ARTICLE CLOSED":
                                articles_df.loc[articles_df.id == article_id,
                                                'closed'] += 1

        print("Articles:", len(articles_df))

        # (variable, variable) pairs to correlate; fk_difficulty is the
        # common second member for most of them
        correlation_pairs = [
            ("word_count", "difficulty"),    # word count & fk_difficulty
            ("liked", "difficulty"),         # liked & fk_difficulty
            ("translated", "difficulty"),    # number of translations & difficulty
            ("spoken", "difficulty"),        # number of spoken words & difficulty
            ("opened", "difficulty"),        # times the article is opened & difficulty
            ("closed", "difficulty"),        # times the article is closed & difficulty
            ("title_length", "difficulty"),  # title length & fk_difficulty
            ("opened", "title_length"),      # times opened & title length
        ]

        for first, second in correlation_pairs:
            spearman_corr = stats.spearmanr(articles_df[first],
                                            articles_df[second])
            print(first, second, spearman_corr[0], spearman_corr[1])
Example No. 23
def past_exercises_for(user_id):
    user = User.find_by_id(user_id)

    q = (db.session.query(Exercise).join(bookmark_exercise_mapping).join(
        Bookmark).join(User).filter(User.id == user_id).order_by(
            Exercise.time))

    for ex in q.all():
        bookmark = ex.get_bookmark()
        past = ""

        sorted_log = sorted(
            bookmark.exercise_log,
            key=lambda x: datetime.datetime.strftime(x.time, "%Y-%m-%d"),
            reverse=True,
        )

        corrects_in_a_row = 0
        for each in sorted_log:
            if each.time < ex.time:
                if each.outcome.outcome == "Correct":
                    corrects_in_a_row += 1
                else:
                    corrects_in_a_row = 0

                past += f"{each.time.day}/{each.time.month} {each.outcome.outcome} < "

        if ex.outcome.outcome == "Correct":
            corrects_in_a_row += 1
        else:
            corrects_in_a_row = 0

        if corrects_in_a_row:
            print(
                f"{ex.time.day}/{ex.time.month} {bookmark.origin.word}({bookmark.id}) {ex.outcome.outcome}:{corrects_in_a_row} < ({past})"
            )
        else:
            print(
                f"{ex.time.day}/{ex.time.month} {bookmark.origin.word}({bookmark.id}) {ex.outcome.outcome} < ({past})"
            )

        if bookmark.learned and ex.time == bookmark.learned_time:
            print("Learned!")
            print(" ")

    print("All Bookmarks")
    for bookmark in user.all_bookmarks():
        btime = datetime.datetime.strftime(bookmark.time, "%Y-%m-%d")
        print(f"{btime} " +
              ("[fit_for_study] " if bookmark.fit_for_study else "") +
              ("[Learned] " if bookmark.learned else "") + f"{bookmark.id} " +
              f"{bookmark.origin.word} / {bookmark.translation.word}")

    print("")
    print("Bookmarks to Study")
    for bookmark in user.bookmarks_to_study():
        btime = datetime.datetime.strftime(bookmark.time, "%Y-%m-%d")
        print(f"{btime} " +
              ("[Quality] " if bookmark.quality_bookmark() else "") +
              ("[fit_for_study] " if bookmark.fit_for_study else "") +
              ("[Learned] " if bookmark.learned else "") + f"{bookmark.id} " +
              f"{bookmark.origin.word} / {bookmark.translation.word}")
Example No. 24
#!/usr/bin/env python
"""

   Script that lists recent users

   To be called from a cron job.

"""
from sortedcontainers import SortedList
from zeeguu.core.model import User, Bookmark

from wordstats import Word

user = User.find_by_id(1890)
language = 'nl'

months_dict = dict()

for bookmark in Bookmark.query.filter_by(user=user):

    if not bookmark.origin.language.code == language:
        continue

    # if not bookmark.quality_bookmark():
    #     continue

    if len(bookmark.origin.word) < 4:
        continue

    date_key = bookmark.time.strftime("%y-%m")
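The excerpt stops right after computing date_key; a purely hypothetical continuation that tallies bookmarks per month is sketched below (the original, truncated code presumably did something richer, given the so-far unused SortedList and wordstats imports):

    # hypothetical: count this user's bookmarks per month (not the original code)
    months_dict[date_key] = months_dict.get(date_key, 0) + 1

# print the per-month counts in chronological order
for month, count in sorted(months_dict.items()):
    print(month, count)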
Example No. 25
#!/usr/bin/env python

"""

   Script that lists recent users

   To be called from a cron job.

"""

from zeeguu.core.model import User

for user_id in User.all_recent_user_ids():
    user = User.find_by_id(user_id)
    print(user.name)
    print(user.email)
Example No. 26
def user_article_history(user_id):
    user = User.find_by_id(user_id)

    sessions = UserReadingSession.find_by_user(user.id)

    dates = {}
    for each in sessions[:-650:-1]:
        if each.article and each.duration > 1000:
            if not dates.get(each.human_readable_date()):
                dates[each.human_readable_date()] = []

            # user_article = UserArticle.find(user, each.article)
            events_in_this_session = each.events_in_this_session()

            has_like = False
            feedback = ""
            difficulty = ""
            for event in events_in_this_session:
                if event.is_like():
                    has_like = True
                if event.is_feedback():
                    feedback = event.value
                    difficulty = "fk: " + str(each.article.fk_difficulty)

            dates[each.human_readable_date()].append({
                "date": each.human_readable_date(),
                "duration": each.human_readable_duration(),
                "start": each.start_time.strftime("%H:%M:%S"),
                "article": each.article.title,
                "liked": has_like,
                "feedback": feedback,
                "difficulty": difficulty,
            })

    text_result = f"<title>{user.name}</title>"
    text_result += f"<h1>{user.name} ({user.id})</h1><br/>"
    previous_title = ""
    for date in dates:
        text_result += date + "<br/>"
        for session in dates[date]:
            if previous_title != session["article"]:
                text_result += f"<br/>&nbsp;&nbsp;<b> {session['article']} </b><br/>"
            text_result += (
                f"&nbsp;&nbsp;&nbsp;&nbsp; {session['duration']} ({session['start']})"
            )
            if session["liked"]:
                text_result += " (LIKED) "
            text_result += session["difficulty"] + " " + session["feedback"] + " <br/>"
            previous_title = session["article"]

        text_result += "<br/><br/>"

    return text_result
Example No. 27
from zeeguu.core.model import User, UserActivityData, Bookmark, UserArticle, UserReadingSession, UserExerciseSession
from sys import argv

if len(argv) < 3:
    print("CALL: consolidate_accounts <primary_id> <secondary_id>")
    exit(-1)

PRIMARY_ID = argv[1]
SECONDARY_ID = argv[2]

tables_to_modify = [
    Bookmark, UserActivityData, UserArticle, UserReadingSession,
    UserExerciseSession
]

primary_user = User.find_by_id(PRIMARY_ID)
secondary_user = User.find_by_id(SECONDARY_ID)

for each_table in tables_to_modify:

    primary_user_items = each_table.query.filter_by(
        user_id=primary_user.id).all()
    secondary_user_items = each_table.query.filter_by(
        user_id=secondary_user.id).all()

    print(each_table.__tablename__)
    print(f"= Primary User Before:{len(primary_user_items)}")
    print(f"= Secondary User Before:{len(secondary_user_items)}")

    for each in secondary_user_items:
        each.user = primary_user
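The excerpt ends after reassigning the secondary user's rows; presumably a commit still has to follow to persist the change, roughly along these lines (the commit is not shown in the original snippet):

# assumption: persist the reassignment (zeeguu.core.db is used this way in Example No. 3)
import zeeguu.core

zeeguu.core.db.session.commit()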
Example No. 28
def create_account(
    db_session,
    username,
    password,
    invite_code,
    email,
    learned_language_code,
    native_language_code,
    learned_cefr_level,
):
    cohort_name = ""
    if password is None or len(password) < 4:
        raise Exception("Password should be at least 4 characters long")

    if not valid_invite_code(invite_code):
        raise Exception("Invitation code is not recognized. Please contact us.")

    cohort = Cohort.query.filter_by(inv_code=invite_code).first()
    if cohort:
        if cohort.cohort_still_has_capacity():
            cohort_name = cohort.name
        else:
            raise Exception(
                "No more places in this class. Please contact us ([email protected])."
            )

    try:

        learned_language = Language.find_or_create(learned_language_code)
        native_language = Language.find_or_create(native_language_code)

        new_user = User(
            email,
            username,
            password,
            invitation_code=invite_code,
            cohort=cohort,
            learned_language=learned_language,
            native_language=native_language,
        )

        db_session.add(new_user)

        learned_language = UserLanguage.find_or_create(
            db_session, new_user, learned_language
        )
        learned_language.cefr_level = int(learned_cefr_level)
        # TODO: although these are required... they should simply
        # be functions of CEFR level, so at some further point they should
        # be removed
        learned_language.declared_level_min = 0
        learned_language.declared_level_max = 11

        db_session.add(learned_language)

        if cohort:
            if cohort.is_cohort_of_teachers:
                teacher = Teacher(new_user)
                db_session.add(teacher)

        db_session.commit()

        send_new_user_account_email(username, invite_code, cohort_name)

        return new_user

    except sqlalchemy.exc.IntegrityError:
        raise Exception("There is already an account for this email.")
    except Exception as e:
        print(e)
        raise Exception("Could not create the account")
Example No. 29
import sys

import zeeguu.core
from faker import Faker
from zeeguu.core.model import User


def forget_user(user):
    old_email = user.email
    old_name = user.name

    fake = Faker()
    user.name = "Forgotten Learner"
    user.email = fake.email()

    session = zeeguu.core.db.session
    session.add(user)
    session.commit()

    print(
        f"Before: {old_name} / {old_email} \nAfter: {user.name} / {user.email}"
    )


if __name__ == "__main__":

    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <email>")
        exit(-1)

    user = User.find(sys.argv[1])
    forget_user(user)
Example No. 30
    def test_validate_email(self):
        random_email = self.faker.email()
        assert User.validate_email("", random_email)