Example No. 1
def create_user_goal():
    user_data = models.UserData.current()
    if not user_data:
        return api_invalid_param_response("User is not logged in.")

    user_override = request.request_user_data("email")
    if user_data.developer and user_override and user_override.key_email != user_data.key_email:
        user_data = user_override

    json = request.json
    title = json.get('title')
    if not title:
        return api_invalid_param_response('Title is invalid.')

    objective_descriptors = []

    goal_exercises = GoalList.exercises_in_current_goals(user_data)
    goal_videos = GoalList.videos_in_current_goals(user_data)

    if json:
        for obj in json['objectives']:
            if obj['type'] == 'GoalObjectiveAnyExerciseProficiency':
                objective_descriptors.append(obj)

            if obj['type'] == 'GoalObjectiveAnyVideo':
                objective_descriptors.append(obj)

            if obj['type'] == 'GoalObjectiveExerciseProficiency':
                obj['exercise'] = models.Exercise.get_by_name(obj['internal_id'])
                if not obj['exercise'] or not obj['exercise'].is_visible_to_current_user():
                    return api_invalid_param_response("Internal error: Could not find exercise.")
                if user_data.is_proficient_at(obj['exercise'].name):
                    return api_invalid_param_response("Exercise has already been completed.")
                if obj['exercise'].name in goal_exercises:
                    return api_invalid_param_response("Exercise is already an objective in a current goal.")
                objective_descriptors.append(obj)

            if obj['type'] == 'GoalObjectiveWatchVideo':
                obj['video'] = models.Video.get_for_readable_id(obj['internal_id'])
                if not obj['video']:
                    return api_invalid_param_response("Internal error: Could not find video.")
                user_video = models.UserVideo.get_for_video_and_user_data(obj['video'], user_data)
                if user_video and user_video.completed:
                    return api_invalid_param_response("Video has already been watched.")
                if obj['video'].readable_id in goal_videos:
                    return api_invalid_param_response("Video is already an objective in a current goal.")
                objective_descriptors.append(obj)

    if objective_descriptors:
        objectives = GoalObjective.from_descriptors(objective_descriptors,
            user_data)

        goal = Goal(parent=user_data, title=title, objectives=objectives)
        user_data.save_goal(goal)

        return goal.get_visible_data(None)
    else:
        return api_invalid_param_response("No objectives specified.")
Example No. 2
def delete_user_goals():
    user_data = models.UserData.current()
    if not user_data.developer:
        return api_unauthorized_response("UNAUTHORIZED")

    user_override = request.request_user_data("email")
    if user_override and user_override.key_email != user_data.key_email:
        user_data = user_override

    GoalList.delete_all_goals(user_data)

    return "Goals deleted"
Example No. 3
def exercise_graph_dict_json(user_data, user_exercise_graph, admin=False):
    graph_dicts = user_exercise_graph.graph_dicts()
    if admin:
        suggested_graph_dicts = []
        proficient_graph_dicts = []
        recent_graph_dicts = []
        review_graph_dicts = []
    else:
        suggested_graph_dicts = user_exercise_graph.suggested_graph_dicts()
        proficient_graph_dicts = user_exercise_graph.proficient_graph_dicts()
        recent_graph_dicts = user_exercise_graph.recent_graph_dicts()
        review_graph_dicts = user_exercise_graph.review_graph_dicts()

    for graph_dict in suggested_graph_dicts:
        graph_dict["status"] = "Suggested"

    for graph_dict in proficient_graph_dicts:
        graph_dict["status"] = "Proficient"

    for graph_dict in recent_graph_dicts:
        graph_dict["recent"] = True

    for graph_dict in review_graph_dicts:
        graph_dict["status"] = "Review"

        try:
            suggested_graph_dicts.remove(graph_dict)
        except ValueError:
            pass

    goal_exercises = GoalList.exercises_in_current_goals(user_data)

    graph_dict_data = []
    for graph_dict in graph_dicts:
        row = {
            'name': graph_dict["name"],
            'points': graph_dict.get("points", ''),
            'display_name': graph_dict["display_name"],
            'status': graph_dict.get("status"),
            'recent': graph_dict.get("recent", False),
            'progress': graph_dict["progress"],
            'progress_display': exercise_models.UserExercise.to_progress_display(graph_dict["progress"]),
            'longest_streak': graph_dict["longest_streak"],
            'h_position': graph_dict["h_position"],
            'v_position': graph_dict["v_position"],
            'goal_req': (graph_dict["name"] in goal_exercises),
            'states': user_exercise_graph.states(graph_dict["name"]),

            # get_by_name returns only exercises visible to current user
            'prereqs': [prereq["name"] for prereq in graph_dict["prerequisites"] if exercise_models.Exercise.get_by_name(prereq["name"])],
        }

        if admin:
            exercise = exercise_models.Exercise.get_by_name(graph_dict["name"])
            row["live"] = exercise and exercise.live
        graph_dict_data.append(row)

    return json.dumps(graph_dict_data)
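
The function returns a JSON-encoded list with one row per exercise; here is a sketch of a single row's shape, with placeholder values and the keys taken from the dict assembled above.

# Shape of one element of the serialized list above (placeholder values,
# keys inferred from the row dict in exercise_graph_dict_json).
import json

sample_row = {
    "name": "addition_1",        # hypothetical exercise name
    "points": "",                # '' when no points were recorded
    "display_name": "Addition 1",
    "status": "Suggested",       # "Suggested" | "Proficient" | "Review" | None
    "recent": False,
    "progress": 0.25,
    "progress_display": "25%",   # whatever to_progress_display renders
    "longest_streak": 3,
    "h_position": 1,
    "v_position": 4,
    "goal_req": True,            # exercise is an objective in a current goal
    "states": {},                # output of user_exercise_graph.states(name)
    "prereqs": ["counting_1"],   # hypothetical prerequisite
}

rows = json.loads(json.dumps([sample_row]))
assert rows[0]["goal_req"] is True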
Example No. 4
File: main.py Project: di445/server
    def get(self, url_id=""):
        url = Url.get_by_id(int(url_id))
        if url is None:
            raise MissingUrlException("Missing url '%s'" % url_id)

        user_data = UserData.current()
        if user_data:
            goals_updated = GoalList.update_goals(user_data, lambda goal: goal.just_visited_url(user_data, url))

        self.redirect(url.url.encode("utf8"))
Example No. 5
def get_user_goals():
    student = models.UserData.current() or models.UserData.pre_phantom()
    user_override = request.request_user_data("email")
    if user_override and user_override.key_email != student.key_email:
        if not user_override.is_visible_to(student):
            return api_unauthorized_response("Cannot view this profile")
        else:
            # Allow access to this student's profile
            student = user_override

    goals = GoalList.get_all_goals(student)
    return [g.get_visible_data() for g in goals]
Example No. 6
def recent_activity_for(user_data, dt_start, dt_end):
    query_user_badges = models_badges.UserBadge.get_for_user_data_between_dts(
            user_data, dt_start, dt_end)
    query_problem_logs = models.ProblemLog.get_for_user_data_between_dts(
            user_data, dt_start, dt_end)
    query_video_logs = models.VideoLog.get_for_user_data_between_dts(
            user_data, dt_start, dt_end)
    query_goals = GoalList.get_updated_between_dts(user_data, dt_start, dt_end)

    results = util.async_queries([query_user_badges, query_problem_logs,
                                  query_video_logs, query_goals], limit=200)

    list_recent_activity_types = [
        recent_badge_activity(results[0].get_result()),
        recent_exercise_activity(results[1].get_result()),
        recent_video_activity(results[2].get_result(), user_data),
        recent_goal_activity(results[3].get_result()),
    ]
    list_recent_activity = [activity
                            for sublist in list_recent_activity_types
                            for activity in sublist]

    return _collapse_recent_activity(list_recent_activity)
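
The nested comprehension at the end flattens the four per-type activity lists into one list; an equivalent standalone restatement with placeholder data:

# Standalone equivalent of the flattening comprehension above (placeholder data).
import itertools

list_recent_activity_types = [
    ["badge_activity"],
    ["exercise_activity_1", "exercise_activity_2"],
    [],
    ["goal_activity"],
]

flattened = [activity
             for sublist in list_recent_activity_types
             for activity in sublist]

assert flattened == list(itertools.chain.from_iterable(list_recent_activity_types))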
Example No. 7
def recent_activity_for(user_data, dt_start, dt_end):
    query_user_badges = models_badges.UserBadge.get_for_user_data_between_dts(
            user_data, dt_start, dt_end)
    query_problem_logs = exercise_models.ProblemLog.get_for_user_data_between_dts(
            user_data, dt_start, dt_end)
    query_video_logs = video_models.VideoLog.get_for_user_data_between_dts(
            user_data, dt_start, dt_end)
    query_goals = GoalList.get_updated_between_dts(user_data, dt_start, dt_end)

    results = util.async_queries([query_user_badges, query_problem_logs,
                                  query_video_logs, query_goals], limit=200)

    list_recent_activity_types = [
        recent_badge_activity(results[0].get_result()),
        recent_exercise_activity(results[1].get_result()),
        recent_video_activity(results[2].get_result(), user_data),
        recent_goal_activity(results[3].get_result()),
    ]
    list_recent_activity = [activity
                            for sublist in list_recent_activity_types
                            for activity in sublist]

    return _collapse_recent_activity(list_recent_activity)
Example No. 8
def attempt_problem(user_data, user_exercise, problem_number, attempt_number,
    attempt_content, sha1, seed, completed, count_hints, time_taken,
    review_mode, exercise_non_summative, problem_type, ip_address):

    if user_exercise and user_exercise.belongs_to(user_data):
        dt_now = datetime.datetime.now()
        exercise = user_exercise.exercise_model

        old_graph = user_exercise.get_user_exercise_graph()

        user_exercise.last_done = dt_now
        user_exercise.seconds_per_fast_problem = exercise.seconds_per_fast_problem
        user_exercise.summative = exercise.summative

        user_data.record_activity(user_exercise.last_done)

        # If a non-admin tries to answer a problem out-of-order, just ignore it
        if problem_number != user_exercise.total_done + 1 and not user_util.is_current_user_developer():
            # Only admins can answer problems out of order.
            raise QuietException("Problem number out of order (%s vs %s) for user_id: %s submitting attempt content: %s with seed: %s" % (problem_number, user_exercise.total_done + 1, user_data.user_id, attempt_content, seed))

        if len(sha1) <= 0:
            raise Exception("Missing sha1 hash of problem content.")

        if len(seed) <= 0:
            raise Exception("Missing seed for problem content.")

        if len(attempt_content) > 500:
            raise Exception("Attempt content exceeded maximum length.")

        # Build up problem log for deferred put
        problem_log = models.ProblemLog(
                key_name=models.ProblemLog.key_for(user_data, user_exercise.exercise, problem_number),
                user=user_data.user,
                exercise=user_exercise.exercise,
                problem_number=problem_number,
                time_taken=time_taken,
                time_done=dt_now,
                count_hints=count_hints,
                hint_used=count_hints > 0,
                correct=completed and not count_hints and (attempt_number == 1),
                sha1=sha1,
                seed=seed,
                problem_type=problem_type,
                count_attempts=attempt_number,
                attempts=[attempt_content],
                ip_address=ip_address,
                review_mode=review_mode,
        )

        if exercise.summative:
            problem_log.exercise_non_summative = exercise_non_summative

        first_response = (attempt_number == 1 and count_hints == 0) or (count_hints == 1 and attempt_number == 0)

        if user_exercise.total_done > 0 and user_exercise.streak == 0 and first_response:
            bingo('hints_keep_going_after_wrong')

        just_earned_proficiency = False

        # Users can only attempt problems for themselves, so the experiment
        # bucket always corresponds to the one for this current user
        struggling_model = StrugglingExperiment.get_alternative_for_user(
                 user_data, current_user=True) or StrugglingExperiment.DEFAULT
        if completed:

            user_exercise.total_done += 1

            if problem_log.correct:

                proficient = user_data.is_proficient_at(user_exercise.exercise)
                explicitly_proficient = user_data.is_explicitly_proficient_at(user_exercise.exercise)
                suggested = user_data.is_suggested(user_exercise.exercise)
                problem_log.suggested = suggested

                problem_log.points_earned = points.ExercisePointCalculator(user_exercise, suggested, proficient)
                user_data.add_points(problem_log.points_earned)

                # Streak only increments if problem was solved correctly (on first attempt)
                user_exercise.total_correct += 1
                user_exercise.streak += 1
                user_exercise.longest_streak = max(user_exercise.longest_streak, user_exercise.streak)

                user_exercise.update_proficiency_model(correct=True)

                bingo([
                    'struggling_problems_correct',
                    'suggested_activity_problems_correct',
                ])

                if user_exercise.progress >= 1.0 and not explicitly_proficient:
                    bingo([
                        'hints_gained_proficiency_all',
                        'struggling_gained_proficiency_all',
                        'suggested_activity_gained_proficiency_all',
                    ])
                    if not user_exercise.has_been_proficient():
                        bingo('hints_gained_new_proficiency')

                    if user_exercise.history_indicates_struggling(struggling_model):
                        bingo('struggling_gained_proficiency_post_struggling')

                    user_exercise.set_proficient(user_data)
                    user_data.reassess_if_necessary()

                    just_earned_proficiency = True
                    problem_log.earned_proficiency = True

            util_badges.update_with_user_exercise(
                user_data,
                user_exercise,
                include_other_badges=True,
                action_cache=last_action_cache.LastActionCache.get_cache_and_push_problem_log(user_data, problem_log))

            # Update phantom user notifications
            util_notify.update(user_data, user_exercise)

            bingo([
                'hints_problems_done',
                'struggling_problems_done',
                'suggested_activity_problems_done',
            ])

        else:
            # Only count wrong answer at most once per problem
            if first_response:
                user_exercise.update_proficiency_model(correct=False)
                bingo([
                    'hints_wrong_problems',
                    'struggling_problems_wrong',
                    'suggested_activity_problems_wrong',
                ])

            if user_exercise.is_struggling(struggling_model):
                bingo('struggling_struggled_binary')

        # If this is the first attempt, update review schedule appropriately
        if attempt_number == 1:
            user_exercise.schedule_review(completed)

        user_exercise_graph = models.UserExerciseGraph.get_and_update(user_data, user_exercise)

        goals_updated = GoalList.update_goals(user_data,
            lambda goal: goal.just_did_exercise(user_data, user_exercise,
                just_earned_proficiency))

        user_data.uservideocss_version += 1
        if user_exercise.progress >= 1.0:
            UserVideoCss.set_completed(user_data.key(), exercise.key(), user_data.uservideocss_version)
        else:
            UserVideoCss.set_started(user_data.key(), exercise.key(), user_data.uservideocss_version)

        # Bulk put
        db.put([user_data, user_exercise, user_exercise_graph.cache])

        # Defer the put of ProblemLog for now, as we think it might be causing hot tablets
        # and want to shift it off to an automatically-retrying task queue.
        # http://ikaisays.com/2011/01/25/app-engine-datastore-tip-monotonically-increasing-values-are-bad/
        deferred.defer(models.commit_problem_log, problem_log,
                       _queue="problem-log-queue")

        if user_data is not None and user_data.coaches:
            # Making a separate queue for the log summaries so we can clearly see how much they are getting used
            deferred.defer(models.commit_log_summary_coaches, problem_log, user_data.coaches,
                       _queue="log-summary-queue",
                       )

        return user_exercise, user_exercise_graph, goals_updated
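
Two of the derived flags in attempt_problem are easy to misread, so here is a small standalone sketch that restates them exactly as written above and checks a few cases.

# Standalone restatement of the `correct` and `first_response` expressions
# used in attempt_problem() above.
def is_correct(completed, count_hints, attempt_number):
    # A problem counts as correct only when it was completed with no hints
    # on the very first attempt.
    return completed and not count_hints and (attempt_number == 1)

def is_first_response(attempt_number, count_hints):
    # True for the user's first reaction to a problem: either a first attempt
    # with no hints taken, or a first hint taken before any attempt.
    return (attempt_number == 1 and count_hints == 0) or \
           (count_hints == 1 and attempt_number == 0)

assert is_correct(True, 0, 1)
assert not is_correct(True, 1, 1)   # a hint was used
assert not is_correct(True, 0, 2)   # not the first attempt
assert is_first_response(0, 1)      # hint taken before any attempt
assert not is_first_response(2, 0)  # already past the first attempt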
Example No. 9
def attempt_problem(user_data, user_exercise, problem_number, attempt_number,
    attempt_content, sha1, seed, completed, count_hints, time_taken,
    review_mode, topic_mode, problem_type, ip_address, card, stack_uid,
    topic_id, cards_done, cards_left, async_problem_log_put=True,
    async_stack_log_put=True):

    if user_exercise and user_exercise.belongs_to(user_data):
        dt_now = datetime.datetime.now()
        exercise = user_exercise.exercise_model

        user_exercise.last_done = dt_now
        user_exercise.seconds_per_fast_problem = exercise.seconds_per_fast_problem

        user_data.record_activity(user_exercise.last_done)

        # If somebody tries to answer a problem out-of-order, we need to raise
        # an exception.
        if problem_number != user_exercise.total_done + 1:

            # If we hit this error, make absolutely sure the UserExerciseCache
            # is up-to-date with this exercise's latest UserExercise state.
            # If they are out of sync, the user may repeatedly hit this crash.
            user_exercise_cache = exercise_models.UserExerciseCache.get(user_data)
            user_exercise_cache.update(user_exercise)
            user_exercise_cache.put()

            # If the client thinks it is ahead of the server, then we have an
            # issue worth paying attention to.
            error_class = Exception

            # If the client is behind the server, keep the error log quiet
            # because this is so easily caused by having two exercise tabs open
            # and letting one tab's client fall behind the server's state.
            if problem_number < user_exercise.total_done + 1:
                error_class = custom_exceptions.QuietException

            # If the client isn't aware that the user has recently logged out,
            # keep the error log quiet because this is so easily caused by
            # logging out of one tab and continuing work in another.
            if user_exercise.total_done == 0 and user_data.is_phantom:
                error_class = custom_exceptions.QuietException

            msg = "Problem out of order (%s, %s) for uid:%s, content:%s, seed:%s"

            raise error_class(msg % (problem_number,
                user_exercise.total_done + 1, user_data.user_id,
                attempt_content, seed))

        if len(sha1) <= 0:
            raise Exception("Missing sha1 hash of problem content.")

        if len(seed) <= 0:
            raise Exception("Missing seed for problem content.")

        if len(attempt_content) > 500:
            raise Exception("Attempt content exceeded maximum length.")

        # Build up problem log for deferred put
        problem_log = exercise_models.ProblemLog(
                key_name=exercise_models.ProblemLog.key_for(user_data, user_exercise.exercise, problem_number),
                user=user_data.user,
                user_id=user_data.user_id,
                exercise=user_exercise.exercise,
                problem_number=problem_number,
                time_taken=time_taken,
                time_done=dt_now,
                count_hints=count_hints,
                hint_used=count_hints > 0,
                correct=completed and not count_hints and (attempt_number == 1),
                sha1=sha1,
                seed=seed,
                problem_type=problem_type,
                count_attempts=attempt_number,
                attempts=[attempt_content],
                ip_address=ip_address,
                review_mode=review_mode,
                topic_mode=topic_mode,
        )

        first_response = (attempt_number == 1 and count_hints == 0) or (count_hints == 1 and attempt_number == 0)


        just_earned_proficiency = False

        # Users can only attempt problems for themselves, so the experiment
        # bucket always corresponds to the one for this current user
        struggling_model = StrugglingExperiment.get_alternative_for_user(
                 user_data, current_user=True) or StrugglingExperiment.DEFAULT
        if completed:

            user_exercise.total_done += 1

            if problem_log.correct:

                proficient = user_data.is_proficient_at(user_exercise.exercise)
                explicitly_proficient = user_data.is_explicitly_proficient_at(user_exercise.exercise)
                suggested = user_data.is_suggested(user_exercise.exercise)
                problem_log.suggested = suggested

                problem_log.points_earned = points.ExercisePointCalculator(user_exercise, topic_mode, suggested, proficient)
                user_data.add_points(problem_log.points_earned)

                # Streak only increments if problem was solved correctly (on first attempt)
                user_exercise.total_correct += 1
                user_exercise.streak += 1
                user_exercise.longest_streak = max(user_exercise.longest_streak, user_exercise.streak)

                user_exercise.update_proficiency_model(correct=True)

                gae_bingo.gae_bingo.bingo([
                    'struggling_problems_correct',
                    'problem_correct_binary', # Core metric
                    'problem_correct_count', # Core metric
                ])

                if user_exercise.progress >= 1.0 and not explicitly_proficient:
                    gae_bingo.gae_bingo.bingo([
                        'struggling_gained_proficiency_all',
                    ])
                    if not user_exercise.has_been_proficient():
                        gae_bingo.gae_bingo.bingo([
                            'new_proficiency_binary', # Core metric
                            'new_proficiency_count', # Core metric
                            ])

                    if user_exercise.history_indicates_struggling(struggling_model):
                        gae_bingo.gae_bingo.bingo(
                            'struggling_gained_proficiency_post_struggling')

                    user_exercise.set_proficient(user_data)
                    user_data.reassess_if_necessary()

                    just_earned_proficiency = True
                    problem_log.earned_proficiency = True

            badges.util_badges.update_with_user_exercise(
                user_data,
                user_exercise,
                include_other_badges=True,
                action_cache=badges.last_action_cache.LastActionCache.get_cache_and_push_problem_log(user_data, problem_log))

            # Update phantom user notifications
            phantom_users.util_notify.update(user_data, user_exercise)

            gae_bingo.gae_bingo.bingo([
                'struggling_problems_done',
                'problem_attempt_binary', # Core metric
                'problem_attempt_count', # Core metric
            ])

        else:
            # Only count wrong answer at most once per problem
            if first_response:
                user_exercise.update_proficiency_model(correct=False)
                gae_bingo.gae_bingo.bingo([
                    'struggling_problems_wrong',
                    'problem_incorrect_count', # Core metric
                    'problem_incorrect_binary', # Core metric
                ])

            if user_exercise.is_struggling(struggling_model):
                gae_bingo.gae_bingo.bingo('struggling_struggled_binary')

        # If this is the first attempt, update review schedule appropriately
        if attempt_number == 1:
            user_exercise.schedule_review(completed)

        user_exercise_graph = exercise_models.UserExerciseGraph.get_and_update(user_data, user_exercise)

        goals_updated = GoalList.update_goals(user_data,
            lambda goal: goal.just_did_exercise(user_data, user_exercise,
                just_earned_proficiency))

        # Bulk put
        db.put([user_data, user_exercise, user_exercise_graph.cache])

        if async_problem_log_put:
            # Defer the put of ProblemLog for now, as we think it might be causing hot tablets
            # and want to shift it off to an automatically-retrying task queue.
            # http://ikaisays.com/2011/01/25/app-engine-datastore-tip-monotonically-increasing-values-are-bad/
            deferred.defer(exercise_models.commit_problem_log, problem_log,
                           _queue="problem-log-queue",
                           _url="/_ah/queue/deferred_problemlog")
        else:
            exercise_models.commit_problem_log(problem_log)

        if user_data is not None and user_data.coaches:
            # Making a separate queue for the log summaries so we can clearly see how much they are getting used
            deferred.defer(video_models.commit_log_summary_coaches, problem_log, user_data.coaches,
                       _queue="log-summary-queue",
                       _url="/_ah/queue/deferred_log_summary")

        if user_data is not None and completed and stack_uid:
            # Update the stack log iff the user just finished this card.
            # If the request is missing the UID of the stack we're on, then
            # this is an old stack and we just won't log it
            stack_log_source = exercise_models.StackLog(
                    key_name=exercise_models.StackLog.key_for(user_data.user_id, stack_uid),
                    user_id=user_data.user_id,
                    time_last_done=datetime.datetime.now(),
                    review_mode=review_mode,
                    topic_mode=topic_mode,
                    exercise_id=user_exercise.exercise,
                    topic_id=topic_id,
                    cards_list=[],
                    extra_data={},
            )

            if topic_mode:
                stack_log_source.extra_data['topic_mode'] = calc_topic_mode_log_stats(
                        user_exercise_graph, topic_id, just_earned_proficiency)

            if async_stack_log_put:
                deferred.defer(exercise_models.commit_stack_log,
                               stack_log_source, card, cards_done, cards_left,
                               problem_log.__class__.__name__,
                               str(problem_log.key()),
                               _queue="stack-log-queue",
                               _url="/_ah/queue/deferred_stacklog")
            else:
                exercise_models.commit_stack_log(
                    stack_log_source, card, cards_done, cards_left,
                    problem_log.__class__.__name__, str(problem_log.key()))

        return user_exercise, user_exercise_graph, goals_updated
Example No. 10
    user_data_coach = get_user_data_coach_from_request()

    try:
        students = get_students_data_from_request(user_data_coach)
    except Exception, e:
        return api_invalid_param_response(e.message)

    students = sorted(students, key=lambda student: student.nickname)
    user_exercise_graphs = models.UserExerciseGraph.get(students)

    return_data = []
    for student, uex_graph in izip(students, user_exercise_graphs):
        student_data = {}
        student_data['email'] = student.email
        student_data['nickname'] = student.nickname
        goals = GoalList.get_current_goals(student)
        student_data['goals'] = [g.get_visible_data(uex_graph) for g in goals]
        return_data.append(student_data)

    return return_data

@route("/api/v1/user/goals", methods=["POST"])
@oauth_optional()
@api_create_phantom
@jsonp
@jsonify
def create_user_goal():
    user_data = models.UserData.current()
    if not user_data:
        return api_invalid_param_response("User is not logged in.")
Example No. 11
def exercise_graph_dict_json(user_data, admin=False):
    user_exercise_graph = exercise_models.UserExerciseGraph.get(user_data)
    if user_data.reassess_from_graph(user_exercise_graph):
        user_data.put()

    graph_dicts = user_exercise_graph.graph_dicts()
    if admin:
        suggested_graph_dicts = []
        proficient_graph_dicts = []
        recent_graph_dicts = []
        review_graph_dicts = []
    else:
        suggested_graph_dicts = user_exercise_graph.suggested_graph_dicts()
        proficient_graph_dicts = user_exercise_graph.proficient_graph_dicts()
        recent_graph_dicts = user_exercise_graph.recent_graph_dicts()
        review_graph_dicts = user_exercise_graph.review_graph_dicts()

    for graph_dict in suggested_graph_dicts:
        graph_dict["status"] = "Suggested"

    for graph_dict in proficient_graph_dicts:
        graph_dict["status"] = "Proficient"

    for graph_dict in recent_graph_dicts:
        graph_dict["recent"] = True

    for graph_dict in review_graph_dicts:
        graph_dict["status"] = "Review"

        try:
            suggested_graph_dicts.remove(graph_dict)
        except ValueError:
            pass

    goal_exercises = GoalList.exercises_in_current_goals(user_data)

    graph_dict_data = []
    for graph_dict in graph_dicts:
        row = {
            'name': graph_dict["name"],
            'points': graph_dict.get("points", ''),
            'display_name': graph_dict["display_name"],
            'status': graph_dict.get("status"),
            'recent': graph_dict.get("recent", False),
            'progress': graph_dict["progress"],
            'progress_display': exercise_models.UserExercise.to_progress_display(
                graph_dict["progress"]),
            'longest_streak': graph_dict["longest_streak"],
            'h_position': graph_dict["h_position"],
            'v_position': graph_dict["v_position"],
            'goal_req': (graph_dict["name"] in goal_exercises),
            'states': user_exercise_graph.states(graph_dict["name"]),

            # get_by_name returns only exercises visible to current user
            'prereqs': [prereq["name"] for prereq in graph_dict["prerequisites"]
                        if exercise_models.Exercise.get_by_name(prereq["name"])],
        }

        if admin:
            exercise = exercise_models.Exercise.get_by_name(graph_dict["name"])
            row["live"] = exercise and exercise.live
        graph_dict_data.append(row)

    return json.dumps(graph_dict_data)
Example No. 12
def attempt_problem(user_data,
                    user_exercise,
                    problem_number,
                    attempt_number,
                    attempt_content,
                    sha1,
                    seed,
                    completed,
                    count_hints,
                    time_taken,
                    review_mode,
                    topic_mode,
                    problem_type,
                    ip_address,
                    async_problem_log_put=True):

    if user_exercise and user_exercise.belongs_to(user_data):
        dt_now = datetime.datetime.now()
        exercise = user_exercise.exercise_model

        old_graph = user_exercise.get_user_exercise_graph()

        user_exercise.last_done = dt_now
        user_exercise.seconds_per_fast_problem = exercise.seconds_per_fast_problem

        user_data.record_activity(user_exercise.last_done)

        # If a non-admin tries to answer a problem out-of-order, just ignore it
        if problem_number != user_exercise.total_done + 1 and not user_util.is_current_user_developer():
            # Only admins can answer problems out of order.
            raise custom_exceptions.QuietException(
                "Problem number out of order (%s vs %s) for user_id: %s submitting attempt content: %s with seed: %s"
                % (problem_number, user_exercise.total_done + 1,
                   user_data.user_id, attempt_content, seed))

        if len(sha1) <= 0:
            raise Exception("Missing sha1 hash of problem content.")

        if len(seed) <= 0:
            raise Exception("Missing seed for problem content.")

        if len(attempt_content) > 500:
            raise Exception("Attempt content exceeded maximum length.")

        # Build up problem log for deferred put
        problem_log = exercise_models.ProblemLog(
            key_name=exercise_models.ProblemLog.key_for(
                user_data, user_exercise.exercise, problem_number),
            user=user_data.user,
            exercise=user_exercise.exercise,
            problem_number=problem_number,
            time_taken=time_taken,
            time_done=dt_now,
            count_hints=count_hints,
            hint_used=count_hints > 0,
            correct=completed and not count_hints and (attempt_number == 1),
            sha1=sha1,
            seed=seed,
            problem_type=problem_type,
            count_attempts=attempt_number,
            attempts=[attempt_content],
            ip_address=ip_address,
            review_mode=review_mode,
            topic_mode=topic_mode,
        )

        first_response = (attempt_number == 1
                          and count_hints == 0) or (count_hints == 1
                                                    and attempt_number == 0)

        if user_exercise.total_done > 0 and user_exercise.streak == 0 and first_response:
            bingo('hints_keep_going_after_wrong')

        just_earned_proficiency = False

        # Users can only attempt problems for themselves, so the experiment
        # bucket always corresponds to the one for this current user
        struggling_model = StrugglingExperiment.get_alternative_for_user(
            user_data, current_user=True) or StrugglingExperiment.DEFAULT
        if completed:

            user_exercise.total_done += 1

            if problem_log.correct:

                proficient = user_data.is_proficient_at(user_exercise.exercise)
                explicitly_proficient = user_data.is_explicitly_proficient_at(
                    user_exercise.exercise)
                suggested = user_data.is_suggested(user_exercise.exercise)
                problem_log.suggested = suggested

                problem_log.points_earned = points.ExercisePointCalculator(
                    user_exercise, topic_mode, suggested, proficient)
                user_data.add_points(problem_log.points_earned)

                # Streak only increments if problem was solved correctly (on first attempt)
                user_exercise.total_correct += 1
                user_exercise.streak += 1
                user_exercise.longest_streak = max(
                    user_exercise.longest_streak, user_exercise.streak)

                user_exercise.update_proficiency_model(correct=True)

                bingo([
                    'struggling_problems_correct',
                ])

                if user_exercise.progress >= 1.0 and not explicitly_proficient:
                    bingo([
                        'hints_gained_proficiency_all',
                        'struggling_gained_proficiency_all',
                    ])
                    if not user_exercise.has_been_proficient():
                        bingo('hints_gained_new_proficiency')

                    if user_exercise.history_indicates_struggling(
                            struggling_model):
                        bingo('struggling_gained_proficiency_post_struggling')

                    user_exercise.set_proficient(user_data)
                    user_data.reassess_if_necessary()

                    just_earned_proficiency = True
                    problem_log.earned_proficiency = True

            badges.util_badges.update_with_user_exercise(
                user_data,
                user_exercise,
                include_other_badges=True,
                action_cache=badges.last_action_cache.LastActionCache.get_cache_and_push_problem_log(user_data, problem_log))

            # Update phantom user notifications
            phantom_users.util_notify.update(user_data, user_exercise)

            bingo([
                'hints_problems_done',
                'struggling_problems_done',
            ])

        else:
            # Only count wrong answer at most once per problem
            if first_response:
                user_exercise.update_proficiency_model(correct=False)
                bingo([
                    'hints_wrong_problems',
                    'struggling_problems_wrong',
                ])

            if user_exercise.is_struggling(struggling_model):
                bingo('struggling_struggled_binary')

        # If this is the first attempt, update review schedule appropriately
        if attempt_number == 1:
            user_exercise.schedule_review(completed)

        user_exercise_graph = exercise_models.UserExerciseGraph.get_and_update(
            user_data, user_exercise)

        goals_updated = GoalList.update_goals(
            user_data, lambda goal: goal.just_did_exercise(
                user_data, user_exercise, just_earned_proficiency))

        # Bulk put
        db.put([user_data, user_exercise, user_exercise_graph.cache])

        if async_problem_log_put:
            # Defer the put of ProblemLog for now, as we think it might be causing hot tablets
            # and want to shift it off to an automatically-retrying task queue.
            # http://ikaisays.com/2011/01/25/app-engine-datastore-tip-monotonically-increasing-values-are-bad/
            deferred.defer(exercise_models.commit_problem_log,
                           problem_log,
                           _queue="problem-log-queue",
                           _url="/_ah/queue/deferred_problemlog")
        else:
            exercise_models.commit_problem_log(problem_log)

        if user_data is not None and user_data.coaches:
            # Making a separate queue for the log summaries so we can clearly see how much they are getting used
            deferred.defer(video_models.commit_log_summary_coaches,
                           problem_log,
                           user_data.coaches,
                           _queue="log-summary-queue",
                           _url="/_ah/queue/deferred_log_summary")

        return user_exercise, user_exercise_graph, goals_updated
Example No. 13
def attempt_problem(user_data,
                    user_exercise,
                    problem_number,
                    attempt_number,
                    attempt_content,
                    sha1,
                    seed,
                    completed,
                    count_hints,
                    time_taken,
                    review_mode,
                    topic_mode,
                    problem_type,
                    ip_address,
                    card,
                    stack_uid,
                    topic_id,
                    cards_done,
                    cards_left,
                    async_problem_log_put=True,
                    async_stack_log_put=True):

    if user_exercise and user_exercise.belongs_to(user_data):
        dt_now = datetime.datetime.now()
        exercise = user_exercise.exercise_model

        user_exercise.last_done = dt_now
        user_exercise.seconds_per_fast_problem = exercise.seconds_per_fast_problem

        user_data.record_activity(user_exercise.last_done)

        # If somebody tries to answer a problem out-of-order, we need to raise
        # an exception.
        if problem_number != user_exercise.total_done + 1:

            # If we hit this error, make absolutely sure the UserExerciseCache
            # is up-to-date with this exercise's latest UserExercise state.
            # If they are out of sync, the user may repeatedly hit this crash.
            user_exercise_cache = exercise_models.UserExerciseCache.get(
                user_data)
            user_exercise_cache.update(user_exercise)
            user_exercise_cache.put()

            # If the client thinks it is ahead of the server, then we have an
            # issue worth paying attention to.
            error_class = Exception

            # If the client is behind the server, keep the error log quiet
            # because this is so easily caused by having two exercise tabs open
            # and letting one tab's client fall behind the server's state.
            if problem_number < user_exercise.total_done + 1:
                error_class = custom_exceptions.QuietException

            # If the client isn't aware that the user has recently logged out,
            # keep the error log quiet because this is so easily caused by
            # logging out of one tab and continuing work in another.
            if user_exercise.total_done == 0 and user_data.is_phantom:
                error_class = custom_exceptions.QuietException

            msg = "Problem out of order (%s, %s) for uid:%s, content:%s, seed:%s"

            raise error_class(msg %
                              (problem_number, user_exercise.total_done + 1,
                               user_data.user_id, attempt_content, seed))

        if len(sha1) <= 0:
            raise Exception("Missing sha1 hash of problem content.")

        if len(seed) <= 0:
            raise Exception("Missing seed for problem content.")

        if len(attempt_content) > 500:
            raise Exception("Attempt content exceeded maximum length.")

        # Build up problem log for deferred put
        problem_log = exercise_models.ProblemLog(
            key_name=exercise_models.ProblemLog.key_for(
                user_data, user_exercise.exercise, problem_number),
            user=user_data.user,
            user_id=user_data.user_id,
            exercise=user_exercise.exercise,
            problem_number=problem_number,
            time_taken=time_taken,
            time_done=dt_now,
            count_hints=count_hints,
            hint_used=count_hints > 0,
            correct=completed and not count_hints and (attempt_number == 1),
            sha1=sha1,
            seed=seed,
            problem_type=problem_type,
            count_attempts=attempt_number,
            attempts=[attempt_content],
            ip_address=ip_address,
            review_mode=review_mode,
            topic_mode=topic_mode,
        )

        first_response = (attempt_number == 1
                          and count_hints == 0) or (count_hints == 1
                                                    and attempt_number == 0)

        just_earned_proficiency = False

        # Users can only attempt problems for themselves, so the experiment
        # bucket always corresponds to the one for this current user
        struggling_model = StrugglingExperiment.get_alternative_for_user(
            user_data, current_user=True) or StrugglingExperiment.DEFAULT
        if completed:

            user_exercise.total_done += 1

            if problem_log.correct:

                proficient = user_data.is_proficient_at(user_exercise.exercise)
                explicitly_proficient = user_data.is_explicitly_proficient_at(
                    user_exercise.exercise)
                suggested = user_data.is_suggested(user_exercise.exercise)
                problem_log.suggested = suggested

                problem_log.points_earned = points.ExercisePointCalculator(
                    user_exercise, topic_mode, suggested, proficient)
                user_data.add_points(problem_log.points_earned)

                # Streak only increments if problem was solved correctly (on first attempt)
                user_exercise.total_correct += 1
                user_exercise.streak += 1
                user_exercise.longest_streak = max(
                    user_exercise.longest_streak, user_exercise.streak)

                user_exercise.update_proficiency_model(correct=True)

                gae_bingo.gae_bingo.bingo([
                    'struggling_problems_correct',
                    'problem_correct_binary',  # Core metric
                    'problem_correct_count',  # Core metric
                ])

                if user_exercise.progress >= 1.0 and not explicitly_proficient:
                    gae_bingo.gae_bingo.bingo([
                        'struggling_gained_proficiency_all',
                    ])
                    if not user_exercise.has_been_proficient():
                        gae_bingo.gae_bingo.bingo([
                            'new_proficiency_binary',  # Core metric
                            'new_proficiency_count',  # Core metric
                        ])

                    if user_exercise.history_indicates_struggling(
                            struggling_model):
                        gae_bingo.gae_bingo.bingo(
                            'struggling_gained_proficiency_post_struggling')

                    user_exercise.set_proficient(user_data)
                    user_data.reassess_if_necessary()

                    just_earned_proficiency = True
                    problem_log.earned_proficiency = True

            badges.util_badges.update_with_user_exercise(
                user_data,
                user_exercise,
                include_other_badges=True,
                action_cache=badges.last_action_cache.LastActionCache.get_cache_and_push_problem_log(user_data, problem_log))

            # Update phantom user notifications
            phantom_users.util_notify.update(user_data, user_exercise)

            gae_bingo.gae_bingo.bingo([
                'struggling_problems_done',
                'problem_attempt_binary',  # Core metric
                'problem_attempt_count',  # Core metric
            ])

        else:
            # Only count wrong answer at most once per problem
            if first_response:
                user_exercise.update_proficiency_model(correct=False)
                gae_bingo.gae_bingo.bingo([
                    'struggling_problems_wrong',
                    'problem_incorrect_count',  # Core metric
                    'problem_incorrect_binary',  # Core metric
                ])

            if user_exercise.is_struggling(struggling_model):
                gae_bingo.gae_bingo.bingo('struggling_struggled_binary')

        # If this is the first attempt, update review schedule appropriately
        if attempt_number == 1:
            user_exercise.schedule_review(completed)

        user_exercise_graph = exercise_models.UserExerciseGraph.get_and_update(
            user_data, user_exercise)

        goals_updated = GoalList.update_goals(
            user_data, lambda goal: goal.just_did_exercise(
                user_data, user_exercise, just_earned_proficiency))

        # Bulk put
        db.put([user_data, user_exercise, user_exercise_graph.cache])

        if async_problem_log_put:
            # Defer the put of ProblemLog for now, as we think it might be causing hot tablets
            # and want to shift it off to an automatically-retrying task queue.
            # http://ikaisays.com/2011/01/25/app-engine-datastore-tip-monotonically-increasing-values-are-bad/
            deferred.defer(exercise_models.commit_problem_log,
                           problem_log,
                           _queue="problem-log-queue",
                           _url="/_ah/queue/deferred_problemlog")
        else:
            exercise_models.commit_problem_log(problem_log)

        if user_data is not None and user_data.coaches:
            # Making a separate queue for the log summaries so we can clearly see how much they are getting used
            deferred.defer(video_models.commit_log_summary_coaches,
                           problem_log,
                           user_data.coaches,
                           _queue="log-summary-queue",
                           _url="/_ah/queue/deferred_log_summary")

        if user_data is not None and completed and stack_uid:
            # Update the stack log iff the user just finished this card.
            # If the request is missing the UID of the stack we're on, then
            # this is an old stack and we just won't log it
            stack_log_source = exercise_models.StackLog(
                key_name=exercise_models.StackLog.key_for(
                    user_data.user_id, stack_uid),
                user_id=user_data.user_id,
                time_last_done=datetime.datetime.now(),
                review_mode=review_mode,
                topic_mode=topic_mode,
                exercise_id=user_exercise.exercise,
                topic_id=topic_id,
                cards_list=[],
                extra_data={},
            )

            if topic_mode:
                stack_log_source.extra_data['topic_mode'] = calc_topic_mode_log_stats(
                        user_exercise_graph, topic_id, just_earned_proficiency)

            if async_stack_log_put:
                deferred.defer(exercise_models.commit_stack_log,
                               stack_log_source,
                               card,
                               cards_done,
                               cards_left,
                               problem_log.__class__.__name__,
                               str(problem_log.key()),
                               _queue="stack-log-queue",
                               _url="/_ah/queue/deferred_stacklog")
            else:
                exercise_models.commit_stack_log(
                    stack_log_source, card, cards_done, cards_left,
                    problem_log.__class__.__name__, str(problem_log.key()))

        return user_exercise, user_exercise_graph, goals_updated