Example #1
def compute(response, user_skill, difficulty, time, count_res):
    # coerce the raw (possibly string) arguments to floats before the Elo update
    response = float(response)
    user_skill = float(user_skill)
    difficulty = float(difficulty)
    time = float(time)
    count_res = float(count_res)

    expected_response = elo.compute_expected_response(user_skill, difficulty, time)
    user_skill_delta = elo.compute_user_skill_delta(response, expected_response)
    question_difficulty_delta = elo.compute_difficulty_delta(response, expected_response, count_res)

    print("response:", response, "expected response:", expected_response,
          "skill delta:", user_skill_delta, "difficulty delta:", question_difficulty_delta)
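
The elo helpers called in compute (compute_expected_response, compute_user_skill_delta, compute_difficulty_delta) belong to the surrounding project and are not shown here. The lines below are only a minimal sketch of what they might look like, assuming a logistic expected response and learning-rate-scaled Elo updates; the real library may compute these differently.

from math import exp

def compute_expected_response(user_skill, difficulty, time=None):
    # assumed form: probability of a correct answer from the skill/difficulty gap
    return 1.0 / (1.0 + exp(difficulty - user_skill))

def compute_user_skill_delta(response, expected_response, k=1.0):
    # assumed form: move the skill estimate towards the observed outcome
    return k * (response - expected_response)

def compute_difficulty_delta(response, expected_response, count_res, k=1.0):
    # assumed form: shift difficulty the opposite way, damped as answers accumulate
    return (-k / (1.0 + count_res)) * (response - expected_response)
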
def question_priority(question: Question, user: User, choosen_types_counts: dict, answered_counts: dict,
                      last_answer_dates: dict, skill_value: float, user_speed: float) -> float:
    """
    Returns a question priority for the specified parameters
    :param question: question to compute priority for
    :param user: user to compute priority for
    :param choosen_types_counts: types already chosen in the generated task run
    :param answered_counts: answered counts of specific questions
    :param last_answer_dates: last answered dates of specific questions
    :param skill_value: skill of the user applicable for the question
    :param user_speed: speed of the user applicable for the question
    :return: priority of the question
    """

    # a hack to keep order of questions consistent when using testing task
    if question.tasks[0].identifier == "test":
        return question.id

    RESPONSE_GOAL = 0.75

    ANSWERED_COUNT_WEIGHT = 10
    SAME_TYPE_PENALTY_WEIGHT = 10
    TIME_WEIGHT = 120
    PROBABILITY_WEIGHT = 10

    # score for total answered count for the user
    answered_count_score = 1. / sqrt(1 + answered_counts.get(question.id, 0))
    # score for number of selected questions of the same type
    same_type_penalty_score = 1. / sqrt(1 + choosen_types_counts.get(question.type, 0))

    # score for time from last answer of the question
    if question.id in last_answer_dates:
        time_score = -1. / (last_answer_dates[question.id]) if last_answer_dates[question.id] > 0 else -1
    else:
        time_score = 0

    # score for probability of correct answer
    expected_time = compute_expected_response_time(user_speed, question.target_time)  # note: not used below
    expected_response = compute_expected_response(skill_value, question.difficulty)
    if RESPONSE_GOAL > expected_response:
        probability_score = expected_response / RESPONSE_GOAL
    else:
        probability_score = (1 - expected_response) / (1 - RESPONSE_GOAL)

    return (answered_count_score * ANSWERED_COUNT_WEIGHT
            + same_type_penalty_score * SAME_TYPE_PENALTY_WEIGHT
            + time_score * TIME_WEIGHT
            + probability_score * PROBABILITY_WEIGHT)
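
For illustration only, a hedged usage sketch of how this priority could drive question selection: score each candidate and pick the highest-priority one. pick_next_question and the per-run dictionaries are hypothetical stand-ins for whatever the calling code actually maintains.

def pick_next_question(candidates, user, skill_value, user_speed):
    # hypothetical helper, not part of the original example
    choosen_types_counts = {}   # question type -> how many of that type were already picked this run
    answered_counts = {}        # question.id -> how many times the user answered it
    last_answer_dates = {}      # question.id -> time since the user's last answer
    return max(
        candidates,
        key=lambda q: question_priority(q, user, choosen_types_counts, answered_counts,
                                        last_answer_dates, skill_value, user_speed),
    )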