Example #1
0
    def _update_abilities(self, history, use_mean=True, num_steps=200,
                          ignore_analytics=False):
        """Take the history and update the ability estimate.

        Arguments:
            history: list of response dicts; each must be accepted by
                engine.ItemResponse and carry a 'metadata' entry.
            use_mean: if True, adopt the posterior mean abilities from the
                diffusion sampler; otherwise adopt the sampled abilities.
            num_steps: number of diffusion (MCMC) steps to run.
            ignore_analytics: if True, learn only from non-analytics cards.
                This is to evaluate the quality of various models for
                predicting the analytics card.
        """
        # TODO(jace) - check to see if history has actually changed
        # to avoid needless re-estimation
        if history and ignore_analytics:
            history = [
                h for h in history if h['metadata'] and
                not h['metadata'].get('analytics')]

        # Build each ItemResponse once instead of once per extracted field.
        responses = [engine.ItemResponse(h) for h in history]

        exercises = np.asarray([r.exercise for r in responses])
        exercises_ind = mirt_util.get_exercises_ind(
            exercises, self.exercise_ind_dict)

        correct = np.asarray([r.correct for r in responses]).astype(int)

        time_taken = np.asarray(
            [r.time_taken for r in responses]).astype(float)
        # Clamp bad or out-of-range response times into [1, max_time_taken]
        # so the log below is always well-defined and non-negative.
        time_taken[~np.isfinite(time_taken)] = 1.
        time_taken[time_taken < 1.] = 1.
        time_taken[time_taken > self.max_time_taken] = self.max_time_taken
        log_time_taken = np.log(time_taken)

        sample_abilities, _, mean_abilities, stdev = (
            mirt_util.sample_abilities_diffusion(
                self.theta, exercises_ind, correct, log_time_taken,
                self.abilities, num_steps=num_steps))

        # Fix: use_mean was previously ignored (the alternative branch was
        # commented out), so callers passing use_mean=False silently got
        # the mean anyway.  Default behavior (use_mean=True) is unchanged.
        self.abilities = mean_abilities if use_mean else sample_abilities
        self.abilities_stdev = stdev
Example #2
0
 def estimated_exercise_accuracy(self, history, exercise_name,
         update_abilities=True, ignore_analytics=False):
     """Predict the probability that a future question on the named
     exercise is answered correctly.
     """
     if update_abilities:
         self._update_abilities(history, ignore_analytics=ignore_analytics)
     # EAFP: attempt the lookup and let an unknown exercise raise KeyError.
     try:
         ind = mirt_util.get_exercises_ind(
             exercise_name, self.exercise_ind_dict)
     except KeyError:
         # Unknown exercise: fall back to the mean predicted accuracy
         # over all the exercises we do have.
         return self.score(history)
     probabilities = mirt_util.conditional_probability_correct(
         self.abilities, self.theta, ind)
     return probabilities[0]
Example #3
0
 def estimated_exercise_accuracy(self,
                                 history,
                                 exercise_name,
                                 update_abilities=True,
                                 ignore_analytics=False):
     """Return the expected probability of a correct answer to a future
     question on the specified exercise.
     """
     if update_abilities:
         self._update_abilities(history,
                                ignore_analytics=ignore_analytics)
     try:
         exercise_ind = mirt_util.get_exercises_ind(exercise_name,
                                                    self.exercise_ind_dict)
     except KeyError:
         # We have never seen this exercise, so report the mean predicted
         # accuracy over the exercises we do know about.
         return self.score(history)
     p_correct = mirt_util.conditional_probability_correct(
         self.abilities, self.theta, exercise_ind)
     return p_correct[0]
Example #4
0
    def _update_abilities(self,
                          history,
                          use_mean=True,
                          num_steps=200,
                          ignore_analytics=False):
        """Take the history and update the ability estimate.

        Arguments:
            history: list of response dicts; each must be accepted by
                engine.ItemResponse and carry a 'metadata' entry.
            use_mean: if True, adopt the posterior mean abilities from the
                diffusion sampler; otherwise adopt the sampled abilities.
            num_steps: number of diffusion (MCMC) steps to run.
            ignore_analytics: if True, learn only from non-analytics cards.
                This is to evaluate the quality of various models for
                predicting the analytics card.
        """
        # TODO(jace) - check to see if history has actually changed
        # to avoid needless re-estimation
        if history and ignore_analytics:
            history = [
                h for h in history
                if h['metadata'] and not h['metadata'].get('analytics')
            ]

        # Build each ItemResponse once instead of once per extracted field.
        responses = [engine.ItemResponse(h) for h in history]

        exercises = np.asarray([r.exercise for r in responses])
        exercises_ind = mirt_util.get_exercises_ind(exercises,
                                                    self.exercise_ind_dict)

        correct = np.asarray([r.correct for r in responses]).astype(int)

        time_taken = np.asarray(
            [r.time_taken for r in responses]).astype(float)
        # Clamp bad or out-of-range response times into [1, max_time_taken]
        # so the log below is always well-defined and non-negative.
        time_taken[~np.isfinite(time_taken)] = 1.
        time_taken[time_taken < 1.] = 1.
        time_taken[time_taken > self.max_time_taken] = self.max_time_taken
        log_time_taken = np.log(time_taken)

        sample_abilities, _, mean_abilities, stdev = (
            mirt_util.sample_abilities_diffusion(self.theta,
                                                 exercises_ind,
                                                 correct,
                                                 log_time_taken,
                                                 self.abilities,
                                                 num_steps=num_steps))

        # Fix: use_mean was previously ignored (the alternative branch was
        # commented out), so callers passing use_mean=False silently got
        # the mean anyway.  Default behavior (use_mean=True) is unchanged.
        self.abilities = mean_abilities if use_mean else sample_abilities
        self.abilities_stdev = stdev