def emit_features(user_states, theta, options, split_desc):
    """Emit a CSV data file of correctness, prediction, and abilities.

    Writes one row per response (skipping each user's first response) to
    "<options.output>_split=<split_desc>.csv".  Each row holds the observed
    correctness, the model's predicted probability of a correct response,
    and the sampled ability estimates at that point in the user's history.

    Args:
        user_states: iterable of dicts with 'correct', 'log_time_taken',
            and 'exercise_ind' arrays describing each user's history.
        theta: model parameters passed through to mirt_util.
        options: must provide .output (filename prefix) and .num_abilities.
        split_desc: label (e.g. "train"/"test") embedded in the filename.
    """
    # Use a context manager so the file is closed even if sampling raises
    # partway through (the original open()/close() pair leaked on error).
    with open("%s_split=%s.csv" % (options.output, split_desc), 'w+') as f:
        for user_state in user_states:
            # Start each user from a zero ability estimate; subsequent
            # iterations warm-start the sampler from the previous estimate.
            abilities = np.zeros((options.num_abilities, 1))
            correct = user_state['correct']
            log_time_taken = user_state['log_time_taken']
            exercise_ind = user_state['exercise_ind']

            # NOTE: I currently do not output features for the first problem
            for i in xrange(1, correct.size):
                # TODO(jace) this should probably be the marginal estimation
                _, _, abilities, _ = mirt_util.sample_abilities_diffusion(
                    theta, exercise_ind[:i], correct[:i],
                    log_time_taken[:i],
                    abilities_init=abilities, num_steps=200)
                prediction = mirt_util.conditional_probability_correct(
                    abilities, theta, exercise_ind[i:(i + 1)])
                f.write("%d, " % correct[i])
                f.write("%.4f, " % prediction[-1])
                f.write(",".join(["%.4f" % a for a in abilities]))
                f.write('\n')
def emit_features(user_states, theta, options, split_desc):
    """Emit a CSV data file of correctness, prediction, and abilities.

    Writes one row per response (skipping each user's first response) to
    "<options.output>_split=<split_desc>.csv".  Each row holds the observed
    correctness, the model's predicted probability of a correct response,
    and the sampled ability estimates at that point in the user's history.

    Args:
        user_states: iterable of dicts with 'correct', 'log_time_taken',
            and 'exercises_ind' arrays describing each user's history.
        theta: model parameters passed through to mirt_util.
        options: must provide .output (filename prefix) and .num_abilities.
        split_desc: label (e.g. "train"/"test") embedded in the filename.
    """
    # Use a context manager so the file is closed even if sampling raises
    # partway through (the original open()/close() pair leaked on error).
    with open("%s_split=%s.csv" % (options.output, split_desc), 'w+') as f:
        for user_state in user_states:
            # Start each user from a zero ability estimate; subsequent
            # iterations warm-start the sampler from the previous estimate.
            abilities = np.zeros((options.num_abilities, 1))
            correct = user_state['correct']
            log_time_taken = user_state['log_time_taken']
            exercises_ind = user_state['exercises_ind']

            # NOTE: I currently do not output features for the first problem
            for i in xrange(1, correct.size):
                # TODO(jace) this should probably be the marginal estimation
                _, _, abilities, _ = mirt_util.sample_abilities_diffusion(
                    theta, exercises_ind[:i], correct[:i],
                    log_time_taken[:i],
                    abilities_init=abilities, num_steps=200)
                prediction = mirt_util.conditional_probability_correct(
                    abilities, theta, exercises_ind[i:(i + 1)])
                f.write("%d, " % correct[i])
                f.write("%.4f, " % prediction[-1])
                f.write(",".join(["%.4f" % a for a in abilities]))
                f.write('\n')
def eval_conditional_probability(x, parameters, exercise_ind):
    """Conditional probability of answering each question correctly.

    Evaluates the probabilities for a student whose ability is the
    constant x in every ability dimension, delegating the actual
    computation to mirt_util.
    """
    # Constant ability vector: every dimension set to x.
    ability_vector = x * np.ones((parameters.num_abilities, 1))
    return mirt_util.conditional_probability_correct(
        ability_vector, parameters, exercise_ind)
def attempt(self, item):
    """Return whether the student responds correctly to the item.

    This is probabilistic - we get the probability that the student will
    respond correctly, and then generate a random number to see if they
    succeed.
    """
    p_correct = mirt_util.conditional_probability_correct(
        self.student.abilities, item, 0)
    # Bernoulli draw: succeed iff the uniform sample falls at or below the
    # predicted probability.  bool() keeps the return type a plain Python
    # bool even if the comparison yields a numpy scalar.
    return bool(random.random() <= p_correct)
def attempt(self, item):
    """Return whether the student responds correctly to the item.

    This is probabilistic - we get the probability that the student will
    respond correctly, and then generate a random number to see if they
    succeed.
    """
    p_correct = mirt_util.conditional_probability_correct(
        self.student.abilities, item, 0)
    # Bernoulli draw: succeed iff the uniform sample falls at or below the
    # predicted probability.  bool() keeps the return type a plain Python
    # bool even if the comparison yields a numpy scalar.
    return bool(random.random() <= p_correct)
def estimated_exercise_accuracy(self, history, exercise_name,
                                update_abilities=True,
                                ignore_analytics=False):
    """Returns the expected probability of getting a future question
    correct on the specified exercise.
    """
    if update_abilities:
        self._update_abilities(history, ignore_analytics=ignore_analytics)

    try:
        ind = mirt_util.get_exercise_ind(
            exercise_name, self.exercise_ind_dict)
    except KeyError:
        # Exercise unseen in our index: fall back to the mean predicted
        # accuracy over all exercises we do have.
        return self.score(history)

    probabilities = mirt_util.conditional_probability_correct(
        self.abilities, self.theta, ind)
    return probabilities[0]