def _update_abilities(self, history, use_mean=True, num_steps=200,
                      ignore_analytics=False):
    """Take the history and update the ability estimate.

    Args:
        history: list of response dicts; each has a 'metadata' dict and
            is parseable by engine.ItemResponse.
        use_mean: NOTE(review): currently ignored -- the mean ability is
            always used; the sampled-abilities branch is deliberately
            disabled (see trailing comment at the bottom of this method).
        num_steps: number of steps for the abilities diffusion sampler.
        ignore_analytics: if True, learn only from non-analytics cards.
            This is to evaluate the quality of various models for
            predicting the analytics card.

    Side effects:
        Updates self.abilities and self.abilities_stdev.
    """
    # TODO(jace) - check to see if history has actually changed
    # to avoid needless re-estimation
    if history and ignore_analytics:
        history = [
            h for h in history
            if h['metadata'] and not h['metadata'].get('analytics')]

    exercises = np.asarray(
        [engine.ItemResponse(h).exercise for h in history])
    exercises_ind = mirt_util.get_exercises_ind(
        exercises, self.exercise_ind_dict)

    correct = np.asarray(
        [engine.ItemResponse(h).correct for h in history]).astype(int)

    time_taken = np.asarray(
        [engine.ItemResponse(h).time_taken for h in history]).astype(float)
    # Deal with out of range or bad values for the response time:
    # clamp into [1, max_time_taken] so the log below is well defined.
    time_taken[~np.isfinite(time_taken)] = 1.
    time_taken[time_taken < 1.] = 1.
    time_taken[time_taken > self.max_time_taken] = self.max_time_taken
    log_time_taken = np.log(time_taken)

    sample_abilities, _, mean_abilities, stdev = (
        mirt_util.sample_abilities_diffusion(
            self.theta, exercises_ind, correct, log_time_taken,
            self.abilities, num_steps=num_steps))

    self.abilities = mean_abilities  # if use_mean else sample_abilities
    self.abilities_stdev = stdev
def _update_abilities(self, history, use_mean=True, num_steps=200,
                      ignore_analytics=False):
    """Take the history and update the ability estimate.

    Args:
        history: list of response dicts; each has a 'metadata' dict and
            is parseable by engine.ItemResponse.
        use_mean: NOTE(review): currently ignored -- the mean ability is
            always used; the sampled-abilities branch is deliberately
            disabled (see trailing comment at the bottom of this method).
        num_steps: number of steps for the abilities diffusion sampler.
        ignore_analytics: if True, learn only from non-analytics cards.
            This is to evaluate the quality of various models for
            predicting the analytics card.

    Side effects:
        Updates self.abilities and self.abilities_stdev.
    """
    # TODO(jace) - check to see if history has actually changed
    # to avoid needless re-estimation
    if history and ignore_analytics:
        history = [
            h for h in history
            if h['metadata'] and not h['metadata'].get('analytics')
        ]

    exercises = np.asarray(
        [engine.ItemResponse(h).exercise for h in history])
    exercises_ind = mirt_util.get_exercises_ind(exercises,
                                                self.exercise_ind_dict)

    correct = np.asarray(
        [engine.ItemResponse(h).correct for h in history]).astype(int)

    time_taken = np.asarray(
        [engine.ItemResponse(h).time_taken for h in history]).astype(float)
    # Deal with out of range or bad values for the response time:
    # clamp into [1, max_time_taken] so the log below is well defined.
    time_taken[~np.isfinite(time_taken)] = 1.
    time_taken[time_taken < 1.] = 1.
    time_taken[time_taken > self.max_time_taken] = self.max_time_taken
    log_time_taken = np.log(time_taken)

    sample_abilities, _, mean_abilities, stdev = (
        mirt_util.sample_abilities_diffusion(self.theta, exercises_ind,
                                             correct, log_time_taken,
                                             self.abilities,
                                             num_steps=num_steps))

    self.abilities = mean_abilities  # if use_mean else sample_abilities
    self.abilities_stdev = stdev
def _update_abilities(self, history, use_mean=True, num_steps=200,
                      ignore_analytics=False):
    """Take the history and update the ability estimate.

    Args:
        history: list of response dicts; each has a 'metadata' dict and
            is parseable by engine.ItemResponse.
        use_mean: NOTE(review): currently ignored -- the mean ability is
            always used; the sampled-abilities branch is deliberately
            disabled (see trailing comment at the bottom of this method).
        num_steps: number of steps for the abilities diffusion sampler.
        ignore_analytics: if True, learn only from non-analytics cards.
            This is to evaluate the quality of various models for
            predicting the analytics card.

    Side effects:
        Updates self.abilities and self.abilities_stdev.
    """
    # TODO(jace) - check to see if history has actually changed
    # to avoid needless re-estimation
    state = mirt_util.UserState()
    if history and ignore_analytics:
        history = [
            h for h in history
            if h['metadata'] and not h['metadata'].get('analytics')
        ]

    exercises = np.asarray(
        [engine.ItemResponse(h).exercise for h in history])
    state.exercise_ind = mirt_util.get_exercise_ind(
        exercises, self.exercise_ind_dict)

    state.correct = np.asarray(
        [engine.ItemResponse(h).correct for h in history]).astype(int)

    time_taken = np.asarray(
        [engine.ItemResponse(h).time_taken for h in history]).astype(float)
    # Normalization (clamping / log transform) is delegated to mirt_util.
    state.log_time_taken = mirt_util.get_normalized_time(time_taken)

    state.abilities = self.abilities

    sample_abilities, _, mean_abilities, stdev = (
        mirt_util.sample_abilities_diffusion(self.theta, state,
                                             num_steps=num_steps))

    self.abilities = mean_abilities  # if use_mean else sample_abilities
    self.abilities_stdev = stdev
def _update_abilities(self, history, use_mean=True, num_steps=200,
                      ignore_analytics=False):
    """Take the history and update the ability estimate.

    Args:
        history: list of response dicts; each has a 'metadata' dict and
            is parseable by engine.ItemResponse.
        use_mean: NOTE(review): currently ignored -- the mean ability is
            always used; the sampled-abilities branch is deliberately
            disabled (see trailing comment at the bottom of this method).
        num_steps: number of steps for the abilities diffusion sampler.
        ignore_analytics: if True, learn only from non-analytics cards.
            This is to evaluate the quality of various models for
            predicting the analytics card.

    Side effects:
        Updates self.abilities and self.abilities_stdev.
    """
    # TODO(jace) - check to see if history has actually changed
    # to avoid needless re-estimation
    state = mirt_util.UserState()
    if history and ignore_analytics:
        history = [
            h for h in history
            if h['metadata'] and not h['metadata'].get('analytics')]

    exercises = np.asarray(
        [engine.ItemResponse(h).exercise for h in history])
    state.exercise_ind = mirt_util.get_exercise_ind(
        exercises, self.exercise_ind_dict)

    state.correct = np.asarray(
        [engine.ItemResponse(h).correct for h in history]).astype(int)

    time_taken = np.asarray(
        [engine.ItemResponse(h).time_taken for h in history]).astype(float)
    # Normalization (clamping / log transform) is delegated to mirt_util.
    state.log_time_taken = mirt_util.get_normalized_time(time_taken)

    state.abilities = self.abilities

    sample_abilities, _, mean_abilities, stdev = (
        mirt_util.sample_abilities_diffusion(
            self.theta, state, num_steps=num_steps))

    self.abilities = mean_abilities  # if use_mean else sample_abilities
    self.abilities_stdev = stdev