def updateTest(prior: Tuple[float, float, float], result: bool, tnow: float):
    """Alternate to ebisu.updateRecall that returns several posterior Beta models"""
    # Prior is an Ebisu model: Beta(alpha, beta) on recall probability at time t.
    (alpha, beta, t) = prior
    # Elapsed time in units of the model's time constant.
    delta = tnow / t
    if result:
        # Success: the posterior is exactly a GB1 distribution; take its moments.
        # gb1 tuple layout appears to be (1/delta, 1, alpha', beta', tnow) —
        # matches gb1Moments' first four arguments; confirm against the helper.
        gb1 = (1.0 / delta, 1.0, alpha + delta, beta, tnow)
        mom = gb1Moments(*gb1[:-1])
    else:
        # Failure: no closed-form GB1 posterior; get the first two posterior
        # moments from the project helper instead.
        mom = np.array(failureMoments(prior, result, tnow, num=2))

    def f(bd):
        # Residual for the fit below: moments of a candidate GB1 (with the
        # prior's alpha held fixed) minus the target posterior moments.
        b, d = bd
        this = np.array(gb1Moments(1 / d, 1., alpha, b, num=2))
        return this - mom

    from scipy.optimize import least_squares
    # Fit (beta, delta) so a GB1 with the prior's alpha matches the posterior
    # moments; lower bounds keep beta > 1.01 and delta positive.
    res = least_squares(f, [beta, delta], bounds=((1.01, 0), (np.inf, np.inf)))
    # print("optimization cost", res)
    newBeta, newDelta = res.x
    gb1 = (1 / newDelta, 1, alpha, newBeta, tnow)
    moved2 = moveBeta(gb1ToBeta(gb1))
    # Return every candidate posterior-approximation strategy for comparison
    # (plotted against each other in simulation()).
    return dict(
        simple=gb1ToBeta(gb1),
        moved=gb1ToBeta(moveGb1(gb1, prior[2])),
        moved2=moved2,
        moved3=moveBeta2(gb1ToBeta(gb1)),
        moveBetasOnly=moveBeta(ebisu.updateRecall(prior, result, tnow)),
        postToBeta=posteriorMomToBeta(mom, moved2, tnow),
    )
def right(self):
    """Record a correct answer for the current card and advance the deck.

    New cards are seeded with the precomputed "answered right" model;
    cards already in learning get a proper Ebisu update.
    """
    r = self.card_list[self.index]
    if not r.learning or not r.last:
        # First exposure (or no review date yet): seed the card with the
        # precomputed "answered right" starting model.
        r.last = today
        r.learning = True
        r.alpha, r.beta, r.t = spaced_rep.a_right, spaced_rep.b_right, spaced_rep.t_right
    else:
        # Fold the successful review into the card's Ebisu model.
        # BUG FIX: the original discarded updateRecall's return value, so a
        # correct answer never actually changed the model.
        r.alpha, r.beta, r.t = ebisu.updateRecall(
            prior=(r.alpha, r.beta, r.t),
            successes=1,
            total=1,
            tnow=(today - r.last).days + .1)
        # BUG FIX: reset the review clock so the next interval is measured
        # from this review, not from the first one ever.
        r.last = today
    self.card_list[self.index] = r
    self.index += 1
    if self.index < len(self.card_list):
        self.frames[Back].update(self.card_list[self.index])
        self.frames[Front].update(self.card_list[self.index])
        self.show_frame(Front)
    else:
        # Deck exhausted.
        self.show_frame(StartPage)
def update_item(hid, result):
    """Apply a quiz `result` to the stored Ebisu model for item `hid`.

    Reads the model and last review timestamp from the database, predicts
    current recall (for logging), updates the model, and persists it with
    a fresh timestamp. Always returns True.
    """
    row = db.fetchone("SELECT model, last_date FROM spaced_repetition WHERE hid=%s", (hid,))
    # Items never reviewed before get a small nominal elapsed time.
    elapsed = 0.1
    if row[1] is not None:
        last_review = datetime.strptime(row[1], "%Y-%m-%dT%H:%M:%S.%f")
        elapsed = (datetime.now() - last_review) / oneHour
    model = tuple(json.loads(row[0]))
    # Predicted recall is only logged; it does not affect the update.
    recall = ebisu.predictRecall(model, elapsed, exact=True)
    print(str(hid), str(recall))
    updated = ebisu.updateRecall(model, result, elapsed)
    print(hid, result)
    print(model)
    print(updated)
    db.update_sr_item(hid, json.dumps(updated), datetime.now().isoformat())
    return True
def wrong(self):
    """Record a wrong answer for the current card and re-queue it.

    Learning cards get an Ebisu failure update; new cards are seeded with
    the precomputed "answered wrong" model. The card is reinserted halfway
    through the remaining deck so it comes up again soon.
    """
    w = self.card_list.pop(self.index)
    if w.learning:
        # Fold the failed review into the existing Ebisu model.
        # BUG FIX: the original computed (w.last - today), a negative
        # elapsed time; tnow must be positive (matches right()).
        w.alpha, w.beta, w.t = ebisu.updateRecall(
            prior=(w.alpha, w.beta, w.t),
            successes=0,
            total=1,
            tnow=(today - w.last).days + .1)
    else:
        # First exposure: seed with the "answered wrong" starting model.
        w.learning = True
        w.alpha, w.beta, w.t = spaced_rep.a_wrong, spaced_rep.b_wrong, spaced_rep.t_wrong
    # The card was just reviewed either way; reset its review clock.
    w.last = today
    # Reinsert halfway between the current position and the end of the deck.
    self.card_list.insert((self.index + len(self.card_list)) // 2, w)
    self.frames[Back].update(self.card_list[self.index])
    self.frames[Front].update(self.card_list[self.index])
    self.show_frame(Front)
def process_result(self, user_id: int, results: bool) -> None:
    """
    Saves a review with the test results (for eventual statistics)
    """
    if not isinstance(results, bool):
        raise ValueError(
            "Invalid test result for Ebisu: {}".format(results))
    user = User.objects.get(id=user_id)
    # Compute the prior
    alpha, beta, t = None, None, None
    try:
        if self.deck.reviewing_card.last_review:
            previous_model, time_from_last_review = self._get_card_model(
                self.deck.reviewing_card)
            alpha, beta, t = ebisu.updateRecall(prior=previous_model,
                                                successes=int(results),
                                                total=1,
                                                tnow=time_from_last_review)
    except AssertionError as e:
        # BUG FIX: the original passed ``e`` as a %-argument to a message
        # with no placeholder, so the exception text was never logged and
        # logging raised an internal formatting error. Use lazy %s args.
        logging.error("Assertion Error on card %s: %s",
                      self.deck.reviewing_card, e)
    finally:
        if (alpha, beta, t) == (None, None, None):
            # Set defaults if this is the first review (or the update failed)
            alpha = self.deck.initial_alpha
            beta = self.deck.initial_beta
            t = self.deck.initial_t
    # Save review
    new_review = Review(
        alpha=alpha,
        beta=beta,
        t=t,
        user=user,
        test_results=results,
        review_time=datetime.utcnow(),
    )
    self.deck.reviewing_card.update(push__reviews=new_review)
    self.deck.reviewing_card.save()
import ebisu

# Starting Ebisu model: Beta(3, 3) prior on recall probability at time
# t_init (presumably the initial halflife — confirm against ebisu docs).
t_init = 1
a_init = 3.0
b_init = 3.0
default_model = ebisu.defaultModel(t=t_init, alpha=a_init, beta=b_init)
# Precomputed seed models for a card first answered wrong/right 0.1 time
# units after creation; consumed elsewhere as spaced_rep.{a,b,t}_{wrong,right}.
(a_wrong, b_wrong, t_wrong) = ebisu.updateRecall(default_model, 0, 1, 0.1)
(a_right, b_right, t_right) = ebisu.updateRecall(default_model, 1, 1, 0.1)
def simulation(model, result, tnow):
    """Plot several posterior approximations from updateTest against the
    prior, then Monte-Carlo-check one posterior's moments.

    `model` is an Ebisu (alpha, beta, t) tuple; `result` a quiz outcome;
    `tnow` the elapsed time of the quiz.
    """
    gold = ebisu.updateRecall(model, result, tnow)
    newModel = updateTest(model, result, tnow)
    print(newModel)
    # Time grid for the recall-probability traces.
    t = np.linspace(.1, 100 * 1, 50 * 1)

    def trace(model):
        # Recall probability of `model` evaluated over the time grid.
        return np.vectorize(lambda t: ebisu.predictRecall(model, t))(t)

    def yerr(model):
        # Interquartile band (25th/75th percentile recall medians).
        return np.vstack([
            np.vectorize(lambda t: ebisu.alternate.predictRecallMedian(
                model, t, 0.25))(t),
            np.vectorize(lambda t: ebisu.alternate.predictRecallMedian(
                model, t, 0.75))(t),
        ])

    def both(model):
        # Trace plus error band, shaped for plt.errorbar-style consumers.
        y = trace(model)
        return dict(y=y, yerr=np.abs(yerr(model) - y))

    plt.ion()
    plt.figure()
    plt.semilogy(t, trace(model), linewidth=6, label='prior')
    # plt.semilogy(t, trace(gold), linewidth=5, label='orig')
    plt.semilogy(t, trace(newModel['simple']), '--', linewidth=4, label='via GB1')
    # plt.semilogy(t, trace(newModel['moved']), linewidth=3, label='via GB1@HL')
    plt.semilogy(t, trace(newModel['moved2']), '--', linewidth=2, label='Beta@HL')
    plt.semilogy(t, trace(newModel['postToBeta']), linewidth=1, label='post2beta')
    # plt.semilogy(t, trace(newModel['moveBetasOnly']), '-', linewidth=2, label='orig@HL')
    plt.legend()
    plt.title('Model=({},{},{}), Quiz={} @ Tnow={}'.format(
        *model, result, tnow))

    # NOTE(review): `result` is clobbered here, so the Monte Carlo check
    # below always validates the failure case regardless of the argument —
    # looks like leftover debugging; confirm before relying on it.
    result = False
    import scipy.stats as stats
    alpha, beta, tau = model
    # 10 million samples from the prior on recall at the time constant tau.
    N = 10 * 1000 * 1000
    priorTau = stats.beta.rvs(alpha, beta, size=N)
    tQuiz = 3.
    # Push prior samples forward to quiz time via the exponential decay model.
    priorTQuiz = priorTau**(tQuiz / tau)
    # Importance weights: Bernoulli likelihood of the (clobbered) result.
    posteriorWeights = (priorTQuiz)**result * (
        (1 - priorTQuiz)**(1 - result))
    posteriorModel = updateGb1(model, result, tQuiz)
    # Map samples to the posterior model's time constant for comparison.
    posteriorNewTau = (priorTQuiz)**(posteriorModel[2] / tQuiz)
    weightedMean = np.sum(
        posteriorWeights * posteriorNewTau) / np.sum(posteriorWeights)
    weightedVar = np.sum(
        posteriorWeights * (posteriorNewTau - weightedMean)**2) / np.sum(posteriorWeights)
    # Analytic Beta moments vs. weighted Monte Carlo moments should agree.
    print('expected', summarizeBeta(posteriorModel[0], posteriorModel[1]))
    print('actual', [weightedMean, weightedVar])


def summarizeBeta(a, b):
    # Mean and variance of a Beta(a, b) distribution.
    return dict(mean=a / (a + b), var=a * b / (a + b)**2 / (a + b + 1))