def load(self, path):
    """Load a pickled database from path.

    Raises LoadError when the file does not exist and InvalidFormatError
    when unpickling (or unpacking) fails; self.load_failed mirrors the
    outcome in both cases.
    """
    path = expand_path(path, config().basedir)
    if self.is_loaded():
        unload_database()
    if not os.path.exists(path):
        self.load_failed = True
        raise LoadError
    try:
        infile = file(path, 'rb')
        try:
            # NOTE(review): cPickle.load on an attacker-supplied file can
            # execute arbitrary code; only open trusted databases.
            db = cPickle.load(infile)
        finally:
            # Close even when unpickling raises (previously the handle
            # leaked on error because close() sat after the load call).
            infile.close()
        self.start_date = db[0]
        self.categories = db[1]
        self.facts = db[2]
        self.fact_views = db[3]
        self.cards = db[4]
        self.load_failed = False
    except:
        # Deliberate catch-all: any failure is reported uniformly as a
        # format error, with the original stack trace attached.
        self.load_failed = True
        raise InvalidFormatError(stack_trace=True)
    # Work around a sip bug: don't store card types, but their ids.
    for f in self.facts:
        f.card_type = card_type_by_id(f.card_type)
    # TODO: This was to remove database inconsistencies. Still needed?
    #for c in self.categories:
    #    self.remove_category_if_unused(c)
    config()["path"] = contract_path(path, config().basedir)
    log().loaded_database()
    for f in component_manager.get_all("function_hook", "after_load"):
        f.run()
def run(self): basedir = config().basedir join = os.path.join # Find out which files haven't been uploaded yet. dir = os.listdir(unicode(join(basedir, "history"))) history_files = [x for x in dir if x[-4:] == ".bz2"] uploaded = None try: upload_log = file(join(basedir, "history", "uploaded")) uploaded = [x.strip() for x in upload_log] upload_log.close() except: uploaded = [] to_upload = sets.Set(history_files) - sets.Set(uploaded) if len(to_upload) == 0: return # Upload them to our server. upload_log = file(join(basedir, "history", "uploaded"), 'a') try: for f in to_upload: print "Uploading", f, "...", filename = join(basedir, "history", f) self.upload(filename) print >> upload_log, f log().uploaded(filename) print "done!" except: log().uploading_failed() traceback.print_exc() upload_log.close()
def new(self, path):
    """Create a fresh, empty database at path and make it current."""
    if self.is_loaded():
        self.unload()
    self.load_failed = False
    self.start_date = StartDate()
    cfg = config()
    cfg["path"] = path
    log().new_database()
    self.save(contract_path(path, cfg.basedir))
def unload(self):
    """Persist the current database, then reset all in-memory state."""
    self.save(config()["path"])
    log().saved_database()
    self.start_date = None
    # Drop every in-memory collection.
    for attr in ("categories", "facts", "fact_views", "cards"):
        setattr(self, attr, [])
    scheduler().clear_queue()
    return True
def finalise(): global upload_thread if upload_thread: print "Waiting for uploader thread to stop..." upload_thread.join() print "done!" log().program_stopped() try: os.remove(os.path.join(config().basedir,"MNEMOSYNE_LOCK")) except OSError: print "Failed to remove lock file." print traceback_string()
def delete_fact_and_related_data(self, fact):
    """Delete fact, every card derived from it and its fact views, then
    prune any categories left unused."""
    old_cat = fact.cat
    # Iterate over a copy: the old code removed from self.cards while
    # iterating it directly, which silently skips the element after each
    # removal.
    for c in self.cards[:]:
        if c.fact == fact:
            self.cards.remove(c)
            try:
                self.fact_views.remove(c.fact_view)
            except:
                pass # Its fact view is a card type fact view one.
            log().deleted_card(c)
    self.facts.remove(fact)
    scheduler().rebuild_queue()
    for cat in old_cat:
        self.remove_category_if_unused(cat)
def add_card(self, card):
    """Add new card and its fact_view."""
    # Flatten the card into the 11-column reviewstats row.
    row = (card.id, card.fact.uid, card.fact_view.id, card.grade,
           card.lapses, card.easiness, card.acq_reps,
           card.acq_reps_since_lapse, card.last_rep, card.next_rep,
           card.unseen)
    self.conn.execute("""insert into reviewstats(id, fact_id, view_id,
        grade, lapses, easiness, acq_reps, acq_reps_since_lapse,
        last_rep, next_rep, unseen)
        values(?,?,?,?,?,?,?,?,?,?,?)""", row)
    # Add view if doesn't exist
    view_count = self.conn.execute("select count() from views where id=?",
        (card.fact_view.id, )).fetchone()[0]
    if not view_count:
        self.add_fact_view(card.fact_view, card)
    log().new_card(card)
def new(self, path):
    """ Create new database """
    self.path = path
    if self.is_loaded():
        self.unload()
    self.start_date = StartDate()
    config()["path"] = path
    log().new_database()
    # create tables according to schema
    self.conn.executescript(SCHEMA)
    # save start_date
    stamp = datetime.strftime(self.start_date.start, '%Y-%m-%d %H:%M:%S')
    self.conn.execute("insert into meta(key, value) values(?,?)",
                      ("start_date", stamp))
    self.save()
def initialise_logging():
    """Rotate the old log, start a new one, and — when configured —
    launch the background uploader thread."""
    global upload_thread
    from mnemosyne.libmnemosyne.log_uploader import LogUploader
    log().archive_old_log()
    log().start_logging()
    log().program_started()
    if not config()["upload_logs"]:
        return
    upload_thread = LogUploader()
    upload_thread.start()
def process_answer(self, card, new_grade, dry_run=False):
    """Grade card with new_grade and compute its next repetition.

    Updates the card's repetition counters, easiness and scheduling
    fields, persists it via the database, logs the revision, and returns
    the new interval (in days since start, including noise).  With
    dry_run=True, works on a copy and returns the noiseless interval
    without touching the database or the queue.
    """
    db = database()
    days_since_start = db.days_since_start()
    # When doing a dry run, make a copy to operate on. Note that this
    # leaves the original in cards and the reference in the GUI intact.
    if dry_run:
        card = copy.copy(card)
    # Calculate scheduled and actual interval, taking care of corner
    # case when learning ahead on the same day.
    scheduled_interval = card.next_rep - card.last_rep
    actual_interval = days_since_start - card.last_rep
    if actual_interval == 0:
        actual_interval = 1 # Otherwise new interval can become zero.
    if (card.acq_reps == 0) and (card.ret_reps == 0):
        # The card has not yet been given its initial grade, because it
        # was imported.
        card.easiness = db.average_easiness()
        card.acq_reps = 1
        card.acq_reps_since_lapse = 1
        new_interval = calculate_initial_interval(new_grade)
        # Make sure the second copy of a grade 0 card doesn't show
        # up again.
        if not dry_run and card.grade == 0 and new_grade in [2,3,4,5]:
            for i in self.queue:
                if i.id == card.id:
                    self.queue.remove(i)
                    break
    elif card.grade in [0,1] and new_grade in [0,1]:
        # In the acquisition phase and staying there.
        card.acq_reps += 1
        card.acq_reps_since_lapse += 1
        new_interval = 0
    elif card.grade in [0,1] and new_grade in [2,3,4,5]:
        # In the acquisition phase and moving to the retention phase.
        card.acq_reps += 1
        card.acq_reps_since_lapse += 1
        new_interval = 1
        # Make sure the second copy of a grade 0 card doesn't show
        # up again.
        if not dry_run and card.grade == 0:
            for i in self.queue:
                if i.id == card.id:
                    self.queue.remove(i)
                    break
    elif card.grade in [2,3,4,5] and new_grade in [0,1]:
        # In the retention phase and dropping back to the
        # acquisition phase.
        card.ret_reps += 1
        card.lapses += 1
        card.acq_reps_since_lapse = 0
        card.ret_reps_since_lapse = 0
        new_interval = 0
    elif card.grade in [2,3,4,5] and new_grade in [2,3,4,5]:
        # In the retention phase and staying there.
        card.ret_reps += 1
        card.ret_reps_since_lapse += 1
        # SM-2 style easiness update; only applied when the card was not
        # reviewed ahead of schedule.  Grade 4 leaves easiness unchanged;
        # 1.3 is the conventional lower bound.
        if actual_interval >= scheduled_interval:
            if new_grade == 2:
                card.easiness -= 0.16
            if new_grade == 3:
                card.easiness -= 0.14
            if new_grade == 5:
                card.easiness += 0.10
            if card.easiness < 1.3:
                card.easiness = 1.3
        new_interval = 0
        if card.ret_reps_since_lapse == 1:
            # First successful repetition after a lapse (or ever).
            new_interval = 6
        else:
            if new_grade == 2 or new_grade == 3:
                if actual_interval <= scheduled_interval:
                    new_interval = actual_interval * card.easiness
                else:
                    new_interval = scheduled_interval
            if new_grade == 4:
                new_interval = actual_interval * card.easiness
            if new_grade == 5:
                if actual_interval < scheduled_interval:
                    new_interval = scheduled_interval # Avoid spacing.
                else:
                    new_interval = actual_interval * card.easiness
    # Shouldn't happen, but build in a safeguard.
    if new_interval == 0:
        print "Internal error: new interval was zero."
        new_interval = scheduled_interval
    new_interval = int(new_interval)
    # When doing a dry run, stop here and return the scheduled interval.
    if dry_run:
        return new_interval
    # Add some randomness to interval.
    noise = self.calculate_interval_noise(new_interval)
    # Update grade and interval.
    card.grade = new_grade
    card.last_rep = days_since_start
    card.next_rep = days_since_start + new_interval + noise
    card.unseen = False
    # Don't schedule related cards on the same day.  NOTE(review):
    # card.next_rep changes inside this loop, so each sibling collision
    # can push the card one further day out.
    for c in db.cards_from_fact(card.fact):
        if c != card and c.next_rep == card.next_rep and card.grade >= 2:
            card.next_rep += 1
            noise += 1
    db.update_card(card)
    # Create log entry.
    log().revision(card, scheduled_interval, actual_interval,
                   new_interval, noise)
    return new_interval + noise
def add_card(self, card):
    """Register a newly created card in the in-memory database."""
    self.cards.append(card)
    self.load_failed = False
    log().new_card(card)