def _migrateModels(self):
    """Upgrade the old models/fieldModels/cardModels tables into the
    single JSON blob stored in col.models.

    Each old model is rebuilt on top of defaultModel with a fresh
    millisecond-timestamp id, notes are repointed at the new id, and
    the old tables are dropped.
    """
    import anki.models
    db = self.db
    times = {}  # ms timestamps already handed out, to keep new ids unique
    mods = {}   # new model id -> model dict; serialized into col.models below
    for row in db.all(
            "select id, name from models"):
        # spin until the clock yields an unused millisecond timestamp
        while 1:
            t = intTime(1000)
            if t not in times:
                times[t] = True
                break
        m = anki.models.defaultModel.copy()
        m['id'] = t
        m['name'] = row[1]
        m['mod'] = intTime()
        m['tags'] = []
        m['flds'] = self._fieldsForModel(row[0])
        m['tmpls'] = self._templatesForModel(row[0], m['flds'])
        mods[m['id']] = m
        # repoint existing notes at the new model id
        db.execute("update notes set mid = ? where mid = ?", t, row[0])
    # save and clean up
    # NOTE(review): this uses simplejson while a sibling migration uses the
    # stdlib json module - confirm which serializer this module imports
    db.execute("update col set models = ?", simplejson.dumps(mods))
    db.execute("drop table fieldModels")
    db.execute("drop table cardModels")
    db.execute("drop table models")
def meta(self, cv=None):
    """Return collection sync metadata for the connecting client.

    cv: optional "client,version,platform" string sent by the client.
    Newer clients (ankidroid >= 2.3, ankidesktop >= 2.0.13) receive the
    dict form; older clients the legacy 5-tuple
    (mod, scm, usn, server time, media usn).
    """
    # Make sure the media database is open!
    if self.col.media.db is None:
        self.col.media.connect()
    if cv is not None:
        client, version, platform = cv.split(",")
    else:
        # no version info supplied; assume an old desktop client
        client = "ankidesktop"
        version = "2.0.12"
        platform = "unknown"
    # Strip letter suffixes (e.g. "12beta1" -> "121") before parsing.
    # str.translate(None, letters) was the Python 2 API and raises
    # TypeError on Python 3 strings; use a deletion table instead.
    strip_letters = str.maketrans("", "", string.ascii_letters)
    version_int = [int(str(x).translate(strip_letters))
                   for x in version.split(".")]
    # Some insanity added in Anki 2.0.13
    if (client == "ankidroid" and version_int[0] >= 2 and version_int[1] >= 3) or (
        client == "ankidesktop"
        and version_int[0] >= 2
        and version_int[1] >= 0
        and version_int[2] >= 13
    ):
        return {
            "scm": self.col.scm,
            "ts": intTime(),
            "mod": self.col.mod,
            "usn": self.col._usn,
            "musn": self.col.media.lastUsn(),
            "msg": "",
            "cont": True,
        }
    else:
        return (self.col.mod, self.col.scm, self.col._usn, intTime(),
                self.col.media.lastUsn())
def meta(self, cv=None):
    """Return collection sync metadata for the connecting client.

    cv: optional "client,version,platform" string. Clients identifying
    as ankidesktop >= 2.0.13 receive the dict form; everything else the
    legacy 5-tuple (mod, scm, usn, server time, media usn).
    """
    # Make sure the media database is open!
    if self.col.media.db is None:
        self.col.media.connect()
    if cv is not None:
        client, version, platform = cv.split(',')
    else:
        # no version info supplied; assume an old desktop client
        client = 'ankidesktop'
        version = '2.0.12'
        platform = 'unknown'
    # NOTE(review): int(x) raises ValueError for version components with
    # letter suffixes (e.g. "12beta1") - confirm clients never send those
    version_int = [int(x) for x in version.split('.')]
    # Some insanity added in Anki 2.0.13
    if client == 'ankidesktop' and version_int[0] >= 2 and version_int[1] >= 0 and version_int[2] >= 13:
        return {
            'scm': self.col.scm,
            'ts': intTime(),
            'mod': self.col.mod,
            'usn': self.col._usn,
            'musn': self.col.media.usn(),
            'msg': '',
            'cont': True,
        }
    else:
        # legacy tuple form
        return (self.col.mod, self.col.scm, self.col._usn, intTime(),
                self.col.media.usn())
def _mid(self, srcMid):
    "Return local id for remote MID."
    # already processed this mid?
    if srcMid in self._modelMap:
        return self._modelMap[srcMid]
    mid = srcMid
    srcModel = self.src.models.get(srcMid)
    srcScm = self.src.models.scmhash(srcModel)
    while True:
        if self.dst.models.have(mid):
            # there's an existing model; if the schemas don't match we
            # can't reuse this mid, so try the next id
            dstModel = self.dst.models.get(mid)
            dstScm = self.dst.models.scmhash(dstModel)
            if srcScm != dstScm:
                mid += 1
                continue
        # either missing from the target col, or the schemas match:
        # copy the source model over under this mid.
        # (previously this copy/update block was duplicated verbatim in
        # both branches; it is factored out here with identical behavior)
        model = srcModel.copy()
        model['id'] = mid
        model['mod'] = intTime()
        model['usn'] = self.col.usn()
        self.dst.models.update(model)
        break
    # save map and return new mid
    self._modelMap[srcMid] = mid
    return mid
def _checkDeckTree(self):
    """Repair deck-tree inconsistencies: duplicate names, blank name
    sections, and decks whose immediate parent is missing.

    Decks are processed in name order so parents are seen before
    children.
    """
    decks = self.col.decks.all()
    decks.sort(key=operator.itemgetter('name'))
    names = set()  # deck names seen so far
    for deck in decks:
        # two decks with the same name?
        if deck['name'] in names:
            print("fix duplicate deck name", deck['name'].encode("utf8"))
            # make the name unique by appending a ms timestamp
            deck['name'] += "%d" % intTime(1000)
            self.save(deck)
        # ensure no sections are blank
        if not all(deck['name'].split("::")):
            print("fix deck with missing sections", deck['name'].encode("utf8"))
            deck['name'] = "recovered%d" % intTime(1000)
            self.save(deck)
        # immediate parent must exist
        if "::" in deck['name']:
            immediateParent = "::".join(deck['name'].split("::")[:-1])
            if immediateParent not in names:
                print("fix deck with missing parent", deck['name'].encode("utf8"))
                self._ensureParents(deck['name'])
                names.add(immediateParent)
        names.add(deck['name'])
def maybeOptimize(self):
    """Optimize the collection, but at most once every two weeks."""
    two_weeks = 86400 * 14
    elapsed = intTime() - self.pm.profile['lastOptimize']
    if elapsed < two_weeks:
        return
    self.progress.start(label=_("Optimizing..."), immediate=True)
    self.col.optimize()
    self.pm.profile['lastOptimize'] = intTime()
    self.pm.save()
    self.progress.finish()
def updateData(self, n, id, sflds):
    """Build the parameter row used to update an existing note, or
    return None if the fields couldn't be processed."""
    self._ids.append(id)
    if not self.processFields(n, sflds):
        return
    now = intTime()
    usn = self.col.usn()
    if not self._tagsMapped:
        return [now, usn, n.fieldsStr, id, n.fieldsStr]
    # tags come from the import file; register and join them
    self.col.tags.register(n.tags)
    tags = self.col.tags.join(n.tags)
    return [now, usn, n.fieldsStr, tags, id, n.fieldsStr, tags]
def install(self, sid, data, fname):
    """Install (or reinstall) add-on `sid` from the zip bytes `data`.

    fname is the download filename; its stem becomes the add-on's
    display name. Files under user_files/ are preserved across
    reinstalls.
    """
    try:
        z = ZipFile(io.BytesIO(data))
    except zipfile.BadZipfile:
        showWarning(_("The download was corrupt. Please try again."))
        return
    name = os.path.splitext(fname)[0]
    # previously installed?
    meta = self.addonMeta(sid)
    base = self.addonsFolder(sid)
    if os.path.exists(base):
        # keep the user's data across the wipe
        self.backupUserFiles(sid)
        self.deleteAddon(sid)
    os.mkdir(base)
    self.restoreUserFiles(sid)
    # extract
    for n in z.namelist():
        if n.endswith("/"):
            # folder; ignore
            continue
        path = os.path.join(base, n)
        # skip existing user files
        if os.path.exists(path) and n.startswith("user_files/"):
            continue
        z.extract(n, base)
    # update metadata
    meta['name'] = name
    meta['mod'] = intTime()
    self.writeAddonMeta(sid, meta)
def _changeCards(self, nids, oldModel, newModel, map):
    """Remap card template ordinals after a note-type change.

    map: oldOrd -> newOrd (None = delete), consulted only when the
    source model is a regular (non-cloze) type. Cards mapped to None
    are removed.
    """
    d = []        # pending ord updates for executemany
    deleted = []  # card ids with no valid template in the new model
    for (cid, ord) in self.col.db.execute(
            "select id, ord from cards where nid in "+ids2str(nids)):
        # if the src model is a cloze, we ignore the map, as the gui
        # doesn't currently support mapping them
        if oldModel['type'] == MODEL_CLOZE:
            new = ord
            if newModel['type'] != MODEL_CLOZE:
                # if we're mapping to a regular note, we need to check if
                # the destination ord is valid
                if len(newModel['tmpls']) <= ord:
                    new = None
        else:
            # mapping from a regular note, so the map should be valid
            new = map[ord]
        if new is not None:
            d.append(dict(
                cid=cid, new=new, u=self.col.usn(), m=intTime()))
        else:
            deleted.append(cid)
    self.col.db.executemany(
        "update cards set ord=:new,usn=:u,mod=:m where id=:cid", d)
    self.col.remCards(deleted)
def flushSched(self):
    """Write only the scheduling-related columns of this card back to
    the database (cheaper than a full flush)."""
    self.mod = intTime()
    self.usn = self.col.usn()
    # bug checks
    # a review-queue card should only carry odue while in a filtered deck
    if self.queue == 2 and self.odue and not self.col.decks.isDyn(self.did):
        warn()
    assert self.due < 4294967296  # due must fit in 32 bits
    self.col.db.execute(
        """update cards set
mod=?, usn=?, type=?, queue=?, due=?, ivl=?, factor=?, reps=?,
lapses=?, left=?, odue=?, odid=?, did=? where id = ?""",
        self.mod,
        self.usn,
        self.type,
        self.queue,
        self.due,
        self.ivl,
        self.factor,
        self.reps,
        self.lapses,
        self.left,
        self.odue,
        self.odid,
        self.did,
        self.id,
    )
def _undoReview(self):
    """Undo the most recent review: restore the card's previous state,
    delete its revlog entry, unbury siblings and roll back the daily
    counts. Returns the undone card's id."""
    data = self._undo[2]      # stack of pre-review card snapshots
    wasLeech = self._undo[3]
    c = data.pop()
    if not data:
        # nothing left to undo
        self.clearUndo()
    # remove leech tag if it didn't have it before
    if not wasLeech and c.note().hasTag("leech"):
        c.note().delTag("leech")
        c.note().flush()
    # write old data
    c.flush()
    # and delete revlog entry
    last = self.db.scalar(
        "select id from revlog where cid = ? "
        "order by id desc limit 1", c.id)
    self.db.execute("delete from revlog where id = ?", last)
    # restore any siblings
    self.db.execute(
        "update cards set queue=type,mod=?,usn=? where queue=-2 and nid=?",
        intTime(), self.usn(), c.nid)
    # and finally, update daily counts
    # queue 3 counts against the learn bucket
    n = 1 if c.queue == 3 else c.queue
    type = ("new", "lrn", "rev")[n]
    self.sched._updateStats(c, type, -1)
    self.sched.reps -= 1
    return c.id
def remTemplate(self, m, template):
    "False if removing template would leave orphan notes."
    # a model must keep at least one template
    assert len(m['tmpls']) > 1
    # find cards using this template
    ord = m['tmpls'].index(template)
    cids = self.col.db.list("""
select c.id from cards c, notes f where c.nid=f.id and mid = ? and ord = ?""",
                            m['id'], ord)
    # all notes with this template must have at least two cards, or we
    # could end up creating orphaned notes
    if self.col.db.scalar("""
select nid, count() from cards where nid in (select nid from cards where id in %s)
group by nid having count() < 2 limit 1""" % ids2str(cids)):
        return False
    # ok to proceed; remove cards
    self.col.modSchema(check=True)
    self.col.remCards(cids)
    # shift ordinals of cards after the removed template down by one
    self.col.db.execute("""
update cards set ord = ord - 1, usn = ?, mod = ?
 where nid in (select id from notes where mid = ?) and ord > ?""",
                        self.col.usn(), intTime(), m['id'], ord)
    m['tmpls'].remove(template)
    self._updateTemplOrds(m)
    self.save(m)
    return True
def id(self, name, create=True, type=None):
    "Add a deck with NAME. Reuse deck if already exists. Return id as int."
    if type is None:
        type = defaultDeck
    name = name.replace('"', '')
    # normalize so visually-identical unicode names compare equal
    name = unicodedata.normalize("NFC", name)
    for id, g in list(self.decks.items()):
        # case-insensitive match against existing decks
        if unicodedata.normalize("NFC", g['name'].lower()) == name.lower():
            return int(id)
    if not create:
        return None
    g = copy.deepcopy(type)
    if "::" in name:
        # not top level; ensure all parents exist
        name = self._ensureParents(name)
    g['name'] = name
    # pick an unused millisecond timestamp as the new deck id
    while 1:
        id = intTime(1000)
        if str(id) not in self.decks:
            break
    g['id'] = id
    self.decks[str(id)] = g
    self.save(g)
    self.maybeAddToActive()
    runHook("newDeck")
    return int(id)
def modSchema(self, check):
    "Mark schema modified. Call this first so user can abort if necessary."
    # only consult the hook on the first schema change; once the schema
    # is already marked changed there is nothing left to abort
    unchanged = not self.schemaChanged()
    if unchanged and check and not runFilter("modSchema", True):
        raise AnkiError("abortSchemaMod")
    self.scm = intTime(1000)
    self.setMod()
def answerCard(self, card, ease):
    """Answer card with ease 1-4: update its queue/type and stats, then
    flush the scheduling changes."""
    assert ease >= 1 and ease <= 4
    self.col.markReview(card)
    card.reps += 1
    wasNew = card.queue == 0
    if wasNew:
        # came from the new queue, move to learning
        card.queue = 1
        # if it was a new card, it's now a learning card
        if card.type == 0:
            card.type = 1
        # init reps to graduation
        card.left = self._startingLeft(card)
        # dynamic?
        if card.odid and card.type == 2:
            if self._resched(card):
                # reviews get their ivl boosted on first sight
                card.ivl = self._dynIvlBoost(card)
                card.odue = self.today + card.ivl
        self._updateStats(card, 'new')
    if card.queue in (1, 3):
        self._answerLrnCard(card, ease)
        if not wasNew:
            self._updateStats(card, 'lrn')
    elif card.queue == 2:
        self._answerRevCard(card, ease)
        self._updateStats(card, 'rev')
    else:
        raise Exception("Invalid queue")
    self._updateStats(card, 'time', card.timeTaken())
    card.mod = intTime()
    card.usn = self.col.usn()
    card.flushSched()
def flush(self):
    """Write the complete card row to the database (insert or replace)
    and log it."""
    self.mod = intTime()
    self.usn = self.col.usn()
    # bug check
    # odue should only be set for review cards in filtered decks
    if self.queue == 2 and self.odue and not self.col.decks.isDyn(self.did):
        warn()
    assert self.due < 4294967296  # due must fit in 32 bits
    self.col.db.execute(
        """
insert or replace into cards values
(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
        self.id, self.nid, self.did, self.ord, self.mod, self.usn,
        self.type, self.queue, self.due, self.ivl, self.factor,
        self.reps, self.lapses, self.left, self.odue, self.odid,
        self.flags, self.data)
    self.col.log(self)
def suspendCards(self, ids):
    """Suspend the given cards (queue -1), first pulling them out of
    any filtered deck and the learning queue."""
    self.remFromDyn(ids)
    self.removeFailed(ids)
    sql = "update cards set queue=-1,mod=?,usn=? where id in " + ids2str(ids)
    self.col.db.execute(sql, intTime(), self.col.usn())
def sortCards(self, cids, start=1, step=1, shuffle=False, shift=False):
    """Reposition the new cards in cids.

    start/step: first due position and increment per note.
    shuffle: randomize the note order before assigning positions.
    shift: push existing new cards at or after `start` out of the way.
    Cards of the same note share one position.
    """
    scids = ids2str(cids)
    now = intTime()
    nids = self.col.db.list(
        ("select distinct nid from cards where type = 0 and id in %s "
         "order by nid") % scids)
    if not nids:
        # no new cards
        return
    # determine nid ordering
    due = {}
    if shuffle:
        random.shuffle(nids)
    for c, nid in enumerate(nids):
        due[nid] = start+c*step
        high = start+c*step
    # shift?
    if shift:
        low = self.col.db.scalar(
            "select min(due) from cards where due >= ? and type = 0 "
            "and id not in %s" % scids, start)
        if low is not None:
            shiftby = high - low + 1
            self.col.db.execute("""
update cards set mod=?, usn=?, due=due+? where id not in %s
and due >= ? and queue = 0""" % scids, now, self.col.usn(), shiftby, low)
    # reorder cards
    d = []
    for id, nid in self.col.db.execute(
            "select id, nid from cards where type = 0 and id in "+scids):
        d.append(dict(now=now, due=due[nid], usn=self.col.usn(), cid=id))
    # (fixed: a stray "" left over from a mismatched triple quote was
    # previously concatenated onto this SQL literal)
    self.col.db.executemany(
        "update cards set due=:due,mod=:now,usn=:usn where id = :cid", d)
def buryCards(self, cids):
    """Bury the given cards (queue -2), first removing them from any
    filtered deck and the learning queue."""
    self.col.log(cids)
    self.remFromDyn(cids)
    self.removeLrn(cids)
    args = (intTime(), self.col.usn())
    self.col.db.execute("""
update cards set queue=-2,mod=?,usn=? where id in """ + ids2str(cids), *args)
def install(self, sid, data, fname):
    """Install add-on `sid` from the zip bytes `data`, replacing any
    previous install.

    NOTE(review): unlike the variant that backs up user_files/, this
    version deletes the whole add-on folder - confirm user data loss is
    acceptable on this code path.
    """
    try:
        z = ZipFile(io.BytesIO(data))
    except zipfile.BadZipfile:
        showWarning(_("The download was corrupt. Please try again."))
        return
    name = os.path.splitext(fname)[0]
    # previously installed?
    meta = self.addonMeta(sid)
    base = self.addonsFolder(sid)
    if os.path.exists(base):
        self.deleteAddon(sid)
    # extract
    os.mkdir(base)
    for n in z.namelist():
        if n.endswith("/"):
            # folder; ignore
            continue
        # write
        z.extract(n, base)
    # update metadata
    meta['name'] = name
    meta['mod'] = intTime()
    self.writeAddonMeta(sid, meta)
def _burySiblings(self, card):
    """Bury other cards of card's note that are waiting in the new or
    due-review queues, so siblings aren't shown on the same day."""
    toBury = []
    nconf = self._newConf(card)
    buryNew = nconf.get("bury", True)
    rconf = self._revConf(card)
    buryRev = rconf.get("bury", True)
    # loop through and remove from queues
    for cid, queue in self.col.db.execute("""
select id, queue from cards where nid=? and id!=?
and (queue=0 or (queue=2 and due<=?))""",
            card.nid, card.id, self.today):
        if queue == 2:
            if buryRev:
                toBury.append(cid)
            # if bury disabled, we still discard to give same-day spacing
            try:
                self._revQueue.remove(cid)
            except ValueError:
                pass
        else:
            # if bury disabled, we still discard to give same-day spacing
            if buryNew:
                toBury.append(cid)
            try:
                self._newQueue.remove(cid)
            except ValueError:
                pass
    # then bury
    if toBury:
        self.col.db.execute(
            "update cards set queue=-2,mod=?,usn=? where id in "+ids2str(toBury),
            intTime(), self.col.usn())
        self.col.log(toBury)
def setup_basic():
    """Create two one-note collections (deck2 acting as the server),
    review each once, then align scm and mod times so an initial sync
    is a no-op."""
    global deck1, deck2, client, server
    deck1 = getEmptyDeck()
    # add a note to deck 1
    f = deck1.newNote()
    f["Front"] = u"foo"
    f["Back"] = u"bar"
    f.tags = [u"foo"]
    deck1.addNote(f)
    # answer it
    deck1.reset()
    deck1.sched.answerCard(deck1.sched.getCard(), 4)
    # repeat for deck2
    deck2 = getEmptyDeck(server=True)
    f = deck2.newNote()
    f["Front"] = u"bar"
    f["Back"] = u"bar"
    f.tags = [u"bar"]
    deck2.addNote(f)
    deck2.reset()
    deck2.sched.answerCard(deck2.sched.getCard(), 4)
    # start with same schema and sync time
    deck1.scm = deck2.scm = 0
    # and same mod time, so sync does nothing
    t = intTime(1000)
    deck1.save(mod=t)
    deck2.save(mod=t)
    server = LocalServer(deck2)
    client = Syncer(deck1, server)
def unsuspendCards(self, ids):
    """Restore suspended cards to the queue given by their type."""
    self.col.log(ids)
    sql = ("update cards set queue=type,mod=?,usn=? "
           "where queue = -1 and id in " + ids2str(ids))
    self.col.db.execute(sql, intTime(), self.col.usn())
def registerTags(self, tags):
    """Insert the given tag names into the tags table, ignoring any
    that already exist; all rows share one modification time."""
    now = intTime()
    # bind mod as a parameter instead of %-formatting it into the SQL
    # string, and build the rows with a comprehension
    rows = [{'t': t, 'm': now} for t in tags]
    self.db.executemany("""
insert or ignore into tags (mod, name) values (:m, :t)""", rows)
def downloadIds(self, ids):
    """Download and install the add-ons with the given AnkiWeb ids.

    Returns (log, errs): human-readable success and error messages.
    """
    log = []
    errs = []
    self.mw.progress.start(immediate=True)
    for n in ids:
        ret = download(self.mw, n)
        if ret[0] == "error":
            errs.append(_("Error downloading %(id)s: %(error)s") % dict(
                id=n, error=ret[1]))
            continue
        data, fname = ret
        fname = fname.replace("_", " ")
        name = os.path.splitext(fname)[0]
        ret = self.install(io.BytesIO(data),
                           manifest={"package": str(n), "name": name,
                                     "mod": intTime()})
        if ret[0] is False:
            if ret[1] == "conflicts":
                continue
            if ret[1] == "zip":
                showWarning(_("The download was corrupt. Please try again."))
            elif ret[1] == "manifest":
                showWarning(_("Invalid add-on manifest."))
            # install failed: don't fall through and log it as downloaded
            # (previously the "Downloaded" line was appended even here)
            continue
        # translate the template first, then interpolate, so the gettext
        # msgid is stable (previously _() was called on the formatted
        # string and the lookup could never match a catalog entry)
        log.append(_("Downloaded %(fname)s") % dict(fname=name))
    self.mw.progress.finish()
    return log, errs
def answerCard(self, card, ease):
    """Answer card with ease 1-4: route it to the learn or review
    handler, update the daily stats, and flush scheduling changes."""
    assert ease >= 1 and ease <= 4
    self.col.markReview(card)
    self.reps += 1
    card.reps += 1
    wasNew = (card.queue == 0) and card.type != 2
    if wasNew:
        # put it in the learn queue
        card.queue = 1
        card.type = 1
        card.left = self._startingLeft(card)
        self._updateStats(card, 'new')
    if card.queue == 1:
        self._answerLrnCard(card, ease)
        if not wasNew:
            self._updateStats(card, 'lrn')
    elif card.queue == 2:
        self._answerRevCard(card, ease)
        self._updateStats(card, 'rev')
    else:
        raise Exception("Invalid queue")
    self._updateStats(card, 'time', card.timeTaken())
    card.mod = intTime()
    card.usn = self.col.usn()
    card.flushSched()
def _migrateDeckTbl(self):
    """Convert the old single-row `decks` table into the new `col` row,
    folding per-deck options into the default deck config."""
    db = self.db
    db.execute("delete from col")
    db.execute("""
insert or replace into col select id, cast(created as int), :t,
:t, 99, 0, 0, cast(lastSync as int),
"", "", "", "", "" from decks""", t=intTime())
    # prepare a deck to store the old deck options
    g, gc, conf = _getColVars(db)
    # delete old selective study settings, which we can't auto-upgrade easily
    keys = ("newActive", "newInactive", "revActive", "revInactive")
    for k in keys:
        db.execute("delete from deckVars where key=:k", k=k)
    # copy other settings, ignoring deck order as there's a new default
    gc['new']['perDay'] = db.scalar("select newCardsPerDay from decks")
    gc['new']['order'] = min(1, db.scalar("select newCardOrder from decks"))
    # these are collection level, and can't be imported on a per-deck basis
    # conf['newSpread'] = db.scalar("select newCardSpacing from decks")
    # conf['timeLim'] = db.scalar("select sessionTimeLimit from decks")
    # add any deck vars and save
    dkeys = ("hexCache", "cssCache")
    for (k, v) in db.execute("select * from deckVars").fetchall():
        if k in dkeys:
            # cache entries are dropped rather than migrated
            pass
        else:
            conf[k] = v
    _addColVars(db, g, gc, conf)
    # clean up
    db.execute("drop table decks")
    db.execute("drop table deckVars")
def fixIntegrity(self):
    "Fix possible problems and rebuild caches."
    problems = []
    self.save()
    # NOTE(review): oldSize/newSize are computed but never used in this
    # version - possibly leftovers from a size-delta report
    oldSize = os.stat(self.path)[stat.ST_SIZE]
    if self.db.scalar("pragma integrity_check") != "ok":
        # the file itself is corrupt; nothing below can help
        return _("Collection is corrupt. Please see the manual.")
    # delete any notes with missing cards
    ids = self.db.list("""
select id from notes where id not in (select distinct nid from cards)""")
    self._remNotes(ids)
    # tags
    self.tags.registerNotes()
    # field cache
    for m in self.models.all():
        self.updateFieldCache(self.models.nids(m))
    # new card position
    self.conf['nextPos'] = self.db.scalar(
        "select max(due)+1 from cards where type = 0") or 0
    # reviews should have a reasonable due #
    ids = self.db.list(
        "select id from cards where queue = 2 and due > 10000")
    if ids:
        problems.append("Reviews had incorrect due date.")
        self.db.execute(
            "update cards set due = 0, mod = ?, usn = ? where id in %s"
            % ids2str(ids), intTime(), self.usn())
    # and finally, optimize
    self.optimize()
    newSize = os.stat(self.path)[stat.ST_SIZE]
    txt = _("Database rebuilt and optimized.")
    # compute ok before appending the summary line, which isn't a problem
    ok = not problems
    problems.append(txt)
    self.save()
    return ("\n".join(problems), ok)
def _migrateModels(self):
    """Upgrade the old models tables into the JSON blob in col.models,
    deriving each new model id from the old row id."""
    import anki.models
    db = self.db
    # NOTE(review): `times` is never used in this variant of the upgrade
    times = {}
    mods = {}  # new model id -> model dict
    for row in db.all(
            "select id, name from models"):
        # use only first 31 bits if not old anki id
        t = abs(row[0])
        if t > 4294967296:
            # NOTE(review): despite the comment above, this keeps the HIGH
            # 32 bits (right shift), not the low ones - confirm intent
            t >>= 32
        assert t > 0
        m = anki.models.defaultModel.copy()
        m['id'] = t
        m['name'] = row[1]
        m['mod'] = intTime()
        m['tags'] = []
        m['flds'] = self._fieldsForModel(row[0])
        m['tmpls'] = self._templatesForModel(row[0], m['flds'])
        mods[m['id']] = m
        # repoint existing notes at the new model id
        db.execute("update notes set mid = ? where mid = ?", t, row[0])
    # save and clean up
    db.execute("update col set models = ?", json.dumps(mods))
    db.execute("drop table fieldModels")
    db.execute("drop table cardModels")
    db.execute("drop table models")
def _importCards(self):
    """Copy cards (and their revlog entries) from src to dst for the
    notes that were imported, skipping cards already present."""
    if not self.needCards:
        return
    # build map of (guid, ord) -> cid and used id cache
    self._cards = {}
    existing = {}
    for guid, ord, cid in self.dst.db.execute(
            "select f.guid, c.ord, c.id from cards c, notes f "
            "where c.nid = f.id"):
        existing[cid] = True
        self._cards[(guid, ord)] = cid
    # loop through src
    cards = []
    revlog = []
    cnt = 0
    usn = self.dst.usn()
    # day-count difference, used to shift relative review due dates
    aheadBy = self.src.sched.today - self.dst.sched.today
    for card in self.src.db.execute(
            "select f.guid, f.mid, c.* from cards c, notes f "
            "where c.nid = f.id"):
        guid = card[0]
        # does the card's note exist in dst col?
        if guid not in self._notes:
            continue
        dnid = self._notes[guid]
        # does the card already exist in the dst col?
        ord = card[5]
        if (guid, ord) in self._cards:
            # fixme: in future, could update if newer mod time
            continue
        # doesn't exist. strip off note info, and save src id for later
        card = list(card[2:])
        scid = card[0]
        # ensure the card id is unique
        while card[0] in existing:
            card[0] += 999
        existing[card[0]] = True
        # update cid, nid, etc
        card[1] = self._notes[guid][0]
        card[2] = self._did(card[2])
        card[4] = intTime()
        card[5] = usn
        # review cards have a due date relative to collection
        if card[7] in (2, 3):
            card[8] -= aheadBy
        cards.append(card)
        # we need to import revlog, rewriting card ids and bumping usn
        for rev in self.src.db.execute(
                "select * from revlog where cid = ?", scid):
            rev = list(rev)
            rev[1] = card[0]
            rev[2] = self.dst.usn()
            revlog.append(rev)
        cnt += 1
    # apply
    self.dst.db.executemany("""
insert or ignore into cards values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)""",
                            cards)
    self.dst.db.executemany("""
insert or ignore into revlog values (?,?,?,?,?,?,?,?,?)""", revlog)
    self.log.append(_("%d cards imported.") % cnt)
def updateNotes(allDb):
    """Recompute MorphMan data for every enabled note.

    For each note whose model/tags are enabled, recompute the unknown/
    unmature morpheme sets against the seen/known/mature databases,
    derive the Morph Man Index (MMI), rewrite the configured fields and
    tags, and finally reorder new cards by MMI. Returns the known-morphs
    database so callers can reuse it.
    """
    t_0, now, db = time.time(), intTime(), mw.col.db
    TAG = mw.col.tags  # type: TagManager
    ds, nid2mmi = [], {}  # note-update rows; nid -> computed MMI
    mw.progress.start(label='Updating data', immediate=True)
    fidDb = allDb.fidDb(recalc=True)
    loc_db = allDb.locDb(recalc=False)  # type: Dict[Location, Set[Morpheme]]

    # read tag names
    compTag, vocabTag, freshTag, notReadyTag, alreadyKnownTag, priorityTag, tooShortTag, tooLongTag, frequencyTag = tagNames = cfg(
        'Tag_Comprehension'), cfg('Tag_Vocab'), cfg('Tag_Fresh'), cfg(
        'Tag_NotReady'), cfg('Tag_AlreadyKnown'), cfg('Tag_Priority'), cfg(
        'Tag_TooShort'), cfg('Tag_TooLong'), cfg('Tag_Frequency')
    TAG.register(tagNames)
    badLengthTag = cfg('Tag_BadLength')

    # handle secondary databases
    mw.progress.update(label='Creating seen/known/mature from all.db')
    seenDb = filterDbByMat(allDb, cfg('threshold_seen'))
    knownDb = filterDbByMat(allDb, cfg('threshold_known'))
    matureDb = filterDbByMat(allDb, cfg('threshold_mature'))
    mw.progress.update(label='Loading priority.db')
    priorityDb = MorphDb(cfg('path_priority'), ignoreErrors=True)

    mw.progress.update(label='Loading frequency.txt')
    frequencyListPath = cfg('path_frequency')
    frequency_map = {}
    frequency_has_morphemes = False

    try:
        with io.open(frequencyListPath, encoding='utf-8-sig') as csvfile:
            csvreader = csv.reader(csvfile, delimiter="\t")
            rows = [row for row in csvreader]
            # a "#study_plan_frequency" header means full morpheme rows;
            # otherwise the file is a plain one-word-per-line list
            if rows[0][0] == "#study_plan_frequency":
                frequency_has_morphemes = True
                frequency_map = dict(
                    zip([
                        Morpheme(row[0], row[1], row[2], row[3], row[4],
                                 row[5]) for row in rows[1:]
                    ], itertools.count(0)))
            else:
                frequency_map = dict(
                    zip([row[0] for row in rows], itertools.count(0)))
    except FileNotFoundError:
        # no frequency list configured; bonuses simply don't apply
        pass

    frequencyListLength = len(frequency_map)

    # prefetch cfg for fields
    field_focus_morph = cfg('Field_FocusMorph')
    field_unknown_count = cfg('Field_UnknownMorphCount')
    field_unmature_count = cfg('Field_UnmatureMorphCount')
    field_morph_man_index = cfg('Field_MorphManIndex')
    field_unknowns = cfg('Field_Unknowns')
    field_unmatures = cfg('Field_Unmatures')
    field_unknown_freq = cfg('Field_UnknownFreq')
    field_focus_morph_pos = cfg("Field_FocusMorphPos")

    skip_comprehension_cards = cfg('Option_SkipComprehensionCards')
    skip_fresh_cards = cfg('Option_SkipFreshVocabCards')

    # Find all morphs that changed maturity and the notes that refer to them.
    last_maturities = allDb.meta.get('last_maturities', {})
    new_maturities = {}
    refresh_notes = set()

    # Recompute everything if preferences changed.
    last_preferences = allDb.meta.get('last_preferences', {})
    if not last_preferences == get_preferences():
        print("Preferences changed. Updating all notes...")
        last_updated = 0
    else:
        last_updated = allDb.meta.get('last_updated', 0)

    # Todo: Remove this forced 0 once we add checks for other changes like new frequency.txt files.
    last_updated = 0

    # If we're updating everything anyway, clear the notes set.
    # (with last_updated forced to 0 above, this incremental path is
    # currently dead code - see the Todo)
    if last_updated > 0:
        for m, locs in allDb.db.items():
            # encode seen/known/mature as a 3-bit maturity mask
            maturity_bits = 0
            if seenDb.matches(m):
                maturity_bits |= 1
            if knownDb.matches(m):
                maturity_bits |= 2
            if matureDb.matches(m):
                maturity_bits |= 4
            new_maturities[m] = maturity_bits
            # refresh any note referencing a morph whose maturity changed
            if last_maturities.get(m, -1) != maturity_bits:
                for loc in locs:
                    if isinstance(loc, AnkiDeck):
                        refresh_notes.add(loc.noteId)

    included_types, include_all = getModifyEnabledModels()
    included_mids = [
        m['id'] for m in mw.col.models.all()
        if include_all or m['name'] in included_types
    ]

    query = '''
        select id, mid, flds, guid, tags from notes
        WHERE mid IN ({0}) and ( mod > {2} or id in ({1}) )
        '''.format(','.join([str(m) for m in included_mids]),
                   ','.join([str(id) for id in refresh_notes]), last_updated)
    query_results = db.execute(query)
    N_notes = len(query_results)
    mw.progress.finish()
    mw.progress.start(label='Updating notes', max=N_notes, immediate=True)
    for i, (nid, mid, flds, guid, tags) in enumerate(query_results):
        ts = TAG.split(tags)
        if i % 500 == 0:
            mw.progress.update(value=i)
        # per-model config accessor
        C = partial(cfg, model_id=mid)

        notecfg = getFilterByMidAndTags(mid, ts)
        if notecfg is None or not notecfg['Modify']:
            continue

        # Get all morphemes for note
        morphemes = set()
        for fieldName in notecfg['Fields']:
            try:
                loc = fidDb[(nid, guid, fieldName)]
                morphemes.update(loc_db[loc])
            except KeyError:
                continue

        proper_nouns_known = cfg('Option_ProperNounsAlreadyKnown')

        # Determine un-seen/known/mature and i+N
        unseens, unknowns, unmatures, new_knowns = set(), set(), set(), set()
        for morpheme in morphemes:
            if proper_nouns_known and morpheme.isProperNoun():
                continue
            if not seenDb.matches(morpheme):
                unseens.add(morpheme)
            if not knownDb.matches(morpheme):
                unknowns.add(morpheme)
            if not matureDb.matches(morpheme):
                unmatures.add(morpheme)
                # known but not yet mature = recently learned
                if knownDb.matches(morpheme):
                    new_knowns.add(morpheme)

        # Determine MMI - Morph Man Index
        N, N_s, N_k, N_m = len(morphemes), len(unseens), len(unknowns), len(
            unmatures)

        # Bail early for lite update
        if N_k > 2 and C('only update k+2 and below'):
            continue

        # add bonus for morphs in priority.db and frequency.txt
        frequencyBonus = C('frequency.txt bonus')
        isPriority = False
        isFrequency = False

        focusMorph = None
        F_k = 0
        usefulness = 0
        for focusMorph in unknowns:
            F_k += allDb.frequency(focusMorph)
            if priorityDb.frequency(focusMorph) > 0:
                isPriority = True
                usefulness += C('priority.db weight')
            deinfFocusMorph = focusMorph.deinflected()
            if frequency_has_morphemes:
                focusMorphIndex = frequency_map.get(deinfFocusMorph, -1)
            else:
                focusMorphIndex = frequency_map.get(deinfFocusMorph.base, -1)
            if focusMorphIndex >= 0:
                isFrequency = True
                # The bigger this number, the lower mmi becomes
                usefulness += int(
                    round(frequencyBonus *
                          (1 - focusMorphIndex / frequencyListLength)))

        # average frequency of unknowns (ie. how common the word is within your collection)
        F_k_avg = F_k // N_k if N_k > 0 else F_k
        usefulness += F_k_avg

        # add bonus for studying recent learned knowns (reinforce)
        for morpheme in new_knowns:
            locs = knownDb.getMatchingLocs(morpheme)
            if locs:
                ivl = min(1, max(loc.maturity for loc in locs))
                # TODO: maybe average this so it doesnt favor long sentences
                usefulness += C('reinforce new vocab weight') // ivl

        if any(morpheme.pos == '動詞'
               for morpheme in unknowns):  # FIXME: this isn't working???
            usefulness += C('verb bonus')

        # invert: lower usefulness value = more useful card
        usefulness = 99999 - min(99999, usefulness)

        # difference from optimal length range (too little context vs long sentence)
        lenDiffRaw = min(N - C('min good sentence length'),
                         max(0, N - C('max good sentence length')))
        lenDiff = min(9, abs(lenDiffRaw))

        # Fill in various fields/tags on the note based on cfg
        fs = splitFields(flds)

        # clear any 'special' tags, the appropriate will be set in the next few lines
        ts = [
            t for t in ts
            if t not in (notReadyTag, compTag, vocabTag, freshTag)
        ]

        # determine card type
        if N_m == 0:  # sentence comprehension card, m+0
            ts.append(compTag)
            if skip_comprehension_cards:
                usefulness += 1000000  # Add a penalty to put these cards at the end of the queue
        elif N_k == 1:  # new vocab card, k+1
            ts.append(vocabTag)
            # focusMorph is the sole unknown left over from the loop above
            setField(mid, fs, field_focus_morph, focusMorph.base)
            setField(mid, fs, field_focus_morph_pos, focusMorph.pos)
        elif N_k > 1:  # M+1+ and K+2+
            ts.append(notReadyTag)
        elif N_m == 1:  # we have k+0, and m+1, so this card does not introduce a new vocabulary -> card for newly learned morpheme
            ts.append(freshTag)
            if skip_fresh_cards:
                usefulness += 1000000  # Add a penalty to put these cards at the end of the queue
            focusMorph = next(iter(unmatures))
            setField(mid, fs, field_focus_morph, focusMorph.base)
            setField(mid, fs, field_focus_morph_pos, focusMorph.pos)
        else:  # only case left: we have k+0, but m+2 or higher, so this card does not introduce a new vocabulary -> card for newly learned morpheme
            ts.append(freshTag)
            if skip_fresh_cards:
                usefulness += 1000000  # Add a penalty to put these cards at the end of the queue

        # calculate mmi
        mmi = 100000 * N_k + 1000 * lenDiff + int(round(usefulness))
        if C('set due based on mmi'):
            nid2mmi[nid] = mmi

        # set type agnostic fields
        setField(mid, fs, field_unknown_count, '%d' % N_k)
        setField(mid, fs, field_unmature_count, '%d' % N_m)
        setField(mid, fs, field_morph_man_index, '%d' % mmi)
        setField(mid, fs, field_unknowns, ', '.join(u.base for u in unknowns))
        setField(mid, fs, field_unmatures,
                 ', '.join(u.base for u in unmatures))
        setField(mid, fs, field_unknown_freq, '%d' % F_k_avg)

        # remove deprecated tag
        if badLengthTag is not None and badLengthTag in ts:
            ts.remove(badLengthTag)

        # other tags
        if priorityTag in ts:
            ts.remove(priorityTag)
        if isPriority:
            ts.append(priorityTag)
        if frequencyTag in ts:
            ts.remove(frequencyTag)
        if isFrequency:
            ts.append(frequencyTag)
        if tooShortTag in ts:
            ts.remove(tooShortTag)
        if lenDiffRaw < 0:
            ts.append(tooShortTag)
        if tooLongTag in ts:
            ts.remove(tooLongTag)
        if lenDiffRaw > 0:
            ts.append(tooLongTag)

        # remove unnecessary tags
        if not cfg('Option_SetNotRequiredTags'):
            unnecessary = [priorityTag, tooShortTag, tooLongTag]
            ts = [tag for tag in ts if tag not in unnecessary]

        # update sql db
        tags_ = TAG.join(TAG.canonify(ts))
        flds_ = joinFields(fs)
        if flds != flds_ or tags != tags_:  # only update notes that have changed
            csum = fieldChecksum(fs[0])
            sfld = stripHTML(fs[getSortFieldIndex(mid)])
            ds.append((tags_, flds_, sfld, csum, now, mw.col.usn(), nid))

    mw.progress.update(label='Updating anki database...')
    mw.col.db.executemany(
        'update notes set tags=?, flds=?, sfld=?, csum=?, mod=?, usn=? where id=?',
        ds)

    # Now reorder new cards based on MMI
    mw.progress.update(label='Updating new card ordering...')
    ds = []

    # "type = 0": new cards
    # "type = 1": learning cards [is supposed to be learning: in my case no learning card had this type]
    # "type = 2": review cards
    for (cid, nid,
         due) in db.execute('select id, nid, due from cards where type = 0'):
        if nid in nid2mmi:  # owise it was disabled
            due_ = nid2mmi[nid]
            if due != due_:  # only update cards that have changed
                ds.append((due_, now, mw.col.usn(), cid))
    mw.col.db.executemany('update cards set due=?, mod=?, usn=? where id=?',
                          ds)
    mw.reset()

    allDb.meta['last_preferences'] = get_preferences()
    allDb.meta['last_maturities'] = new_maturities
    allDb.meta['last_updated'] = int(time.time() + 0.5)

    printf('Updated %d notes in %f sec' % (N_notes, time.time() - t_0))

    if cfg('saveDbs'):
        mw.progress.update(label='Saving all/seen/known/mature dbs')
        allDb.save(cfg('path_all'))
        seenDb.save(cfg('path_seen'))
        knownDb.save(cfg('path_known'))
        matureDb.save(cfg('path_mature'))
        printf('Updated %d notes + saved dbs in %f sec' %
               (N_notes, time.time() - t_0))

    mw.progress.finish()
    return knownDb
def test_threeway2():
    """Three-way sync test: edits made on either client must propagate
    through the shared server to all three collections."""
    # for this test we want ms precision of notes so we don't have to
    # sleep a lot
    import anki.notes
    intTime = anki.notes.intTime
    # NOTE(review): this monkeypatch ignores its argument and is never
    # restored, so it leaks into later tests in the same process
    anki.notes.intTime = lambda x=1: intTime(1000)

    def setup():
        # create collection 1 with a single note
        c1 = getEmptyDeck()
        f = c1.newNote()
        f['Front'] = u"startingpoint"
        nid = f.id
        c1.addNote(f)
        cid = f.cards()[0].id
        c1.beforeUpload()
        # start both clients and server off in this state
        s1path = c1.path.replace(".anki2", "-s1.anki2")
        c2path = c1.path.replace(".anki2", "-c2.anki2")
        shutil.copy2(c1.path, s1path)
        shutil.copy2(c1.path, c2path)
        # open them
        c1 = Collection(c1.path)
        c2 = Collection(c2path)
        s1 = Collection(s1path, server=True)
        return c1, c2, s1, nid, cid
    c1, c2, s1, nid, cid = setup()
    # modify c1 then sync c1->s1
    n = c1.getNote(nid)
    t = "firstmod"
    n['Front'] = t
    n.flush()
    c1.db.execute("update cards set mod=1, usn=-1")
    srv = LocalServer(s1)
    clnt1 = Syncer(c1, srv)
    clnt1.sync()
    n.load()
    assert n['Front'] == t
    assert s1.getNote(nid)['Front'] == t
    assert s1.db.scalar("select mod from cards") == 1
    # sync s1->c2
    clnt2 = Syncer(c2, srv)
    clnt2.sync()
    assert c2.getNote(nid)['Front'] == t
    assert c2.db.scalar("select mod from cards") == 1
    # modify c1 and sync
    time.sleep(0.001)
    t = "secondmod"
    n = c1.getNote(nid)
    n['Front'] = t
    n.flush()
    c1.db.execute("update cards set mod=2, usn=-1")
    clnt1.sync()
    # modify c2 and sync - both c2 and server should be the same
    time.sleep(0.001)
    t2 = "thirdmod"
    n = c2.getNote(nid)
    n['Front'] = t2
    n.flush()
    c2.db.execute("update cards set mod=3, usn=-1")
    clnt2.sync()
    n.load()
    assert n['Front'] == t2
    assert c2.db.scalar("select mod from cards") == 3
    n = s1.getNote(nid)
    assert n['Front'] == t2
    assert s1.db.scalar("select mod from cards") == 3
    # and syncing c1 again should yield the updated note as well
    clnt1.sync()
    n = s1.getNote(nid)
    assert n['Front'] == t2
    assert s1.db.scalar("select mod from cards") == 3
    n = c1.getNote(nid)
    assert n['Front'] == t2
    assert c1.db.scalar("select mod from cards") == 3
def unsuspendCards(self, ids):
    """Return suspended cards to their normal queue.

    Only cards currently in the suspended queue (-1) are affected; their
    queue is restored from ``type`` and mod/usn are bumped for sync.
    """
    sql = ("update cards set queue=type,mod=?,usn=? "
           "where queue = -1 and id in " + ids2str(ids))
    self.col.db.execute(sql, intTime(), self.col.usn())
def setUserFlag(self, flag, cids):
    """Set the user flag (0-7) on the given card ids, bumping usn/mod."""
    assert 0 <= flag <= 7
    # clear the low three flag bits, then OR in the new flag value
    query = (
        "update cards set flags = (flags & ~?) | ?, usn=?, mod=? where id in %s"
        % ids2str(cids)
    )
    flag_mask = 0b111
    self.db.execute(query, flag_mask, flag, self.usn(), intTime())
def _setID(self, m):
    """Assign model ``m`` a fresh id derived from the current time in ms,
    retrying until the id is not already used by an existing model."""
    candidate = str(intTime(1000))
    while candidate in self.models:
        candidate = str(intTime(1000))
    m['id'] = candidate
def genCards(self, nids: List[int]) -> List[int]: "Generate cards for non-empty templates, return ids to remove." # build map of (nid,ord) so we don't create dupes snids = ids2str(nids) have: Dict[int, Dict[int, int]] = {} dids: Dict[int, Optional[int]] = {} dues: Dict[int, int] = {} for id, nid, ord, did, due, odue, odid, type in self.db.execute( "select id, nid, ord, did, due, odue, odid, type from cards where nid in " + snids ): # existing cards if nid not in have: have[nid] = {} have[nid][ord] = id # if in a filtered deck, add new cards to original deck if odid != 0: did = odid # and their dids if nid in dids: if dids[nid] and dids[nid] != did: # cards are in two or more different decks; revert to # model default dids[nid] = None else: # first card or multiple cards in same deck dids[nid] = did # save due if odid != 0: due = odue if nid not in dues and type == 0: # Add due to new card only if it's the due of a new sibling dues[nid] = due # build cards for each note data = [] ts = maxID(self.db) now = intTime() rem = [] usn = self.usn() for nid, mid, flds in self.db.execute( "select id, mid, flds from notes where id in " + snids ): model = self.models.get(mid) assert model avail = self.models.availOrds(model, flds) did = dids.get(nid) or model["did"] due = dues.get(nid) # add any missing cards for t in self._tmplsFromOrds(model, avail): doHave = nid in have and t["ord"] in have[nid] if not doHave: # check deck is not a cram deck did = t["did"] or did if self.decks.isDyn(did): did = 1 # if the deck doesn't exist, use default instead did = self.decks.get(did)["id"] # use sibling due# if there is one, else use a new id if due is None: due = self.nextID("pos") data.append((ts, nid, did, t["ord"], now, usn, due)) ts += 1 # note any cards that need removing if nid in have: for ord, id in list(have[nid].items()): if ord not in avail: rem.append(id) # bulk update self.db.executemany( """ insert into cards values (?,?,?,?,?,?,0,0,?,0,0,0,0,0,0,0,0,"")""", data, ) return 
rem
def localOffset(self) -> Optional[int]:
    """Minutes west of UTC, or None when the V1 scheduler is active
    (the offset only applies to the V2 scheduler)."""
    if not isinstance(self.sched, V1Scheduler):
        return self.backend.local_minutes_west(intTime())
    return None
def computeValues():
    """Populate the module-level ``values`` map with per-deck statistics.

    For each named statistic, runs a ``select did, count/sum ... group by did``
    query against the collection DB and stores the result as
    ``values[name][did] = value``.

    NOTE(review): relies on module globals ``mw``, ``debug``, ``values`` and
    the QUEUE_* constants — assumed defined elsewhere in this add-on.
    """
    debug("Compute values")
    # cards due before this moment count as "learning now"
    cutoff = intTime() + mw.col.get_config('collapseTime')
    today = mw.col.sched.today
    tomorrow = today + 1
    # revlog ids are epoch-ms; entries newer than this were reviewed today
    yesterdayLimit = (mw.col.sched.dayCutoff - 86400) * 1000
    debug(f"Yesterday limit is {yesterdayLimit}")
    # each entry: (name, where-condition, expression to sum instead of
    # counting rows, table/join clause — empty means plain "cards")
    queriesCardCount = (
        [(f"flag {i}", f"(flags & 7) == {i}", "", "") for i in range(5)]
        + [
            ("due tomorrow", f"queue in ({QUEUE_REV},{QUEUE_DAY_LRN}) and due = {tomorrow}", "", ""),
            ("learning now from today", f"queue = {QUEUE_LRN} and due <= {cutoff}", "", ""),
            ("learning today from past", f"queue = {QUEUE_DAY_LRN} and due <= {today}", "", ""),
            ("learning later today", f"queue = {QUEUE_LRN} and due > {cutoff}", "", ""),
            ("learning future", f"queue = {QUEUE_DAY_LRN} and due > {today}", "", ""),
            ("learning today repetition from today", f"queue = {QUEUE_LRN}", f"left/1000", ""),
            ("learning today repetition from past", f"queue = {QUEUE_DAY_LRN}", f"left/1000", ""),
            ("learning repetition from today", f"queue = {QUEUE_LRN}", f"mod%1000", ""),
            ("learning repetition from past", f"queue = {QUEUE_DAY_LRN}", f"mod%1000", ""),
            ("review due", f"queue = {QUEUE_REV} and due <= {today}", "", ""),
            ("reviewed today", f"queue = {QUEUE_REV} and due>0 and due-ivl = {today}", "", ""),
            ("repeated today", f"revlog.id>{yesterdayLimit}", "", "revlog inner join cards on revlog.cid = cards.id"),
            ("repeated", "", "", f"revlog inner join cards on revlog.cid = cards.id"),
            ("unseen", f"queue = {QUEUE_NEW_CRAM}", "", ""),
            ("buried", f"queue = {QUEUE_USER_BURIED} or queue = {QUEUE_SCHED_BURIED}", "", ""),
            ("suspended", f"queue = {QUEUE_SUSPENDED}", "", ""),
            ("cards", "", "", ""),
            ("undue", f"queue = {QUEUE_REV} and due > {today}", "", ""),
            ("mature", f"queue = {QUEUE_REV} and ivl >= 21", "", ""),
            ("young", f"queue = {QUEUE_REV} and 0<ivl and ivl <21", "", ""),
        ])
    for name, condition, addend, table in queriesCardCount:
        if addend:
            element = f" sum({addend})"
        else:
            element = f" count(*)"
        if condition:
            condition = f" where {condition}"
        if not table:
            table = "cards"
        query = f"select did, {element} from {table} {condition} group by did"
        results = mw.col.db.all(query)
        # BUG FIX: this debug() call was a plain triple-quoted string without
        # the f prefix, so "{name}"/"{query}" were logged literally instead of
        # being interpolated like every other debug() call in this function.
        debug(f"""For {name}: query "{query}".""")
        values[name] = dict()
        for did, value in results:
            debug(f"In deck {did} there are {value} cards of kind {name}")
            values[name][did] = value
def genCards(self, nids, changedOrNewReq=None):
    #new parameter: changedOrNewReq
    # The only differences are:
    # changedOrNewReq is passed to models.availOrds
    # if changedOrNewReq is not None, then only cards in positions belonging to changedOrNewReq may be returned
    """Ids of cards needed to be removed.

    Generate missing cards of a note with id in nids and with ord in
    changedOrNewReq.
    """
    # build map of (nid,ord) so we don't create dupes
    snids = ids2str(nids)
    have = {}  # Associates to each nid a dictionary from card's order to card id.
    dids = {}  # Associates to each nid the only deck id containing its cards. Or None if there are multiple decks.
    dues = {}  # Associates to each nid the due value of the last card seen.
    for id, nid, ord, did, due, odue, odid in self.db.execute(
        "select id, nid, ord, did, due, odue, odid from cards where nid in " + snids):
        # existing cards
        if nid not in have:
            have[nid] = {}
        have[nid][ord] = id
        # if in a filtered deck, add new cards to original deck
        if odid != 0:
            did = odid
        # and their dids
        if nid in dids:
            if dids[nid] and dids[nid] != did:
                # cards are in two or more different decks; revert to
                # model default
                dids[nid] = None
        else:
            # first card or multiple cards in same deck
            dids[nid] = did
        # save due
        if odid != 0:
            due = odue
        if nid not in dues:
            dues[nid] = due
    # build cards for each note
    data = []  # Tuples for cards to create. Each tuple is newCid, nid, did, ord, now, usn, due
    ts = maxID(self.db)
    now = intTime()
    rem = []  # cards to remove
    usn = self.usn()
    for nid, mid, flds in self.db.execute(
        "select id, mid, flds from notes where id in " + snids):
        model = self.models.get(mid)
        avail = self.models.availOrds(
            model, flds, changedOrNewReq)  # modified: adding last parameter.
        did = dids.get(nid) or model['did']
        due = dues.get(nid)
        # add any missing cards
        for t in self._tmplsFromOrds(model, avail):
            doHave = nid in have and t['ord'] in have[nid]
            if not doHave:
                # check deck is not a cram deck
                did = t['did'] or did
                if self.decks.isDyn(did):
                    did = 1
                # if the deck doesn't exist, use default instead
                did = self.decks.get(did)['id']
                # use sibling due# if there is one, else use a new id
                if due is None:
                    due = self.nextID("pos")
                data.append((ts, nid, did, t['ord'], now, usn, due))
                ts += 1
        # note any cards that need removing
        if nid in have:
            for ord, id in list(have[nid].items()):
                if ((changedOrNewReq is None or ord in changedOrNewReq) and  # Adding this line to the condition
                        ord not in avail):
                    rem.append(id)
    # bulk update
    self.db.executemany(
        """
insert into cards values (?,?,?,?,?,?,0,0,?,0,0,0,0,0,0,0,0,"")""",
        data)
    return rem
def meta(self):
    """Return the legacy sync metadata tuple:
    (collection mod time, schema mod time, usn, current server time, media usn placeholder)."""
    server_now = intTime()
    return (
        self.col.mod,
        self.col.scm,
        self.col._usn,
        server_now,
        None,
    )
def deck_due_tree(self, top_deck_id: int = 0) -> DeckTreeNode:
    """Return a tree of decks annotated with due counts.

    When ``top_deck_id`` is non-zero, the counts are limited to that node's
    subtree.
    """
    now = intTime()
    return self.col._backend.deck_tree(top_deck_id=top_deck_id, now=now)
from send2trash import send2trash import anki.lang import anki.sound import aqt.forms from anki.db import DB from anki.lang import _ from anki.utils import intTime, isMac, isWin from aqt import appHelpSite from aqt.qt import * from aqt.utils import showWarning metaConf = dict( ver=0, updates=True, created=intTime(), id=random.randrange(0, 2**63), lastMsg=-1, suppressUpdate=False, firstRun=True, defaultLang=None, disabledAddons=[], ) profileConf: Dict[str, Any] = dict( # profile mainWindowGeom=None, mainWindowState=None, numBackups=50, lastOptimize=intTime(), # editing
def _updateLrnCutoff(self, force: bool) -> bool: nextCutoff = intTime() + self.col.conf["collapseTime"] if nextCutoff - self._lrnCutoff > 60 or force: self._lrnCutoff = nextCutoff return True return False
def fixIntegrity(self):
    """Fix possible problems and rebuild caches.

    Returns a tuple (report text, ok flag). ``ok`` is False when any problem
    was found, in which case a full sync is forced via modSchema().
    The phases run in a fixed order: integrity check, orphaned/invalid
    notes and cards, per-model repairs, tag/field-cache rebuild, due-number
    repairs, v2-scheduler rounding fixes, then optimize.
    """
    problems = []
    curs = self.db.cursor()
    self.save()
    # NOTE(review): oldSize/newSize are computed but never reported — kept
    # as-is to preserve behavior.
    oldSize = os.stat(self.path)[stat.ST_SIZE]
    if self.db.scalar("pragma integrity_check") != "ok":
        return (_("Collection is corrupt. Please see the manual."), False)
    # note types with a missing model
    ids = self.db.list("""
select id from notes where mid not in """ + ids2str(self.models.ids()))
    if ids:
        problems.append(
            ngettext("Deleted %d note with missing note type.",
                     "Deleted %d notes with missing note type.",
                     len(ids)) % len(ids))
        self.remNotes(ids)
    # for each model
    for m in self.models.all():
        for t in m['tmpls']:
            if t['did'] == "None":
                # AnkiDroid stored the string "None" instead of null
                t['did'] = None
                problems.append(_("Fixed AnkiDroid deck override bug."))
        self.models.save(m, updateReqs=False)
        if m['type'] == MODEL_STD:
            # model with missing req specification
            if 'req' not in m:
                self.models._updateRequired(m)
                problems.append(_("Fixed note type: %s") % m['name'])
            # cards with invalid ordinal
            ids = self.db.list("""
select id from cards where ord not in %s and nid in (
select id from notes where mid = ?)""" %
                               ids2str([t['ord'] for t in m['tmpls']]),
                               m['id'])
            if ids:
                problems.append(
                    ngettext("Deleted %d card with missing template.",
                             "Deleted %d cards with missing template.",
                             len(ids)) % len(ids))
                self.remCards(ids)
        # notes with invalid field count
        ids = []
        for id, flds in self.db.execute(
                "select id, flds from notes where mid = ?", m['id']):
            # fields are joined with 0x1f separators; count must match model
            if (flds.count("\x1f") + 1) != len(m['flds']):
                ids.append(id)
        if ids:
            problems.append(
                ngettext("Deleted %d note with wrong field count.",
                         "Deleted %d notes with wrong field count.",
                         len(ids)) % len(ids))
            self.remNotes(ids)
    # delete any notes with missing cards
    ids = self.db.list("""
select id from notes where id not in (select distinct nid from cards)""")
    if ids:
        cnt = len(ids)
        problems.append(
            ngettext("Deleted %d note with no cards.",
                     "Deleted %d notes with no cards.", cnt) % cnt)
        self._remNotes(ids)
    # cards with missing notes
    ids = self.db.list("""
select id from cards where nid not in (select id from notes)""")
    if ids:
        cnt = len(ids)
        problems.append(
            ngettext("Deleted %d card with missing note.",
                     "Deleted %d cards with missing note.", cnt) % cnt)
        self.remCards(ids)
    # cards with odue set when it shouldn't be
    ids = self.db.list("""
select id from cards where odue > 0 and (type=1 or queue=2) and not odid""")
    if ids:
        cnt = len(ids)
        problems.append(
            ngettext("Fixed %d card with invalid properties.",
                     "Fixed %d cards with invalid properties.", cnt) % cnt)
        self.db.execute("update cards set odue=0 where id in " + ids2str(ids))
    # cards with odid set when not in a dyn deck
    dids = [id for id in self.decks.allIds() if not self.decks.isDyn(id)]
    ids = self.db.list("""
select id from cards where odid > 0 and did in %s""" % ids2str(dids))
    if ids:
        cnt = len(ids)
        problems.append(
            ngettext("Fixed %d card with invalid properties.",
                     "Fixed %d cards with invalid properties.", cnt) % cnt)
        self.db.execute("update cards set odid=0, odue=0 where id in " + ids2str(ids))
    # tags
    self.tags.registerNotes()
    # field cache
    for m in self.models.all():
        self.updateFieldCache(self.models.nids(m))
    # new cards can't have a due position > 32 bits, so wrap items over
    # 2 million back to 1 million
    curs.execute("""
update cards set due=1000000+due%1000000,mod=?,usn=? where due>=1000000
and type=0""", [intTime(), self.usn()])
    if curs.rowcount:
        problems.append(
            "Found %d new cards with a due number >= 1,000,000 - consider repositioning them in the Browse screen." % curs.rowcount)
    # new card position
    self.conf['nextPos'] = self.db.scalar(
        "select max(due)+1 from cards where type = 0") or 0
    # reviews should have a reasonable due #
    ids = self.db.list(
        "select id from cards where queue = 2 and due > 100000")
    if ids:
        problems.append("Reviews had incorrect due date.")
        self.db.execute(
            "update cards set due = ?, ivl = 1, mod = ?, usn = ? where id in %s"
            % ids2str(ids), self.sched.today, intTime(), self.usn())
    # v2 sched had a bug that could create decimal intervals
    curs.execute(
        "update cards set ivl=round(ivl),due=round(due) where ivl!=round(ivl) or due!=round(due)"
    )
    if curs.rowcount:
        problems.append("Fixed %d cards with v2 scheduler bug." % curs.rowcount)
    curs.execute(
        "update revlog set ivl=round(ivl),lastIvl=round(lastIvl) where ivl!=round(ivl) or lastIvl!=round(lastIvl)"
    )
    if curs.rowcount:
        problems.append(
            "Fixed %d review history entries with v2 scheduler bug."
            % curs.rowcount)
    # models
    if self.models.ensureNotEmpty():
        problems.append("Added missing note type.")
    # and finally, optimize
    self.optimize()
    newSize = os.stat(self.path)[stat.ST_SIZE]
    txt = _("Database rebuilt and optimized.")
    ok = not problems
    problems.append(txt)
    # if any problems were found, force a full sync
    if not ok:
        self.modSchema(check=False)
    self.save()
    return ("\n".join(problems), ok)
def _setID(self, m: NoteType) -> None:
    """Give note type ``m`` a fresh id based on the current time in ms,
    retrying until it does not collide with an existing model id."""
    fresh = str(intTime(1000))
    while fresh in self.models:
        fresh = str(intTime(1000))
    m['id'] = fresh
def _log_and_notify(self, entry: LogEntry) -> None:
    """Append a timestamped media-sync log entry and fire the GUI progress
    hook on the main thread."""
    stamped = LogEntryWithTime(time=intTime(), entry=entry)
    self._log.append(stamped)

    def notify() -> None:
        gui_hooks.media_sync_did_progress(stamped)

    self.mw.taskman.run_on_main(notify)
def save(self, g: Optional[Any] = None) -> None:
    """Mark the deck manager as changed.

    Can be called with either a deck or a deck configuration; when one is
    given, its mod time and usn are bumped for sync.
    """
    if not g:
        self.changed = True
        return
    g["mod"] = intTime()
    g["usn"] = self.col.usn()
    self.changed = True
def _addSchema(db: DB, setColConf: bool = True) -> None:
    """Create the collection's base tables (col, notes, cards, revlog, graves)
    if they do not exist, and seed the single ``col`` row.

    When ``setColConf`` is true, the default collection config variables are
    also written via _addColVars.
    """
    db.executescript("""
create table if not exists col (
    id              integer primary key,
    crt             integer not null,
    mod             integer not null,
    scm             integer not null,
    ver             integer not null,
    dty             integer not null,
    usn             integer not null,
    ls              integer not null,
    conf            text not null,
    models          text not null,
    decks           text not null,
    dconf           text not null,
    tags            text not null
);

create table if not exists notes (
    id              integer primary key,   /* 0 */
    guid            text not null,         /* 1 */
    mid             integer not null,      /* 2 */
    mod             integer not null,      /* 3 */
    usn             integer not null,      /* 4 */
    tags            text not null,         /* 5 */
    flds            text not null,         /* 6 */
    sfld            integer not null,      /* 7 */
    csum            integer not null,      /* 8 */
    flags           integer not null,      /* 9 */
    data            text not null          /* 10 */
);

create table if not exists cards (
    id              integer primary key,   /* 0 */
    nid             integer not null,      /* 1 */
    did             integer not null,      /* 2 */
    ord             integer not null,      /* 3 */
    mod             integer not null,      /* 4 */
    usn             integer not null,      /* 5 */
    type            integer not null,      /* 6 */
    queue           integer not null,      /* 7 */
    due             integer not null,      /* 8 */
    ivl             integer not null,      /* 9 */
    factor          integer not null,      /* 10 */
    reps            integer not null,      /* 11 */
    lapses          integer not null,      /* 12 */
    left            integer not null,      /* 13 */
    odue            integer not null,      /* 14 */
    odid            integer not null,      /* 15 */
    flags           integer not null,      /* 16 */
    data            text not null          /* 17 */
);

create table if not exists revlog (
    id              integer primary key,
    cid             integer not null,
    usn             integer not null,
    ease            integer not null,
    ivl             integer not null,
    lastIvl         integer not null,
    factor          integer not null,
    time            integer not null,
    type            integer not null
);

create table if not exists graves (
    usn             integer not null,
    oid             integer not null,
    type            integer not null
);

insert or ignore into col
values(1,0,0,%(s)s,%(v)s,0,0,0,'','{}','','','{}');
""" % ({
        "v": SCHEMA_VERSION,
        "s": intTime(1000)
    }))
    if setColConf:
        _addColVars(db, *_getColVars(db))
def _importCards(self) -> None:
    """Copy cards (and their revlog entries) from the source collection into
    the destination, skipping cards that already exist there.

    Card rows are handled positionally: after stripping the leading
    guid/mid columns, indices follow the ``cards`` table schema
    (0=id, 1=nid, 2=did, 4=mod, 5=usn, 6=type, 7=queue, 8=due,
    14=odue, 15=odid).
    """
    if self.mustResetLearning:
        self.src.changeSchedulerVer(2)
    # build map of (guid, ord) -> cid and used id cache
    self._cards: Dict[Tuple[str, int], int] = {}
    existing = {}
    for guid, ord, cid in self.dst.db.execute(
        "select f.guid, c.ord, c.id from cards c, notes f " "where c.nid = f.id"
    ):
        existing[cid] = True
        self._cards[(guid, ord)] = cid
    # loop through src
    cards = []
    revlog = []
    cnt = 0
    usn = self.dst.usn()
    # day offset between the two collections; review due dates are relative
    aheadBy = self.src.sched.today - self.dst.sched.today
    for card in self.src.db.execute(
        "select f.guid, f.mid, c.* from cards c, notes f " "where c.nid = f.id"
    ):
        guid = card[0]
        if guid in self._changedGuids:
            guid = self._changedGuids[guid]
        if guid in self._ignoredGuids:
            continue
        # does the card's note exist in dst col?
        if guid not in self._notes:
            continue
        dnid = self._notes[guid]
        # does the card already exist in the dst col?
        ord = card[5]
        if (guid, ord) in self._cards:
            # fixme: in future, could update if newer mod time
            continue
        # doesn't exist. strip off note info, and save src id for later
        card = list(card[2:])
        scid = card[0]
        # ensure the card id is unique
        while card[0] in existing:
            card[0] += 999
        existing[card[0]] = True
        # update cid, nid, etc
        card[1] = self._notes[guid][0]
        card[2] = self._did(card[2])
        card[4] = intTime()
        card[5] = usn
        # review cards have a due date relative to collection
        if card[7] in (2, 3) or card[6] == 2:
            card[8] -= aheadBy
        # odue needs updating too
        if card[14]:
            card[14] -= aheadBy
        # if odid true, convert card from filtered to normal
        if card[15]:
            # odid
            card[15] = 0
            # odue
            card[8] = card[14]
            card[14] = 0
            # queue
            if card[6] == 1:  # type
                card[7] = 0
            else:
                card[7] = card[6]
            # type
            if card[6] == 1:
                card[6] = 0
        cards.append(card)
        # we need to import revlog, rewriting card ids and bumping usn
        for rev in self.src.db.execute("select * from revlog where cid = ?", scid):
            rev = list(rev)
            rev[1] = card[0]
            rev[2] = self.dst.usn()
            revlog.append(rev)
        cnt += 1
    # apply
    self.dst.db.executemany(
        """
insert or ignore into cards values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)""",
        cards,
    )
    self.dst.db.executemany(
        """
insert or ignore into revlog values (?,?,?,?,?,?,?,?,?)""",
        revlog,
    )
def test_clock():
    """Sanity guard: abort the test run when the day rollover is imminent,
    since time-sensitive tests would fail spuriously."""
    d = getEmptyCol()
    remaining = d.sched.dayCutoff - intTime()
    if remaining < 10 * 60:
        raise Exception("Unit tests will fail around the day rollover.")
def suspendCards(self, ids):
    """Suspend the given cards (queue -1), clearing pending learning state first."""
    self.removeFailed(ids)
    sql = "update cards set queue=-1,mod=?,usn=? where id in " + ids2str(ids)
    self.col.db.execute(sql, intTime(), self.col.usn())
def _upgradeSchema(self):
    """Alter tables prior to ORM initialization.

    Migrates a legacy (Anki 1.x) database in place: facts+fields -> notes,
    cards are rewritten with new ids, reviewHistory -> revlog, then the
    deck/tag/model tables are migrated and old tables dropped. Ids are
    remapped to unique millisecond timestamps throughout.
    """
    db = self.db
    # speed up the upgrade
    db.execute("pragma temp_store = memory")
    db.execute("pragma cache_size = 10000")
    db.execute("pragma synchronous = off")
    # these weren't always correctly set
    db.execute("pragma page_size = 4096")
    db.execute("pragma legacy_file_format = 0")
    # notes
    ###########
    # tags should have a leading and trailing space if not empty, and not
    # use commas
    db.execute("""
update facts set tags = (case
when trim(tags) == "" then ""
else " " || replace(replace(trim(tags), ",", " "), "  ", " ") || " "
end)
""")
    # pull facts into memory, so we can merge them with fields efficiently
    facts = db.all("""
select id, id, modelId, 1, cast(created*1000 as int), cast(modified as int),
0, tags from facts order by created""")
    # build field hash
    fields = {}
    for (fid, ord, val) in db.execute(
        "select factId, ordinal, value from fields order by factId, ordinal"):
        if fid not in fields:
            fields[fid] = []
        val = self._mungeField(val)
        fields[fid].append((ord, val))
    # build insert data and transform ids, and minimize qt's
    # bold/italics/underline cruft.
    map = {}
    data = []
    factidmap = {}  # old fact id -> new note id
    times = {}      # used to keep the ms-timestamp ids unique
    from anki.utils import minimizeHTML
    for c, row in enumerate(facts):
        oldid = row[0]
        row = list(row)
        # get rid of old created column and update id
        while row[4] in times:
            row[4] += 1000
        times[row[4]] = True
        factidmap[row[0]] = row[4]
        row[0] = row[4]
        del row[4]
        map[oldid] = row[0]
        # convert old 64bit id into a string, discarding sign bit
        row[1] = base91(abs(row[1]))
        row.append(
            minimizeHTML("\x1f".join([x[1] for x in sorted(fields[oldid])])))
        data.append(row)
    # and put the facts into the new table
    db.execute("drop table facts")
    _addSchema(db, False)
    db.executemany("insert into notes values (?,?,?,?,?,?,?,?,'','',0,'')",
                   data)
    db.execute("drop table fields")
    # cards
    ###########
    # we need to pull this into memory, to rewrite the creation time if
    # it's not unique and update the fact id
    times = {}
    rows = []
    cardidmap = {}  # old card id -> new card id
    for row in db.execute("""
select id, cast(created*1000 as int), factId, ordinal,
cast(modified as int), 0,
(case relativeDelay
when 0 then 1
when 1 then 2
when 2 then 0 end),
(case type
when 0 then 1
when 1 then 2
when 2 then 0
else type end),
cast(due as int), cast(interval as int),
cast(factor*1000 as int), reps, noCount from cards
order by created"""):
        # find an unused time
        row = list(row)
        while row[1] in times:
            row[1] += 1000
        times[row[1]] = True
        # rewrite fact id
        row[2] = factidmap[row[2]]
        # note id change and save all but old id
        cardidmap[row[0]] = row[1]
        rows.append(row[1:])
    # drop old table and rewrite
    db.execute("drop table cards")
    _addSchema(db, False)
    db.executemany("""
insert into cards values (?,?,1,?,?,?,?,?,?,?,?,?,?,0,0,0,"")""",
                   rows)
    # reviewHistory -> revlog
    ###########
    # fetch the data so we can rewrite ids quickly
    r = []
    for row in db.execute("""
select
cast(time*1000 as int), cardId, 0, ease,
cast(nextInterval as int), cast(lastInterval as int),
cast(nextFactor*1000 as int), cast(min(thinkingTime, 60)*1000 as int),
yesCount from reviewHistory"""):
        row = list(row)
        # new card ids
        try:
            row[1] = cardidmap[row[1]]
        except:
            # id doesn't exist
            continue
        # no ease 0 anymore
        row[3] = row[3] or 1
        # determine type, overwriting yesCount
        newInt = row[4]
        oldInt = row[5]
        yesCnt = row[8]
        # yesCnt included the current answer
        if row[3] > 1:
            yesCnt -= 1
        if oldInt < 1:
            # new or failed
            if yesCnt:
                # type=relrn
                row[8] = 2
            else:
                # type=lrn
                row[8] = 0
        else:
            # type=rev
            row[8] = 1
        r.append(row)
    db.executemany(
        "insert or ignore into revlog values (?,?,?,?,?,?,?,?,?)", r)
    db.execute("drop table reviewHistory")
    # deck
    ###########
    self._migrateDeckTbl()
    # tags
    ###########
    tags = {}
    for t in db.list("select tag from tags"):
        tags[t] = intTime()
    db.execute("update col set tags = ?", simplejson.dumps(tags))
    db.execute("drop table tags")
    db.execute("drop table cardTags")
    # the rest
    ###########
    db.execute("drop table media")
    db.execute("drop table sources")
    self._migrateModels()
    _updateIndices(db)
def genCards(self, nids): "Generate cards for non-empty templates, return ids to remove." # build map of (nid,ord) so we don't create dupes snids = ids2str(nids) have = {} dids = {} for id, nid, ord, did, odid in self.db.execute( "select id, nid, ord, did, odid from cards where nid in " + snids): # existing cards if nid not in have: have[nid] = {} have[nid][ord] = id # if in a filtered deck, add new cards to original deck if odid != 0: did = odid # and their dids if nid in dids: if dids[nid] and dids[nid] != did: # cards are in two or more different decks; revert to # model default dids[nid] = None else: # first card or multiple cards in same deck dids[nid] = did # build cards for each note data = [] ts = maxID(self.db) now = intTime() rem = [] usn = self.usn() for nid, mid, flds in self.db.execute( "select id, mid, flds from notes where id in " + snids): model = self.models.get(mid) avail = self.models.availOrds(model, flds) did = dids.get(nid) or model['did'] # add any missing cards for t in self._tmplsFromOrds(model, avail): doHave = nid in have and t['ord'] in have[nid] if not doHave: # check deck is not a cram deck did = t['did'] or did if self.decks.isDyn(did): did = 1 # if the deck doesn't exist, use default instead did = self.decks.get(did)['id'] # we'd like to use the same due# as sibling cards, but we # can't retrieve that quickly, so we give it a new id # instead data.append( (ts, nid, did, t['ord'], now, usn, self.nextID("pos"))) ts += 1 # note any cards that need removing if nid in have: for ord, id in list(have[nid].items()): if ord not in avail: rem.append(id) # bulk update self.db.executemany( """ insert into cards values (?,?,?,?,?,?,0,0,?,0,0,0,0,0,0,0,0,"")""", data) return rem
class Habitica(object): # debug = False allow_threads = True #startup config processes checking habits, etc. allow_post_scorecounter_thread = True #Maybe a source of database warnings? #find icon file iconfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), "habitica_icon.png") iconfile = iconfile.decode(sys.getfilesystemencoding()) offline_sincedate = intTime() #Score Since date for when we are offline offline_scorecount = 0 #Starting score for offline offline_recover_attempt = 0 #attempt to recover from offline state every third time def __init__(self): if ah.settings.keep_log: ah.log.debug("Begin function") self.api = HabiticaAPI(ah.settings.user, ah.settings.token) #ah.settings.profile = profile #ah.conffile = ah.conffile #self.habitlist = ah.settings.habitlist #ah.settings.show_popup = show_popup #ah.settings.sched_dict = ah.settings.sched_dict #holder for habit reward schedules self.name = 'Anki User' self.lvl = 0 self.xp = 0 self.xt = 0 self.gp = 0 self.hp = 0 self.ht = 50 self.mp = 0 self.mt = 0 self.stats = {} self.hnote = {} self.habit_grabbed = {} #marked true when we get scorecounter. 
self.habit_id = ah.config[ah.settings.profile][ 'habit_id'] #holder for habit IDs self.missing = {} #holds missing habits self.init_update() #check habits, grab user object, get avatar if ah.settings.keep_log: ah.log.debug("End function") def init_update(self): if ah.settings.keep_log: ah.log.debug("Begin function") for habit in ah.settings.habitlist: self.habit_grabbed[habit] = False #create a thread to check the habit as to not slow down #the startup process if Habitica.allow_threads: thread.start_new_thread(self.check_anki_habit, (habit, )) else: self.check_anki_habit(habit) #Grab user object in the background if Habitica.allow_threads: thread.start_new_thread(self.init_grab_stats, ()) else: self.init_grab_stats() #Grab avatar from habitica if Habitica.allow_threads: thread.start_new_thread(self.save_avatar, ()) else: self.save_avatar() if ah.settings.keep_log: ah.log.debug("End function") #Try updating stats silently on init def init_grab_stats(self): if ah.settings.keep_log: ah.log.debug("Begin function") try: self.update_stats(True) except: if ah.settings.keep_log: ah.log.error("End function") return #Save avatar from habitica as png def save_avatar(self): if ah.settings.keep_log: ah.log.debug("Begin function") #See if there's an image for this profile profile_pic = ah.settings.user + ".png" #use user id instead of profile name self.avatarfile = os.path.join( os.path.dirname(os.path.realpath(__file__)), profile_pic) self.avatarfile = self.avatarfile.decode(sys.getfilesystemencoding()) try: pngfile = self.api.export_avatar_as_png( ) #Grab avatar png from Habitica if not pngfile: if ah.settings.keep_log: ah.log.error("End function returning: %s" % False) #Exit if we failed return False #Exit if we failed with open(self.avatarfile, 'wb') as outfile: outfile.write(pngfile) del pngfile except: pass if os.path.exists(self.avatarfile): #use {profile}.png as icon if it exists self.iconfile = self.avatarfile else: self.iconfile = Habitica.iconfile if 
ah.settings.keep_log: ah.log.debug("End function") def hrpg_showInfo(self, text): if ah.settings.keep_log: ah.log.debug("Begin function") if ah.settings.keep_log: ah.log.info("Msg: %s" % text.replace('\n', ' ')) #display a small message window with an OK Button parent = aqt.mw.app.activeWindow() or aqt.mw icon = QMessageBox.Information mb = QMessageBox(parent) mb.setText(text) if os.path.isfile(self.iconfile): mb.setIconPixmap(QPixmap(self.iconfile)) else: mb.setIcon(icon) mb.setWindowModality(Qt.WindowModal) mb.setWindowTitle("Anki Habitica") b = mb.addButton(QMessageBox.Ok) b.setDefault(True) out = mb.exec_() if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out) return out def get_user_object(self): if ah.settings.keep_log: ah.log.debug("Begin function") out = self.api.user() if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out) return out def update_stats(self, silent=False): if ah.settings.keep_log: ah.log.debug("Begin function") #self.hrpg_tooltip("Connecting to Habitica") try: user = self.get_user_object() except: if not silent: self.hrpg_showInfo( "Unable to log in to Habitica.\n\nCheck that you have the correct user-id and api-token in\n%s.\n\nThese should not be your username and password.\n\nPost at github.com/eshapard/AnkiHRPG if this issue persists." 
% (ah.conffile)) if ah.settings.keep_log: ah.log.error("End function returning: %s" % False) return False self.name = user['profile']['name'] self.stats = user['stats'] self.lvl = self.stats['lvl'] self.xp = self.stats['exp'] self.gp = self.stats['gp'] self.hp = self.stats['hp'] self.mp = self.stats['mp'] self.xt = self.stats['toNextLevel'] self.ht = self.stats['maxHealth'] self.mt = self.stats['maxMP'] #if Habitica.debug: utils.showInfo(self.name) if ah.settings.keep_log: ah.log.debug(self.name) if ah.settings.keep_log: ah.log.debug("End function returning: %s" % True) return True def score_anki_points(self, habit): if ah.settings.keep_log: ah.log.debug("Begin function") try: habitID = self.habit_id[habit] out = self.api.perform_task(habitID, "up") if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out) return out except: if ah.settings.keep_log: ah.log.error("End function returning: %s" % False) return False def update_anki_habit(self, habit): if ah.settings.keep_log: ah.log.debug("Begin function") try: habitID = self.habit_id[habit] #out = self.api.alter_task("Anki Points", True, False, None, None, None, "int", None) #if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out) #return out data = {'up': True, 'down': False, 'attribute': 'int'} out = self.api.update_task(habitID, data) if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out) return out except: if ah.settings.keep_log: ah.log.error("End function returning: %s" % False) return False #Check Anki Habit, make new one if it does not exist, and try to # grab the note string. 
def check_anki_habit(self, habit):
    """Verify the Habitica task for *habit* exists and is configured correctly.

    Looks up / caches the task id, recreates the task if it has vanished,
    fixes its up/down/attribute settings if needed, then loads the
    score-counter note. Returns False on any failure path.
    """
    if ah.settings.keep_log: ah.log.debug("Begin function")
    found = False
    #if Habitica.debug: utils.showInfo("checking %s" % habit)
    if ah.settings.keep_log: ah.log.debug("checking %s" % habit)
    if habit not in self.habit_id:
        try:
            self.habit_id[habit] = self.api.find_habit_id(habit)
            habitID = self.habit_id[habit]
        except:
            if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
            return False
    else:
        habitID = self.habit_id[habit]
        #We have an ID, but we may want to check that the habit still exists
    #if Habitica.debug: utils.showInfo("HabitID: %s" % habitID)
    if ah.settings.keep_log: ah.log.debug("HabitID: %s" % habitID)
    if not habitID:
        #find_habit_id returned False; habit not found!
        #if Habitica.debug: utils.showInfo("Habit ID Missing")
        if ah.settings.keep_log: ah.log.warning("Habit ID Missing")
        del self.habit_id[habit]
        self.missing[habit] = True
        #if Habitica.debug: utils.showInfo("Task not found")
        if ah.settings.keep_log: ah.log.warning("Task not found")
        self.create_missing_habit(habit)
        if ah.settings.keep_log: ah.log.warning("End function returning: %s" % False)
        return False
    #Check to see if habit Still exists
    #if Habitica.debug: utils.showInfo("Checking %s habit" % habit)
    if ah.settings.keep_log: ah.log.debug("Checking %s habit" % habit)
    if not found:
        try:
            tasks = self.api.tasks()
            #if Habitica.debug: utils.showInfo(json.dumps(tasks))
            if ah.settings.keep_log: ah.log.debug(json.dumps(tasks))
            for t in tasks:
                if str(t['id']) == str(habitID):
                    found = True
            if found:
                self.missing[habit] = False
                del tasks
                #if Habitica.debug: utils.showInfo("Task found")
                if ah.settings.keep_log: ah.log.debug("Task found")
            else:
                self.missing[habit] = True
                #if Habitica.debug: utils.showInfo("Task not found")
                if ah.settings.keep_log: ah.log.warning("Task not found")
                self.create_missing_habit(habit)
                del tasks
                if ah.settings.keep_log: ah.log.warning("End function returning: %s" % False)
                return False
        except:
            pass
    #Check to see that habitica habit is set up properly
    #if Habitica.debug: utils.showInfo("Checking habit setup")
    if ah.settings.keep_log: ah.log.debug("Checking habit setup")
    try:
        response = self.api.task(habitID)
    except:
        #if Habitica.debug: utils.showInfo("Could not retrieve task")
        if ah.settings.keep_log: ah.log.error("Could not retrieve task")
        if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
        return False
    if response['down'] or response['attribute'] != "int":
        try:
            #if Habitica.debug: utils.showInfo("Updating Habit")
            if ah.settings.keep_log: ah.log.debug("Updating Habit")
            self.update_anki_habit(habitID)
            if ah.settings.keep_log: ah.log.debug("End function returning: %s" % True)
            return True
        except:
            # NOTE(review): bare `hrpg_showInfo` (no `self.`) — looks like a latent
            # NameError if this branch is ever reached; confirm against upstream.
            hrpg_showInfo(
                "Your %s habit is not configured correctly yet.\nPlease set it to Up only and Mental attribute."
                % habit)
            if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
            return False
    #if Habitica.debug: utils.showInfo("Habit looks good")
    if ah.settings.keep_log: ah.log.debug("Habit looks good")
    #Grab scorecounter from habit
    out = self.grab_scorecounter(habit)
    if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out)
    return out

#Create a missing habits
def create_missing_habit(self, habit):
    """Create the Habitica task for *habit*, seeding a fresh score-counter note."""
    if ah.settings.keep_log: ah.log.debug("Begin function")
    try:
        #create habit
        #if Habitica.debug: utils.showInfo("Trying to create %s habit" % habit)
        if ah.settings.keep_log: ah.log.debug("Trying to create %s habit" % habit)
        #create task on habitica
        curtime = intTime()
        self.hnote[habit] = {
            'scoresincedate': curtime,
            'scorecount': 0,
            'sched': ah.settings.sched_dict[habit]
        }
        note = json.dumps(self.hnote[habit])
        msg = self.api.create_task('habit', habit, False, note, 'int', 1, True)
        self.habit_id[habit] = str(msg['_id'])  #capture new task ID
        #if Habitica.debug: utils.showInfo("New habit created: %s" %self.habit_id[habit])
        if ah.settings.keep_log: ah.log.debug("New habit created: %s" % self.habit_id[habit])
        #if Habitica.debug: utils.showInfo(json.dumps(msg))
        if ah.settings.keep_log: ah.log.debug(json.dumps(msg))
        #self.reset_scorecounter(habit)
        self.missing[habit] = False
        self.habit_grabbed[habit] = True
    except:
        if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
        return False

def reset_scorecounter(self, habit):
    """Reset the score counter for *habit* to the latest local review time and push it."""
    if ah.settings.keep_log: ah.log.debug("Begin function")
    #if Habitica.debug: utils.showInfo("Resetting Scorecounter")
    if ah.settings.keep_log: ah.log.debug("Resetting Scorecounter")
    last_review_time = db_helper.latest_review_time()
    #if Habitica.debug: utils.showInfo(str(last_review_time))
    if ah.settings.keep_log: ah.log.debug(str(last_review_time))
    self.hnote[habit] = {
        'scoresincedate': last_review_time,
        'scorecount': 0,
        'sched': ah.settings.sched_dict[habit]
    }
    self.habit_grabbed[habit] = True
    #if Habitica.debug: utils.showInfo("reset: %s" % json.dumps(self.hnote[habit]))
    if ah.settings.keep_log: ah.log.debug("reset: %s" % json.dumps(self.hnote[habit]))
    try:
        self.post_scorecounter(habit)
        if ah.settings.keep_log: ah.log.debug("End function returning: %s" % True)
        return True
    except:
        if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
        return False

def grab_scorecounter(self, habit):
    """Load the JSON score-counter note for *habit* from its Habitica task.

    Resets the counter when the note is missing, unparsable, lacks required
    keys, or was written under a different reward schedule.
    """
    if ah.settings.keep_log: ah.log.debug("Begin function")
    if self.habit_grabbed[habit]:
        if ah.settings.keep_log: ah.log.debug("End function returning: %s" % True)
        return True
    try:
        habitID = str(self.habit_id[habit])
        #if Habitica.debug: utils.showInfo("grabbing scorecounter\n%s" % habitID)
        if ah.settings.keep_log: ah.log.debug("grabbing scorecounter: %s" % habitID)
        response = self.api.task(habitID)
        if not habitID:
            if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
            return False
        #if Habitica.debug: utils.showInfo(response['notes'])
        if ah.settings.keep_log: ah.log.debug(response['notes'])
    except:
        #Check if habit exists
        # NOTE(review): nesting reconstructed from a mangled chunk — the debug
        # line appears to belong to the `not in` branch; confirm against upstream.
        if habit not in self.missing:
            #if Habitica.debug: utils.showInfo("Habit not missing")
            if ah.settings.keep_log: ah.log.debug("Habit not missing")
        #Reset scorecount if habit is missing
        if self.missing[habit]:
            #if Habitica.debug: utils.showInfo("Habit was missing")
            if ah.settings.keep_log: ah.log.debug("Habit was missing")
            self.reset_scorecounter(habit)
        if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
        return False
    #Try to grab the scorecount and score since date
    #if Habitica.debug: utils.showInfo("trying to load note string:\n%s" % response['notes'])
    if ah.settings.keep_log: ah.log.debug("trying to load note string: %s" % response['notes'])
    try:
        self.hnote[habit] = json.loads(response['notes'])
    except:
        #if Habitica.debug: utils.showInfo("Reset 1")
        if ah.settings.keep_log: ah.log.warning("Reset 1")
        self.reset_scorecounter(habit)
        if ah.settings.keep_log: ah.log.warning("End function returning: %s" % True)
        return True
    if 'scoresincedate' not in self.hnote[
            habit] or 'scorecount' not in self.hnote[habit]:
        #reset habit score counter if both keys not found
        #if Habitica.debug: utils.showInfo("scorecounter missing keys")
        if ah.settings.keep_log: ah.log.debug("scorecounter missing keys")
        self.reset_scorecounter(habit)
        if ah.settings.keep_log: ah.log.warning("End function returning: %s" % False)
        return False
    #reset if sched is different from last sched or is missing
    # this should prevent problems caused by changing the reward schedule
    if 'sched' not in self.hnote[habit] or (int(
            self.hnote[habit]['sched']) != int(
            ah.settings.sched_dict[habit])):
        self.reset_scorecounter(habit)
        if ah.settings.keep_log: ah.log.warning("End function returning: %s" % False)
        return False
    #if Habitica.debug: utils.showInfo("Habit Grabbed")
    if ah.settings.keep_log: ah.log.debug("Habit Grabbed")
    if ah.settings.keep_log: ah.log.debug("Habit note: %s" % self.hnote[habit])
    self.habit_grabbed[habit] = True
    if ah.settings.keep_log: ah.log.debug("End function returning: %s" % True)
    return True

def post_scorecounter(self, habit):
    """Write the in-memory score-counter note for *habit* back to its Habitica task."""
    if ah.settings.keep_log: ah.log.debug("Begin function")
    try:
        habitID = self.habit_id[habit]
        #if Habitica.debug: utils.showInfo("posting scorecounter")
        if ah.settings.keep_log: ah.log.debug("posting scorecounter: %s" % self.hnote[habit])
        datastring = json.dumps(self.hnote[habit])
        #self.hrpg_showInfo(datastring)
        data = {"notes": datastring}
        self.api.update_task(habitID, data)
        if ah.settings.keep_log: ah.log.debug("End function returning: %s" % True)
        return True
    except:
        if ah.settings.keep_log: ah.log.error("End function returning: %s" % False)
        return False

def test_internet(self):
    """Return the API helper's internet-connectivity check result."""
    if ah.settings.keep_log: ah.log.debug("Begin function")
    #self.hrpg_tooltip("Testing Internet Connection")
    out = self.api.test_internet()
    if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out)
    return out

def make_score_message(self, new_lvl, new_xp, new_mp, new_gp, new_hp,
                       streak_bonus=0, crit_multiplier=0, drop_dialog=None):
    """Build and show the 'points earned' message, then sync cached stats.

    Compares the new stats against the cached ones, reports deltas plus any
    streak/critical/drop bonuses, and either pops a dialog or a tooltip
    depending on ah.settings.show_popup.
    """
    if ah.settings.keep_log: ah.log.debug("Begin function")
    hrpgresponse = "Huzzah! You've Earned Points!\nWell Done %s!\n" % (
        self.name)
    #Check for increases and add to message
    if new_lvl > self.lvl:
        diff = int(new_lvl) - int(self.lvl)
        hrpgresponse += "\nYOU LEVELED UP! NEW LEVEL: %s" % (new_lvl)
        self.save_avatar()  #save the new avatar!
    hrpgresponse += "\nHP: %s" % (int(self.hp))
    if new_hp > self.hp:
        diff = int(new_hp) - int(self.hp)
        hrpgresponse += " +%s!" % (diff)
    hrpgresponse += "\nXP: %s" % (int(self.xp))
    if new_xp > self.xp:
        diff = int(new_xp) - int(self.xp)
        hrpgresponse += " +%s!" % (diff)
    hrpgresponse += "\nGP: %s" % (round(self.gp, 2))
    if new_gp > self.gp:
        diff = int(new_gp) - int(self.gp)
        hrpgresponse += " +%s!" % (diff)
    hrpgresponse += "\nMP: %s" % (int(self.mp))
    if new_mp > self.mp:
        diff = int(new_mp) - int(self.mp)
        hrpgresponse += " +%s!" % (diff)
    #Check for drops, streaks, and critical hits
    if crit_multiplier:
        hrpgresponse += "\nCritical Hit! Bonus: +%s%%" % crit_multiplier
    if streak_bonus:
        hrpgresponse += "\nStreak Bonus! +%s" % (int(streak_bonus))
    if drop_dialog:
        hrpgresponse += "\n\n%s" % str(drop_dialog)
    #Show message box
    if ah.settings.show_popup:
        self.hrpg_showInfo(hrpgresponse)
    else:
        # self.hrpg_tooltip("Huzzah! You Scored Points!")
        tooltip(_("Huzzah! You Scored Points!"), period=2500)
    #update levels
    # On a genuine level-up re-fetch everything; otherwise just cache the new values.
    if new_lvl > self.lvl and self.lvl > 0:
        self.update_stats(False)
    else:
        self.lvl = new_lvl
        self.xp = new_xp
        self.mp = new_mp
        self.gp = new_gp
        self.hp = new_hp
    runHook("HabiticaAfterScore")
    if ah.settings.keep_log: ah.log.debug("End function returning: %s" % True)
    return True

def earn_points(self, habit):
    """Score accumulated Anki points against *habit* on Habitica.

    Retries scoring up to 3 times while enough local score has accrued,
    collecting streak/crit/drop bonus strings; on total failure remembers
    the points, flags the connection as down and returns False.
    """
    if ah.settings.keep_log: ah.log.debug("Begin function")
    #get user stats if we don't have them
    if 'lvl' not in self.stats:
        #if Habitica.debug: utils.showInfo("lvl not in stats")
        if ah.settings.keep_log: ah.log.warning("lvl not in stats")
        self.update_stats(False)
    #check habit if is is unchecked
    if not self.habit_grabbed[habit]:
        #if Habitica.debug: utils.showInfo("%s habit not checked" % habit)
        if ah.settings.keep_log: ah.log.debug("%s habit not checked" % habit)
        try:
            #if Habitica.debug: utils.showInfo("Checking Habit Score Counter")
            if ah.settings.keep_log: ah.log.debug("Checking Habit Score Counter")
            self.check_anki_habit(habit)
        except:
            pass
    crit_multiplier = None
    streak_bonus = None
    drop_dialog = None
    #Loop through scoring attempts up to 3 times
    #-- to account for missed scoring opportunities (smartphones, etc.)
    i = 0  #loop counter
    success = False
    while i < 3 and ah.config[ah.settings.profile][
            'score'] >= ah.settings.sched and ah.settings.internet:
        try:
            msg = self.score_anki_points(habit)
            if msg['lvl']:  # Make sure we really got a response
                success = True
                self.hnote[habit]['scorecount'] += 1
                ah.config[
                    ah.settings.profile]['score'] -= ah.settings.sched
                #Collect message strings
                if msg['_tmp']:
                    if 'streakBonus' in msg['_tmp']:
                        #streak bonuses
                        if not streak_bonus:
                            streak_bonus = ""
                        else:
                            streak_bonus += "\n"
                        streak_bonus += str(
                            round((100 * msg['_tmp']['streakBonus']), 0))
                    if 'crit' in msg['_tmp']:
                        #critical multiplier
                        if not crit_multiplier:
                            crit_multiplier = ""
                        else:
                            crit_multiplier += ", "
                        crit_multiplier += str(
                            round((100 * msg['_tmp']['crit']), 0))
                    if 'drop' in msg['_tmp'] and 'dialog' in msg['_tmp'][
                            'drop']:
                        #drop happened
                        if not drop_dialog:
                            drop_dialog = ""
                        else:
                            drop_dialog += "\n"
                        #drop_text = msg['_tmp']['drop']['text']
                        #drop_type = msg['_tmp']['drop']['type']
                        drop_dialog += str(msg['_tmp']['drop']['dialog'])
        except:
            pass
        i += 1
    if not success:
        #exit if we failed all 3 times
        self.hrpg_showInfo(
            "Huzzah! You've earned points!\nWell done %s!\n\nSorry,\nI couldn't score your %s habit on Habitica.\nDon't worry, I'll remember your points and try again later."
            % (self.name, habit))
        ah.settings.internet = False  #internet failed
        if ah.settings.keep_log: ah.log.warning('Internet failed')
        if ah.settings.keep_log: ah.log.warning("End function returning: %s" % False)
        return False
    #Post scorecounter to Habit note field
    if Habitica.allow_post_scorecounter_thread:
        thread.start_new_thread(self.post_scorecounter, (habit, ))
    else:
        self.post_scorecounter(habit)
    #Gather new levels from last successful msg
    new_lvl = msg['lvl']
    new_xp = msg['exp']
    new_mp = msg['mp']
    new_gp = msg['gp']
    new_hp = msg['hp']
    #MOVED: These data collection functions now part of above while loop
    #if msg['_tmp']:
    #if 'streakBonus' in msg['_tmp']: #streak
    #streak_bonus = str(round((100 * msg['_tmp']['streakBonus']), 0))
    #if 'crit' in msg['_tmp']: #critical
    #crit_multiplier = str(round((100 * msg['_tmp']['crit']), 0))
    #if 'drop' in msg['_tmp'] and 'dialog' in msg['_tmp']['drop']: #drop happened
    #drop_text = msg['_tmp']['drop']['text']
    #drop_type = msg['_tmp']['drop']['type']
    #drop_dialog = msg['_tmp']['drop']['dialog']
    #Update habit if it was just created
    #DEPRICATED: Habits no longer created automatically for us in API v3
    #if habit in self.missing and self.missing[habit]:
    #if self.check_anki_habit(habit):
    #self.missing[habit] = False
    out = self.make_score_message(new_lvl, new_xp, new_mp, new_gp, new_hp,
                                  streak_bonus, crit_multiplier, drop_dialog)
    if ah.settings.keep_log: ah.log.debug("End function returning: %s" % out)
    return out

#Compact Habitica Stats for Progress Bar
def compact_habitica_stats(self):
    """Return an HTML 'HP | XP | MP' percentage string, or False if maxima are unset."""
    if ah.settings.keep_log: ah.log.debug("Begin function")
    if self.ht and self.xt and self.mt:
        health = int(100 * self.hp / self.ht)
        experience = int(100 * self.xp / self.xt)
        mana = int(100 * self.mp / self.mt)
        string = "<font color='firebrick'>%s</font> | <font color='darkorange'>%s</font> | <font color='darkblue'>%s</font>" % (
            health, experience, mana)
    else:
        string = False
    if ah.settings.keep_log: ah.log.debug("End function returning: %s" % string)
    return string
#Silent Version of Earn Points
def silent_earn_points(self, habit):
    """Score one point for *habit* without any user-visible feedback.

    Fetches the habit/score-counter first if it has not been grabbed yet;
    returns True when the score was recorded, False otherwise.
    """
    if ah.settings.keep_log: ah.log.debug("Begin function")
    #check habit if is is unchecked
    if not self.habit_grabbed[habit]:
        # Best-effort setup; failures here are tolerated and retried on scoring.
        try:
            self.check_anki_habit(habit)
            self.grab_scorecounter(habit)
        except:
            pass
    success = True
    try:
        self.score_anki_points(habit)
        self.hnote[habit]['scorecount'] += 1
    except:
        success = False
    if ah.settings.keep_log:
        if success:
            ah.log.debug("End function returning: %s" % True)
        else:
            ah.log.error("End function returning: %s" % False)
    return success
def _setID(self, m: Dict[str, Any]) -> None:
    """Assign *m* a fresh id: a millisecond timestamp not yet used as a model key."""
    # Keep sampling the clock until we hit a key that is free in self.models.
    while True:
        candidate = str(intTime(1000))
        if candidate not in self.models:
            m['id'] = candidate
            return
def _lrnForDeck(self, did): return self.col.db.scalar( """ select count() from (select 1 from cards where did = ? and queue = 1 and due < ? limit ?)""", did, intTime() + self.col.conf['collapseTime'], self.reportLimit)
def save(self, g=None):
    """Can be called with either a deck or a deck configuration."""
    if g:
        # Stamp modification time and update-sequence-number on the dict.
        g.update({'mod': intTime(), 'usn': self.col.usn()})
    # Always flag the manager as dirty so the change set gets flushed.
    self.changed = True
def fix(row):
    # Record the note id and build the parameter dict for the tags UPDATE.
    nid, old_tags = row[0], row[1]
    nids.append(nid)
    return {
        'id': nid,
        't': fn(tags, old_tags),   # apply the add/remove function to the note's tags
        'n': intTime(),            # new modification time
        'u': self.col.usn(),       # new update sequence number
    }
def updateNotes(allDb):
    """Recalculate MorphMan data for every note and reorder new cards.

    For each note matched by a MorphMan filter: recomputes its unknown /
    unmature morphemes, the Morph Man Index (MMI), fills the configured
    fields, and rewrites its tags; changed notes are flushed in one batch.
    Finally, new cards (type = 0) get their `due` replaced by the note's MMI.
    Returns the known-morpheme database.
    """
    t_0, now, db = time.time(), intTime(), mw.col.db
    TAG = mw.col.tags  # type: TagManager
    ds, nid2mmi = [], {}
    N_notes = db.scalar('select count() from notes')
    mw.progress.start(label='Updating data', max=N_notes, immediate=True)
    fidDb = allDb.fidDb(recalc=True)
    loc_db = allDb.locDb(recalc=False)  # type: Dict[Location, Set[Morpheme]]

    # read tag names
    compTag, vocabTag, freshTag, notReadyTag, alreadyKnownTag, priorityTag, tooShortTag, tooLongTag, frequencyTag = tagNames = cfg(
        'Tag_Comprehension'), cfg('Tag_Vocab'), cfg('Tag_Fresh'), cfg('Tag_NotReady'), cfg(
        'Tag_AlreadyKnown'), cfg('Tag_Priority'), cfg('Tag_TooShort'), cfg('Tag_TooLong'), cfg('Tag_Frequency')
    TAG.register(tagNames)
    badLengthTag = cfg('Tag_BadLength')

    # handle secondary databases
    mw.progress.update(label='Creating seen/known/mature from all.db')
    seenDb = filterDbByMat(allDb, cfg('threshold_seen'))
    knownDb = filterDbByMat(allDb, cfg('threshold_known'))
    matureDb = filterDbByMat(allDb, cfg('threshold_mature'))
    mw.progress.update(label='Loading priority.db')
    priorityDb = MorphDb(cfg('path_priority'), ignoreErrors=True).db

    mw.progress.update(label='Loading frequency.txt')
    frequencyListPath = cfg('path_frequency')
    try:
        with codecs.open(frequencyListPath, encoding='utf-8') as f:
            # first tab-separated column of each line is the morph base form
            frequency_list = [line.strip().split('\t')[0] for line in f.readlines()]
    except FileNotFoundError:
        # missing frequency.txt simply disables the frequency bonus
        frequency_list = []
    frequencyListLength = len(frequency_list)

    if cfg('saveDbs'):
        mw.progress.update(label='Saving seen/known/mature dbs')
        seenDb.save(cfg('path_seen'))
        knownDb.save(cfg('path_known'))
        matureDb.save(cfg('path_mature'))

    mw.progress.update(label='Updating notes')

    # prefetch cfg for fields
    field_focus_morph = cfg('Field_FocusMorph')
    field_unknown_count = cfg('Field_UnknownMorphCount')
    field_unmature_count = cfg('Field_UnmatureMorphCount')
    field_morph_man_index = cfg('Field_MorphManIndex')
    field_unknowns = cfg('Field_Unknowns')
    field_unmatures = cfg('Field_Unmatures')
    field_unknown_freq = cfg('Field_UnknownFreq')
    field_focus_morph_pos = cfg("Field_FocusMorphPos")

    for i, (nid, mid, flds, guid, tags) in enumerate(db.execute('select id, mid, flds, guid, tags from notes')):
        ts = TAG.split(tags)
        if i % 500 == 0:
            mw.progress.update(value=i)

        C = partial(cfg, model_id=mid)
        notecfg = getFilterByMidAndTags(mid, ts)
        if notecfg is None or not notecfg['Modify']:
            continue

        # Get all morphemes for note
        morphemes = set()
        for fieldName in notecfg['Fields']:
            try:
                loc = fidDb[(nid, guid, fieldName)]
                morphemes.update(loc_db[loc])
            except KeyError:
                continue

        proper_nouns_known = cfg('Option_ProperNounsAlreadyKnown')

        # Determine un-seen/known/mature and i+N
        unseens, unknowns, unmatures, new_knowns = set(), set(), set(), set()
        for morpheme in morphemes:
            if proper_nouns_known and morpheme.isProperNoun():
                continue
            if not seenDb.matches(morpheme):
                unseens.add(morpheme)
            if not knownDb.matches(morpheme):
                unknowns.add(morpheme)
            if not matureDb.matches(morpheme):
                unmatures.add(morpheme)
                # known but not yet mature -> recently learned
                if knownDb.matches(morpheme):
                    new_knowns.add(morpheme)

        # Determine MMI - Morph Man Index
        N, N_s, N_k, N_m = len(morphemes), len(
            unseens), len(unknowns), len(unmatures)

        # Bail early for lite update
        if N_k > 2 and C('only update k+2 and below'):
            continue

        # add bonus for morphs in priority.db and frequency.txt
        frequencyBonus = C('frequency.txt bonus')
        isPriority = False
        isFrequency = False

        focusMorph = None

        F_k = 0
        usefulness = 0
        for focusMorph in unknowns:
            F_k += allDb.frequency(focusMorph)

            if focusMorph in priorityDb:
                isPriority = True
                usefulness += C('priority.db weight')

            focusMorphString = focusMorph.base
            try:
                focusMorphIndex = frequency_list.index(focusMorphString)
                isFrequency = True

                # The bigger this number, the lower mmi becomes
                usefulness += int(round(
                    frequencyBonus * (1 - focusMorphIndex / frequencyListLength)))
            except ValueError:
                pass

        # average frequency of unknowns (ie. how common the word is within your collection)
        F_k_avg = F_k // N_k if N_k > 0 else F_k
        usefulness += F_k_avg

        # add bonus for studying recent learned knowns (reinforce)
        for morpheme in new_knowns:
            locs = knownDb.getMatchingLocs(morpheme)
            if locs:
                ivl = min(1, max(loc.maturity for loc in locs))
                # TODO: maybe average this so it doesnt favor long sentences
                usefulness += C('reinforce new vocab weight') // ivl

        if any(morpheme.pos == '動詞' for morpheme in unknowns):
            # FIXME: this isn't working???
            usefulness += C('verb bonus')

        # invert: higher raw usefulness -> smaller (better) MMI contribution
        usefulness = 99999 - min(99999, usefulness)

        # difference from optimal length range (too little context vs long sentence)
        lenDiffRaw = min(N - C('min good sentence length'),
                         max(0, N - C('max good sentence length')))
        lenDiff = min(9, abs(lenDiffRaw))

        # calculate mmi
        mmi = 100000 * N_k + 1000 * lenDiff + int(round(usefulness))
        if C('set due based on mmi'):
            nid2mmi[nid] = mmi

        # Fill in various fields/tags on the note based on cfg
        fs = splitFields(flds)

        # clear any 'special' tags, the appropriate will be set in the next few lines
        ts = [t for t in ts if t not in (
            notReadyTag, compTag, vocabTag, freshTag)]

        # determine card type
        if N_m == 0:  # sentence comprehension card, m+0
            ts.append(compTag)
        elif N_k == 1:  # new vocab card, k+1
            ts.append(vocabTag)
            setField(mid, fs, field_focus_morph, focusMorph.base)
            setField(mid, fs, field_focus_morph_pos, focusMorph.pos)
        elif N_k > 1:  # M+1+ and K+2+
            ts.append(notReadyTag)
        elif N_m == 1:  # we have k+0, and m+1, so this card does not introduce a new vocabulary -> card for newly learned morpheme
            ts.append(freshTag)
            focusMorph = next(iter(unmatures))
            setField(mid, fs, field_focus_morph, focusMorph.base)
            setField(mid, fs, field_focus_morph_pos, focusMorph.pos)
        else:  # only case left: we have k+0, but m+2 or higher, so this card does not introduce a new vocabulary -> card for newly learned morpheme
            ts.append(freshTag)

        # set type agnostic fields
        setField(mid, fs, field_unknown_count, '%d' % N_k)
        setField(mid, fs, field_unmature_count, '%d' % N_m)
        setField(mid, fs, field_morph_man_index, '%d' % mmi)
        setField(mid, fs, field_unknowns, ', '.join(u.base for u in unknowns))
        setField(mid, fs, field_unmatures, ', '.join(u.base for u in unmatures))
        setField(mid, fs, field_unknown_freq, '%d' % F_k_avg)

        # remove deprecated tag
        if badLengthTag is not None and badLengthTag in ts:
            ts.remove(badLengthTag)

        # other tags
        if priorityTag in ts:
            ts.remove(priorityTag)
        if isPriority:
            ts.append(priorityTag)

        if frequencyTag in ts:
            ts.remove(frequencyTag)
        if isFrequency:
            ts.append(frequencyTag)

        if tooShortTag in ts:
            ts.remove(tooShortTag)
        if lenDiffRaw < 0:
            ts.append(tooShortTag)

        if tooLongTag in ts:
            ts.remove(tooLongTag)
        if lenDiffRaw > 0:
            ts.append(tooLongTag)

        # remove unnecessary tags
        if not cfg('Option_SetNotRequiredTags'):
            unnecessary = [priorityTag, tooShortTag, tooLongTag]
            ts = [tag for tag in ts if tag not in unnecessary]

        # update sql db
        tags_ = TAG.join(TAG.canonify(ts))
        flds_ = joinFields(fs)
        if flds != flds_ or tags != tags_:  # only update notes that have changed
            csum = fieldChecksum(fs[0])
            sfld = stripHTML(fs[getSortFieldIndex(mid)])
            ds.append(
                {'now': now, 'tags': tags_, 'flds': flds_, 'sfld': sfld,
                 'csum': csum, 'usn': mw.col.usn(), 'nid': nid})

    mw.progress.update(label='Updating anki database...')
    mw.col.db.executemany(
        'update notes set tags=:tags, flds=:flds, sfld=:sfld, csum=:csum, mod=:now, usn=:usn where id=:nid', ds)

    # Now reorder new cards based on MMI
    mw.progress.update(label='Updating new card ordering...')
    ds = []

    # "type = 0": new cards
    # "type = 1": learning cards [is supposed to be learning: in my case no learning card had this type]
    # "type = 2": review cards
    for (cid, nid, due) in db.execute('select id, nid, due from cards where type = 0'):
        if nid in nid2mmi:  # owise it was disabled
            due_ = nid2mmi[nid]
            if due != due_:  # only update cards that have changed
                ds.append({'now': now, 'due': due_,
                           'usn': mw.col.usn(), 'cid': cid})
    mw.col.db.executemany(
        'update cards set due=:due, mod=:now, usn=:usn where id=:cid', ds)
    mw.reset()

    printf('Updated notes in %f sec' % (time.time() - t_0))

    mw.progress.finish()
    return knownDb
def setDeck(self, cids, did):
    """Move the cards with ids *cids* into deck *did*, bumping usn and mod time."""
    sql = "update cards set did=?,usn=?,mod=? where id in " + ids2str(cids)
    self.col.db.execute(sql, did, self.col.usn(), intTime())