def addItemToCards(self, item):
    """Convert one SuperMemo item into a ForeignNote (fields, optional
    scheduling card, tags) and append it to self.notes."""
    # new anki card
    note = ForeignNote()
    # clean Q and A
    note.fields.append(self._fudgeText(self._decode_htmlescapes(item.Question)))
    note.fields.append(self._fudgeText(self._decode_htmlescapes(item.Answer)))
    note.tags = []

    # pre-process scheduling data
    # convert learning data
    if (not self.META.resetLearningData
            and int(item.Interval) >= 1
            and getattr(item, "LastRepetition", None)):
        # migration of LearningData algorithm
        tLastrep = time.mktime(time.strptime(item.LastRepetition, '%d.%m.%Y'))
        card = ForeignCard()
        card.ivl = int(item.Interval)
        card.lapses = int(item.Lapses)
        card.reps = int(item.Repetitions) + int(item.Lapses)
        nextDue = tLastrep + (float(item.Interval) * 86400.0)
        remDays = int((nextDue - time.time()) / 86400)
        card.due = self.col.sched.today + remDays
        # SuperMemo exports A-Factor with a locale decimal comma
        card.factor = int(
            self._afactor2efactor(float(item.AFactor.replace(',', '.'))) * 1000)
        note.cards[0] = card

    # categories & tags
    # it's worth having every theme (tree structure of the sm collection)
    # stored in tags, but sometimes not - you can decide whether to tag all
    # topics or just those containing some pattern
    tTaggTitle = False
    for pattern in self.META.pathsToBeTagged:
        if item.lTitle is not None and pattern.lower() in " ".join(item.lTitle).lower():
            tTaggTitle = True
            break
    if tTaggTitle or self.META.tagAllTopics:
        # normalize - remove diacritic punctuation from unicode chars to ascii
        item.lTitle = [self._unicode2ascii(topic) for topic in item.lTitle]

        # Transform "xyz / aaa / bbb / ccc" title path into tag xyzAaaBbbCcc:
        # clean things like [999] or [111-2222] from the title path,
        # clean whitespace, capitalize the first char of each word
        # (raw strings: the old non-raw '\[' / '\W' literals warn on Python 3)
        tmp = list(set([re.sub(r'(\[[0-9]+\])', ' ', i).replace('_', ' ')
                        for i in item.lTitle]))
        tmp = list(set([re.sub(r'(\W)', ' ', i) for i in tmp]))
        tmp = list(set([re.sub(r'^[0-9 ]+$', '', i) for i in tmp]))
        tmp = list(set([capwords(i).replace(' ', '') for i in tmp]))
        tags = [j[0].lower() + j[1:] for j in tmp if j.strip() != '']
        note.tags += tags

        if self.META.tagMemorizedItems and int(item.Interval) > 0:
            note.tags.append("Memorized")

        self.logger('Element tags\t- ' + repr(note.tags), level=3)

    self.notes.append(note)
def addItemToCards(self, item):
    """Convert one SuperMemo item into a ForeignNote (fields, optional
    scheduling card, tags) and append it to self.notes."""
    # new anki card
    note = ForeignNote()
    # clean Q and A
    note.fields.append(self._fudgeText(self._decode_htmlescapes(item.Question)))
    note.fields.append(self._fudgeText(self._decode_htmlescapes(item.Answer)))
    note.tags = []

    # pre-process scheduling data
    # convert learning data
    if (not self.META.resetLearningData
            and int(item.Interval) >= 1
            and getattr(item, "LastRepetition", None)):
        # migration of LearningData algorithm
        tLastrep = time.mktime(time.strptime(item.LastRepetition, '%d.%m.%Y'))
        card = ForeignCard()
        card.ivl = int(item.Interval)
        card.lapses = int(item.Lapses)
        card.reps = int(item.Repetitions) + int(item.Lapses)
        nextDue = tLastrep + (float(item.Interval) * 86400.0)
        remDays = int((nextDue - time.time()) / 86400)
        card.due = self.col.sched.today + remDays
        # SuperMemo exports A-Factor with a locale decimal comma
        card.factor = int(
            self._afactor2efactor(float(item.AFactor.replace(',', '.'))) * 1000)
        note.cards[0] = card

    # categories & tags
    # it's worth having every theme (tree structure of the sm collection)
    # stored in tags, but sometimes not - you can decide whether to tag all
    # topics or just those containing some pattern
    tTaggTitle = False
    for pattern in self.META.pathsToBeTagged:
        # "is not None" instead of "!= None" per PEP 8
        if item.lTitle is not None and pattern.lower() in " ".join(item.lTitle).lower():
            tTaggTitle = True
            break
    if tTaggTitle or self.META.tagAllTopics:
        # normalize - remove diacritic punctuation from unicode chars to ascii
        item.lTitle = [self._unicode2ascii(topic) for topic in item.lTitle]

        # Transform "xyz / aaa / bbb / ccc" title path into tag xyzAaaBbbCcc:
        # clean things like [999] or [111-2222] from the title path,
        # clean whitespace, capitalize the first char of each word
        # (raw strings: the old non-raw '\[' / '\W' literals warn on Python 3)
        tmp = list(set([re.sub(r'(\[[0-9]+\])', ' ', i).replace('_', ' ')
                        for i in item.lTitle]))
        tmp = list(set([re.sub(r'(\W)', ' ', i) for i in tmp]))
        tmp = list(set([re.sub(r'^[0-9 ]+$', '', i) for i in tmp]))
        tmp = list(set([capwords(i).replace(' ', '') for i in tmp]))
        tags = [j[0].lower() + j[1:] for j in tmp if j.strip() != '']
        note.tags += tags

        if self.META.tagMemorizedItems and int(item.Interval) > 0:
            note.tags.append("Memorized")

        self.logger('Element tags\t- ' + repr(note.tags), level=3)

    self.notes.append(note)
def _learnedCard(self, batch, timestamp):
    """Reconstruct a ForeignCard's scheduling from a learned batch value
    and a millisecond timestamp of the last repetition."""
    # batch encodes the interval logarithmically
    interval = math.exp(batch)
    elapsed_days = (time.time() - timestamp / 1000.0) / ONE_DAY
    days_left = interval - elapsed_days
    card = ForeignCard()
    card.due = self.col.sched.today + int(days_left + 0.5)
    # interval and ease factor are randomized within a band
    card.ivl = random.randint(int(interval * 0.90), int(interval + 0.5))
    card.factor = random.randint(1500, 2500)
    return card
def run(self):
    """Import a Mnemosyne 2/3 database: gather facts, categorize cards,
    rebuild scheduling, then add the notes to the collection."""
    db = DB(self.file)
    ver = db.scalar(
        "select value from global_variables where key='version'")
    if not ver.startswith("Mnemosyne SQL 1") and ver not in ("2", "3"):
        self.log.append(_("File version unknown, trying import anyway."))
    # gather facts into temp objects
    curid = None
    notes = {}
    note = None
    for _id, id, k, v in db.execute("""
select _id, id, key, value from facts f, data_for_fact d where
f._id=d._fact_id"""):
        if id != curid:
            if note:
                # pylint: disable=unsubscriptable-object
                notes[note["_id"]] = note
            note = {"_id": _id}
            curid = id
        assert note
        note[k] = v
    if note:
        notes[note["_id"]] = note
    # gather cards
    front = []
    frontback = []
    vocabulary = []
    cloze = {}
    for row in db.execute("""
select _fact_id, fact_view_id, tags, next_rep, last_rep, easiness,
acq_reps+ret_reps, lapses, card_type_id from cards"""):
        # categorize note by its fact view id
        note = notes[row[0]]
        if row[1].endswith(".1"):
            if row[1].startswith("1.") or row[1].startswith("1::"):
                front.append(note)
            elif row[1].startswith("2.") or row[1].startswith("2::"):
                frontback.append(note)
            elif row[1].startswith("3.") or row[1].startswith("3::"):
                vocabulary.append(note)
        elif row[1].startswith("5.1"):
            cloze[row[0]] = note
        # check for None to fix issue where import can error out
        rawTags = row[2]
        if rawTags is None:
            rawTags = ""
        # merge tags into note
        tags = rawTags.replace(", ", "\x1f").replace(" ", "_")
        tags = tags.replace("\x1f", " ")
        if "tags" not in note:
            note["tags"] = []
        note["tags"] += self.col.tags.split(tags)
        note["tags"] = self.col.tags.canonify(note["tags"])
        # if it's a new card we can go with the defaults
        if row[3] == -1:
            continue
        # add the card
        c = ForeignCard()
        c.factor = int(row[5] * 1000)
        c.reps = row[6]
        c.lapses = row[7]
        # ivl is inferred in mnemosyne
        next, prev = row[3:5]
        c.ivl = max(1, (next - prev) // 86400)
        # work out how long we've got left
        rem = int((next - time.time()) / 86400)
        c.due = self.col.sched.today + rem
        # get ord
        m = re.search(r".(\d+)$", row[1])
        assert m
        ord = int(m.group(1)) - 1
        if "cards" not in note:
            note["cards"] = {}
        note["cards"][ord] = c
    # Each _add* pass leaves self.total at its own batch count (as the
    # original accumulation implied), so sum the batches explicitly; the
    # previous version dropped the vocabulary batch from the final total.
    total = 0
    self._addFronts(front)
    total += self.total
    self._addFrontBacks(frontback)
    total += self.total
    self._addVocabulary(vocabulary)
    total += self.total
    self._addCloze(cloze)
    total += self.total
    self.total = total
    self.log.append(
        ngettext("%d note imported.", "%d notes imported.", self.total)
        % self.total)
def run(self):
    """Import a Mnemosyne 2.x database: gather facts, categorize cards,
    rebuild scheduling, then add the notes to the collection."""
    db = DB(self.file)
    ver = db.scalar(
        "select value from global_variables where key='version'")
    assert ver.startswith('Mnemosyne SQL 1') or ver == "2"
    # gather facts into temp objects
    curid = None
    notes = {}
    note = None
    for _id, id, k, v in db.execute("""
select _id, id, key, value from facts f, data_for_fact d where
f._id=d._fact_id"""):
        if id != curid:
            if note:
                notes[note['_id']] = note
            note = {'_id': _id}
            curid = id
        note[k] = v
    if note:
        notes[note['_id']] = note
    # gather cards
    front = []
    frontback = []
    vocabulary = []
    cloze = {}
    for row in db.execute("""
select _fact_id, fact_view_id, tags, next_rep, last_rep, easiness,
acq_reps+ret_reps, lapses, card_type_id from cards"""):
        # categorize note by its fact view id
        note = notes[row[0]]
        if row[1].endswith(".1"):
            if row[1].startswith("1.") or row[1].startswith("1::"):
                front.append(note)
            elif row[1].startswith("2.") or row[1].startswith("2::"):
                frontback.append(note)
            elif row[1].startswith("3.") or row[1].startswith("3::"):
                vocabulary.append(note)
        elif row[1].startswith("5.1"):
            cloze[row[0]] = note
        # check for None to fix issue where import can error out
        rawTags = row[2]
        if rawTags is None:
            rawTags = ""
        # merge tags into note
        tags = rawTags.replace(", ", "\x1f").replace(" ", "_")
        tags = tags.replace("\x1f", " ")
        if "tags" not in note:
            note['tags'] = []
        note['tags'] += self.col.tags.split(tags)
        note['tags'] = self.col.tags.canonify(note['tags'])
        # if it's a new card we can go with the defaults
        if row[3] == -1:
            continue
        # add the card
        c = ForeignCard()
        c.factor = int(row[5] * 1000)
        c.reps = row[6]
        c.lapses = row[7]
        # ivl is inferred in mnemosyne; use integer division so ivl stays
        # an int under Python 3 (was true division)
        next, prev = row[3:5]
        c.ivl = max(1, (next - prev) // 86400)
        # work out how long we've got left
        rem = int((next - time.time()) / 86400)
        c.due = self.col.sched.today + rem
        # get ord (raw string: non-raw "\d" warns on Python 3)
        m = re.search(r".(\d+)$", row[1])
        assert m
        ord = int(m.group(1)) - 1
        if 'cards' not in note:
            note['cards'] = {}
        note['cards'][ord] = c
    # Each _add* pass leaves self.total at its own batch count (as the
    # original accumulation implied), so sum the batches explicitly; the
    # previous version dropped the vocabulary batch from the final total.
    total = 0
    self._addFronts(front)
    total += self.total
    self._addFrontBacks(frontback)
    total += self.total
    self._addVocabulary(vocabulary)
    total += self.total
    self._addCloze(cloze)
    total += self.total
    self.total = total
    self.log.append(ngettext("%d note imported.",
                             "%d notes imported.", self.total) % self.total)
def run(self):
    """Import a Mnemosyne 1.x database: gather facts, categorize cards,
    rebuild scheduling, then add the notes to the collection."""
    db = DB(self.file)
    ver = db.scalar(
        "select value from global_variables where key='version'")
    assert ver.startswith('Mnemosyne SQL 1')
    # gather facts into temp objects
    curid = None
    notes = {}
    note = None
    for _id, id, k, v in db.execute("""
select _id, id, key, value from facts f, data_for_fact d where
f._id=d._fact_id"""):
        if id != curid:
            if note:
                notes[note['_id']] = note
            note = {'_id': _id}
            curid = id
        note[k] = v
    if note:
        notes[note['_id']] = note
    # gather cards
    front = []
    frontback = []
    vocabulary = []
    for row in db.execute("""
select _fact_id, fact_view_id, tags, next_rep, last_rep, easiness,
acq_reps+ret_reps, lapses from cards"""):
        # categorize note by its fact view id
        note = notes[row[0]]
        if row[1] == "1.1":
            front.append(note)
        elif row[1] == "2.1":
            frontback.append(note)
        elif row[1] == "3.1":
            vocabulary.append(note)
        # check for None so a missing tags column can't error out the import
        rawTags = row[2]
        if rawTags is None:
            rawTags = ""
        # merge tags into note
        tags = rawTags.replace(", ", "\x1f").replace(" ", "_")
        tags = tags.replace("\x1f", " ")
        if "tags" not in note:
            note['tags'] = []
        note['tags'] += self.col.tags.split(tags)
        note['tags'] = self.col.tags.canonify(note['tags'])
        # if it's a new card we can go with the defaults
        if row[3] == -1:
            continue
        # add the card
        c = ForeignCard()
        c.factor = int(row[5] * 1000)
        c.reps = row[6]
        c.lapses = row[7]
        # ivl is inferred in mnemosyne; use integer division so ivl stays
        # an int under Python 3 (was true division)
        next, prev = row[3:5]
        c.ivl = max(1, (next - prev) // 86400)
        # work out how long we've got left
        rem = int((next - time.time()) / 86400)
        c.due = self.col.sched.today + rem
        # get ord (raw string: non-raw "\d" warns on Python 3)
        m = re.match(r"\d+\.(\d+)", row[1])
        ord = int(m.group(1)) - 1
        if 'cards' not in note:
            note['cards'] = {}
        note['cards'][ord] = c
    # each _add* pass leaves self.total at its own batch count, so sum
    # the batches explicitly (same result as the original accumulation)
    total = 0
    self._addFronts(front)
    total += self.total
    self._addFrontBacks(frontback)
    total += self.total
    self._addVocabulary(vocabulary)
    total += self.total
    self.total = total
    self.log.append(_("%d notes imported.") % self.total)