def run(self):
    # Always use the basic/reversed model, regardless of the current model.
    mm = self.col.models
    basicReversedModel = mm.byName("Basic (and reversed card)")
    self.model = basicReversedModel
    self.initMapping()
    TextImporter.run(self)
def __init__(self, *args):
    TextImporter.__init__(self, *args)
    self.log = []
    self.lines = None
    self.fileobj = None
    self.delimiter = "\t"
    self.tagsToAdd = ["~import::FCD"]  # specific to FlashcardsDeluxeImporter
    self.cardStats = {}
    self.startedAt = datetime.now()
    self.newNoteIds = []
    self.clozeNoteIds = []
    self.newTags = set()
    # set up renaming tags map
    self.renameTags = {}
    renameTagsFilename = os.path.join(self.col.media.dir(), RENAME_TAGS_FILENAME)
    if os.path.exists(renameTagsFilename):
        with open(renameTagsFilename, "rb") as renameTagsFile:
            reader = csv.reader(renameTagsFile, delimiter="\t", doublequote=True)
            for row in reader:
                self.renameTags[row[0]] = row[1]
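# Hypothetical contents of the rename-tags file read above (the actual file
# name comes from RENAME_TAGS_FILENAME and lives in the collection's media
# folder). Each tab-separated line maps an incoming tag to the tag it should
# be renamed to; the values below are made up for illustration:
#
#   OldTag	~import::renamed-tag
#   Verbs	~grammar::verbs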
def importDeckFromCSV(self, filename, name):
    # select deck
    did = mw.col.decks.id(name)
    mw.col.decks.select(did)
    # set note type for deck
    model = mw.col.models.byName("Basic")
    deck = mw.col.decks.get(did)
    deck['mid'] = model['id']
    mw.col.decks.save(deck)
    # assign the new deck to the model
    mw.col.models.setCurrent(model)
    model['did'] = did
    mw.col.models.save(model)
    # import into the collection
    ti = TextImporter(mw.col, filename)
    ti.initMapping()
    ti.run()
    mw.col.reset()
    mw.reset()
def setupImporter(self, temp_file_path):
    self.importer = TextImporter(self.mw.col, str(temp_file_path))
    self.importer.initMapping()
    self.importer.allowHTML = True
    self.importer.importMode = self.frm.importMode.currentIndex()
    self.mw.pm.profile['importMode'] = self.importer.importMode
    self.importer.delimiter = ';'
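# For reference, TextImporter.importMode (mirrored here from the import
# dialog's combo box) selects how duplicates are handled: 0 updates existing
# notes whose first field matches, 1 ignores lines whose first field matches
# an existing note, and 2 imports them as new notes anyway.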
def importNotes(self, notes):
    TextImporter.importNotes(self, notes)
    # Update any cloze cards.
    mm = self.col.models
    basicReversedModel = mm.byName("Basic (and reversed card)")
    clozeModel = mm.byName("Cloze")
    fmap = {0: 0, 1: 2, 2: 1, 3: 3}
    cmap = {0: None, 1: 0}
    mm.change(basicReversedModel, self.clozeNoteIds, clozeModel, fmap, cmap)
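# As I read the maps above: fmap keeps fields 0 and 3 in place and swaps
# fields 1 and 2 during the model change, while cmap drops the forward card
# (0 -> None) and turns the reversed card into the single cloze card (1 -> 0).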
def vocabulous():
    words_list, words_dict = get_entries()
    wordswritten = write_definitions("vocabulous.csv", words_list, words_dict)
    file = "vocabulous.csv"
    # select deck
    did = mw.col.decks.id("Default")
    mw.col.decks.select(did)
    # set note type for deck
    m = mw.col.models.byName("Basic")
    deck = mw.col.decks.get(did)
    deck['mid'] = m['id']
    mw.col.decks.save(deck)
    # import into the collection
    ti = TextImporter(mw.col, file)
    ti.allowHTML = True
    ti.importMode = 1
    ti.initMapping()
    ti.run()
    # get the number of cards in the current collection, which is stored in
    # the main window
    cardCount = mw.col.cardCount()
    # show a message box
    showInfo("Added %d new words. Now %d words in total." % (wordswritten, cardCount))
def newData(self, n):
    # superData layout:
    # [id, guid64(), self.model['id'],
    #  intTime(), self.col.usn(), self.col.tags.join(n.tags),
    #  n.fieldsStr, "", "", 0, ""]
    superData = TextImporter.newData(self, n)
    id, guid, mid, time, usn, tags, fieldsStr, a, b, c, d = superData
    fields = fieldsStr.split("\x1f")
    if len(fields) >= 4:
        noteId = fields[0]
        front = fields[1]
        back = fields[2]
        citation = fields[3]
    else:
        self.log.append(u"Could not import: {0}".format(fields))
        return None
    self.newNoteIds.append(id)
    # change to the actual note ID, now that it's assigned
    if noteId in self.cardStats:
        self.cardStats[id] = self.cardStats[noteId]
        del self.cardStats[noteId]
    if noteId in self.clozeNoteIds:
        ndx = self.clozeNoteIds.index(noteId)
        self.clozeNoteIds[ndx] = id
    fieldsStr = "\x1f".join([str(id), front, back, citation])
    return [id, guid, mid, time, usn, tags, fieldsStr, a, b, c, d]
def import_csv(csvFile, lang, book, chapter):
    try:
        deck = getEmptyDeck()
    except pysqlite2.dbapi2.OperationalError as e:
        print e, "you probably have Anki open while you are running this; run this as a plugin from within Anki or close Anki and run this."
        sys.exit(1)
    m = deck.models.byName("Basic")
    deck.models.setCurrent(m)
    # set lang::book::chapter as the target deck
    m['did'] = deck.decks.id(lang + "::" + book + "::" + chapter)
    deck.models.save(m)
    i = TextImporter(deck, csvFile)
    i.initMapping()
    i.run()
    deck.save()
    deck.close()
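# A minimal usage sketch for import_csv; the file name and the
# language/book/chapter values are made up for illustration:
#
#   import_csv("gen01.csv", "Hebrew", "Genesis", "01")
#
# which files the imported notes under the deck "Hebrew::Genesis::01".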
def updateCards(self):
    suspendIds = set()
    for nid in self.newNoteIds:
        note = mw.col.getNote(nid)
        if nid not in self.cardStats:
            continue
        stats = self.cardStats[nid]
        # Use the same statistics for both directions.
        for card in note.cards():
            self._updateStatistics(card, stats, suspendIds)
            card.flush()
            self._cards.append((nid, card.ord, card))
    TextImporter.updateCards(self)
    mw.col.sched.suspendCards(suspendIds)
def test_tsv_tag_modified():
    col = getEmptyCol()
    mm = col.models
    m = mm.current()
    field = mm.newField("Top")
    mm.addField(m, field)
    mm.save(m)
    n = col.newNote()
    n["Front"] = "1"
    n["Back"] = "2"
    n["Top"] = "3"
    n.add_tag("four")
    col.addNote(n)
    # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file
    with NamedTemporaryFile(mode="w", delete=False) as tf:
        tf.write("1\tb\tc\n")
        tf.flush()
        i = TextImporter(col, tf.name)
        i.initMapping()
        i.tagModified = "boom"
        i.run()
        clear_tempfile(tf)
    n.load()
    assert n["Front"] == "1"
    assert n["Back"] == "b"
    assert n["Top"] == "c"
    assert "four" in n.tags
    assert "boom" in n.tags
    assert len(n.tags) == 2
    assert i.updateCount == 1
    col.close()
def test_tsv_tag_modified():
    deck = getEmptyCol()
    mm = deck.models
    m = mm.current()
    f = mm.newField("Top")
    mm.addField(m, f)
    mm.save(m)
    n = deck.newNote()
    n["Front"] = "1"
    n["Back"] = "2"
    n["Top"] = "3"
    n.addTag("four")
    deck.addNote(n)
    with NamedTemporaryFile(mode="w") as tf:
        tf.write("1\tb\tc\n")
        tf.flush()
        i = TextImporter(deck, tf.name)
        i.initMapping()
        i.tagModified = "boom"
        i.run()
    n.load()
    assert n["Front"] == "1"
    assert n["Back"] == "b"
    assert n["Top"] == "c"
    assert "four" in n.tags
    assert "boom" in n.tags
    assert len(n.tags) == 2
    assert i.updateCount == 1
    deck.close()
def test_tsv_tag_multiple_tags():
    deck = getEmptyCol()
    mm = deck.models
    m = mm.current()
    f = mm.newField("Top")
    mm.addField(m, f)
    mm.save(m)
    n = deck.newNote()
    n["Front"] = "1"
    n["Back"] = "2"
    n["Top"] = "3"
    n.addTag("four")
    n.addTag("five")
    deck.addNote(n)
    with NamedTemporaryFile(mode="w") as tf:
        tf.write("1\tb\tc\n")
        tf.flush()
        i = TextImporter(deck, tf.name)
        i.initMapping()
        i.tagModified = "five six"
        i.run()
    n.load()
    assert n["Front"] == "1"
    assert n["Back"] == "b"
    assert n["Top"] == "c"
    assert list(sorted(n.tags)) == list(sorted(["four", "five", "six"]))
    deck.close()
def test_csv_tag_only_if_modified():
    deck = getEmptyCol()
    mm = deck.models
    m = mm.current()
    f = mm.newField("Left")
    mm.addField(m, f)
    mm.save(m)
    n = deck.newNote()
    n["Front"] = "1"
    n["Back"] = "2"
    n["Left"] = "3"
    deck.addNote(n)
    with NamedTemporaryFile(mode="w") as tf:
        tf.write("1,2,3\n")
        tf.flush()
        i = TextImporter(deck, tf.name)
        i.initMapping()
        i.tagModified = "right"
        i.run()
    n.load()
    assert n.tags == []
    assert i.updateCount == 0
    deck.close()
def test_tsv_tag_multiple_tags():
    deck = getEmptyCol()
    mm = deck.models
    m = mm.current()
    f = mm.newField("Top")
    mm.addField(m, f)
    mm.save(m)
    n = deck.newNote()
    n["Front"] = "1"
    n["Back"] = "2"
    n["Top"] = "3"
    n.addTag("four")
    n.addTag("five")
    deck.addNote(n)
    # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file
    with NamedTemporaryFile(mode="w", delete=False) as tf:
        tf.write("1\tb\tc\n")
        tf.flush()
        i = TextImporter(deck, tf.name)
        i.initMapping()
        i.tagModified = "five six"
        i.run()
        clear_tempfile(tf)
    n.load()
    assert n["Front"] == "1"
    assert n["Back"] == "b"
    assert n["Top"] == "c"
    assert list(sorted(n.tags)) == list(sorted(["four", "five", "six"]))
    deck.close()
def test_csv_tag_only_if_modified():
    deck = getEmptyCol()
    mm = deck.models
    m = mm.current()
    f = mm.newField("Left")
    mm.addField(m, f)
    mm.save(m)
    n = deck.newNote()
    n["Front"] = "1"
    n["Back"] = "2"
    n["Left"] = "3"
    deck.addNote(n)
    # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file
    with NamedTemporaryFile(mode="w", delete=False) as tf:
        tf.write("1,2,3\n")
        tf.flush()
        i = TextImporter(deck, tf.name)
        i.initMapping()
        i.tagModified = "right"
        i.run()
        clear_tempfile(tf)
    n.load()
    assert n.tags == []
    assert i.updateCount == 0
    deck.close()
def __init__(self, file):
    super(DeckChooser, self).__init__(mw)
    self.setupUi(self)
    self.importer = TextImporter(mw.col, file)
    self.importer.delimiter = "\t"
    self.importer.importMode = 0
    self.importer.allowHTML = True
    self.setup_decks()
    self.exec_()
def test_csv():
    deck = getEmptyDeck()
    file = unicode(os.path.join(testDir, "support/text-2fields.txt"))
    i = TextImporter(deck, file)
    i.initMapping()
    i.run()
    # four problems - too many & too few fields, a missing front, and a
    # duplicate entry
    assert len(i.log) == 5
    assert i.total == 5
    # if we run the import again, it should update instead
    i.run()
    assert len(i.log) == 5
    assert i.total == 5
    # if updating is disabled, count will be 0
    i.update = False
    i.run()
    assert i.total == 0
    deck.close()
def ImportToAnki(model_name, import_to_deck, *args, **kwargs):
    # get file
    file = kwargs.get("file", None)
    if not file:
        file = getFile(mw, _("Import"), None, key="import", filter=Importers[0][0])
    if not file:
        return
    file = str(file)
    # check default model
    try:
        model = mw.col.models.byName(model_name)
        if not model:
            raise Exception("Could not find note type '{}'".format(model_name))
    except:
        importFile(mw, settings.deck_template_file)
        try:
            model = mw.col.models.byName(model_name)
        except:
            model = None
    importer = TextImporter(mw.col, file)
    importer.delimiter = "\t"
    importer.importMode = 0
    importer.allowHTML = True
    importer.model = model
    did = mw.col.decks.id(import_to_deck)
    mw.col.conf['curDeck'] = did
    importer.model['did'] = did
    mw.col.decks.select(did)
    importer.mapping = [kwargs.get("first")]
    importer.run()
    mw.reset()
    txt = _("Importing complete.") + "\n"
    if importer.log:
        txt += "\n".join(importer.log)
    showText(txt)
def add_dictionary_to_anki(collection_path, deck_name='Import'):
    # See:
    # http://ankisrs.net/docs/addons.html#the-collection
    dictionary = unicode(os.path.abspath('data/dictionary.txt'))
    col = Collection(collection_path)
    # Change to the basic note type
    m = col.models.byName('Basic')
    col.models.setCurrent(m)
    # Set 'Import' as the target deck
    m['did'] = col.decks.id(deck_name)
    col.models.save(m)
    # Import into the collection
    ti = TextImporter(col, dictionary)
    ti.allowHTML = True
    ti.initMapping()
    ti.run()
    col.close()
    print("Imported dictionary into collection.")
    return 0
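# Hypothetical call to add_dictionary_to_anki; the collection path is an
# assumption and depends on the local Anki profile:
#
#   add_dictionary_to_anki("/home/user/Anki/User 1/collection.anki2", deck_name="Import")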
def test_csv2():
    deck = getEmptyDeck()
    mm = deck.models
    m = mm.current()
    f = mm.newField("Three")
    mm.addField(m, f)
    mm.save(m)
    n = deck.newNote()
    n['Front'] = "1"
    n['Back'] = "2"
    n['Three'] = "3"
    deck.addNote(n)
    # an update with unmapped fields should not clobber those fields
    file = unicode(os.path.join(testDir, "support/text-update.txt"))
    i = TextImporter(deck, file)
    i.initMapping()
    i.run()
    n.load()
    assert n['Front'] == "1"
    assert n['Back'] == "x"
    assert n['Three'] == "3"
    deck.close()
def drv(col):
    cwd = os.getcwd()
    flist = glob.glob('????.txt')
    for inf in flist:
        textf = 'economist.words.' + inf
        # splitext rather than rstrip('.txt'), which strips any trailing
        # '.', 't' or 'x' characters instead of the extension
        deckname = os.path.splitext(textf)[0]
        textf = os.path.join(cwd, textf)
        print(textf)
        deckid = col.decks.id(deckname)
        ti = TextImporter(col, textf)
        ti.model['did'] = deckid
        col.decks.select(deckid)
        ti.delimiter = '\t'
        ti.initMapping()
        ti.run()
def importCourse(self):
    courseUrl = self.courseUrlLineEdit.text()
    # make sure the url given actually looks like a course home url
    if re.match(r'http://www.memrise.com/course/\d+/.+/', courseUrl) is None:
        self.courseUrlLineEdit.setText("Import failed. Does your URL look like the sample URL above?")
        return
    courseTitle, levelTitles = self.getCourseInfo(courseUrl)
    levelCount = len(levelTitles)
    # build list of urls for each level in the course
    levelUrls = map(lambda levelNum: format("%s%i" % (courseUrl, levelNum)), range(1, levelCount + 1))
    # fetch notes data for each level
    memriseNotesByLevel = map(lambda levelUrl: self.getLevelNotes(levelUrl), levelUrls)
    # zip the notes data for a level together with its level title.
    levelData = zip(memriseNotesByLevel, levelTitles)
    # Figure out how many zeroes we need to order the subdecks
    # alphabetically, e.g. if there are 100+ levels we'll need to write
    # "Level 001", "Level 002" etc.
    zeroCount = len(str(len(levelData)))
    levelNumber = 1
    # For each level, create an import file and import it as a deck
    for level in levelData:
        notes = level[0]
        levelTitle = level[1]
        if len(notes) == 0:
            continue
        importFilePath = self.createImportFile(notes)
        # import our file into Anki
        noteModel = mw.col.models.byName("Basic")
        mw.col.models.setCurrent(noteModel)
        deckTitle = format("%s::Level %s: %s" % (courseTitle, str(levelNumber).zfill(zeroCount), levelTitle))
        noteModel['did'] = mw.col.decks.id(deckTitle)
        mw.col.models.save(noteModel)
        importer = TextImporter(mw.col, importFilePath)
        importer.allowHTML = True
        importer.initMapping()
        importer.run()
        os.remove(importFilePath)
        levelNumber += 1
    # refresh deck browser so user can see the newly imported deck
    mw.deckBrowser.refresh()
    # bye!
    self.hide()
def _importFileToCards(file, model_name):
    deck_id = mw.col.decks.id(":Expressions")
    mw.col.decks.select(deck_id)
    m = mw.col.models.byName(model_name)
    deck = mw.col.decks.get(deck_id)
    deck['mid'] = m['id']
    mw.col.decks.save(deck)
    m['did'] = deck_id
    importer = TextImporter(mw.col, file)
    importer.allowHTML = True
    importer.initMapping()
    importer.run()
def run_importer(file_path, tag_name, deck_name, collection):
    if not os.stat(file_path).st_size > 0:
        print "Nothing to import!"
        sys.exit()
    ti = TextImporter(collection, file_path)
    ti.delimiter = get_delimiter()
    ti.allowHTML = True
    ti.tagsToAdd = [tag_name]
    ti.initMapping()
    ti.run()
    # BUGFIX: anki doesn't add to selected deck
    did = collection.decks.id(deck_name)
    num_cards_added = ti.total
    ids = get_card_ids(collection, tag_name)
    ids = sorted(ids, reverse=True)
    for id in ids[:num_cards_added]:
        collection.db.execute("update cards set did = ? where id = ?", did, id)
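# Rough usage sketch, assuming the collection has already been opened with
# anki's Collection class and that get_delimiter()/get_card_ids() are defined
# alongside run_importer; file, tag and deck names are made up:
#
#   col = Collection("/path/to/collection.anki2")
#   run_importer("words.tsv", "imported::batch1", "Vocabulary", col)
#   col.save()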
def importfile(filename, deck, type):
    try:
        file = join(dirname(realpath(__file__)), filename)
        did = mw.col.decks.id(deck)
        mw.col.decks.select(did)
        m = mw.col.models.byName(type)
        deck = mw.col.decks.get(did)
        deck['mid'] = m['id']
        mw.col.decks.save(deck)
        m['did'] = did
        ti = TextImporter(mw.col, file)
        ti.initMapping()
        ti.run()
        tooltip('Successfully imported results.')
    except:
        showWarning('<b>Import error</b><br>Make sure that the notetype and deck exist and that you have enough fields.')
def main():
    parser = argparser()
    args = parser.parse_args()
    csv_filename = args.csv_filename
    deck_name = args.deck_name
    apkg_filename = args.apkg_filename
    media_directory = args.media_directory
    model_name = args.model_name
    # this is removed at the end of the program
    TMPDIR = tempfile.mkdtemp()
    collection = anki.Collection(os.path.join(TMPDIR, 'collection.anki2'))
    deck_id = collection.decks.id(deck_name)
    deck = collection.decks.get(deck_id)
    if model_name == 'Image Occlusion Enhanced':
        model = add_io_model(collection)
    else:
        model = collection.models.byName(model_name).copy()
    model['did'] = deck_id
    collection.models.update(model)
    collection.models.setCurrent(model)
    collection.models.save(model)
    importer = TextImporter(collection, csv_filename)
    importer.allowHTML = True
    importer.initMapping()
    importer.run()
    for media_file in os.listdir(media_directory):
        os.symlink(os.path.join(media_directory, media_file),
                   os.path.join(TMPDIR, 'collection.media', media_file))
    export = AnkiPackageExporter(collection)
    export.exportInto(apkg_filename)
    shutil.rmtree(TMPDIR)
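# A hypothetical command-line invocation, assuming argparser() defines
# positional arguments matching the attribute names read above (script name
# and argument order are illustrative only):
#
#   python build_apkg.py cards.csv "My Deck" out.apkg ./media "Basic"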
def importFile(filename):
    name = filename.rsplit('/', 1)[-1].rsplit('.', 1)[0]
    # select deck
    did = mw.col.decks.id(name)
    mw.col.decks.select(did)
    # anki defaults to the last note type used in the selected deck
    m = mw.col.models.byName("Basic")
    deck = mw.col.decks.get(did)
    deck['mid'] = m['id']
    mw.col.decks.save(deck)
    # and puts cards in the last deck used by the note type
    m['did'] = did
    # import into the collection
    ti = TextImporter(mw.col, filename)
    ti.initMapping()
    ti.run()
    mw.col.reset()
    mw.reset()
def txtImporter():
    # select deck
    # Modify to your own destination deck
    did = mw.col.decks.id("Test")
    mw.col.decks.select(did)
    # set note type for deck
    m = mw.col.models.byName("Basic")
    deck = mw.col.decks.get(did)
    #deck['mid'] = m['id']
    mw.col.decks.save(deck)
    # import into the collection
    oriCardCount = mw.col.cardCount()
    ti = TextImporter(mw.col, txtConvertor())
    ti.initMapping()
    ti.run()
    mw.reset()
    cardCount = mw.col.cardCount()
    # show a message box
    showInfo("Cards imported: " + str(cardCount - oriCardCount) + "\n\nCards Count: " + str(cardCount))
def importFile(deckId, notes, noteType):
    if len(notes) == 0:
        return
    with open(u"tmp.txt", "w") as f:
        for item in notes:
            f.write("%s\n" % item)
    m = mw.col.models.byName(noteType)
    deck = mw.col.decks.get(deckId)
    deck['mid'] = m['id']
    m['did'] = deckId
    mw.col.decks.save(deck)
    mw.col.models.save(m)
    ti = TextImporter(mw.col, u"tmp.txt")
    ti.delimiter = ","
    ti.initMapping()
    ti.run()
    os.remove(u"tmp.txt")
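# Example call, assuming each entry in notes is already a comma-separated
# "front,back" line matching the delimiter set above (deck and values are
# made up for illustration):
#
#   importFile(mw.col.decks.id("Spanish"), ["dog,perro", "cat,gato"], "Basic")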
def importFileOutsideGUI(filename):
    pickle_list = loadFromPickle()
    profile_name = pickle_list[1]
    anki_dir = os.path.dirname(os.path.realpath(__file__).rsplit('\\', 2)[0])
    anki_collection = anki_dir + '\\' + profile_name + '\\collection.anki2'
    col = Collection(anki_collection)
    name = filename.rsplit('/', 1)[-1].rsplit('.', 1)[0]
    did = col.decks.id(name)
    col.decks.select(did)
    m = col.models.byName("Basic")
    deck = col.decks.get(did)
    deck['mid'] = m['id']
    col.decks.save(deck)
    m['did'] = did
    ti = TextImporter(col, filename)
    ti.initMapping()
    ti.run()
    col.reset()
    mw.reset()
    col.close()
def importFlashcards():
    # Import cards
    for notetype in ['Basic', 'Cloze', 'Definition']:
        filename = os.path.expanduser("~/tmp/" + notetype.lower() + ".csv")
        linecount = sum(1 for line in open(filename))
        if linecount > 1:
            # Select deck
            did = mw.col.decks.id(currentDeck)
            mw.col.decks.select(did)
            # Set note type
            m = mw.col.models.byName(notetype)
            # Set note type for deck
            deck = mw.col.decks.get(did)
            deck['mid'] = m['id']
            mw.col.decks.save(deck)
            # Import into the collection
            ti = TextImporter(mw.col, filename)
            ti.allowHTML = True
            ti.initMapping()
            ti.run()
    showInfo("Import complete")
def testFunction():
    file = u"/home/hollisma/.local/share/Anki2/addons21/myaddon/cards.txt"
    # Select deck
    did = mw.col.decks.id("Vocab")
    mw.col.decks.select(did)
    # Settings for cards
    m = mw.col.models.byName("Cloze")
    deck = mw.col.decks.get(did)
    deck['mid'] = m['id']
    mw.col.decks.save(deck)
    m['did'] = did
    mw.col.models.save(m)
    # Import cards
    ti = TextImporter(mw.col, file)
    ti.initMapping()
    ti.run()
    showInfo('Done!')
def importMHT():
    # Ask for the .mht file.
    file_path = getFile(mw, _("Import mht file"), None, key="import")
    if not file_path:
        return
    file_path = unicode(file_path)
    # Convert mht
    parser = Parser(file_path)
    output = parser.run()
    # Creates a temp dir instead of file since windows
    # won't allow subprocesses to access it otherwise.
    # https://stackoverflow.com/questions/15169101/how-to-create-a-temporary-file-that-can-be-read-by-a-subprocess
    try:
        temp_dir = mkdtemp()
        path = os.path.join(temp_dir, 'import.html')
        with open(path, 'w+') as html:
            html.write(output)
        # Move temp images to collection.media
        media_dir = os.path.join(mw.pm.profileFolder(), "collection.media")
        for meta in parser.file_map.values():
            temp_path = meta.get('path')
            new_path = os.path.join(media_dir, meta.get('filename'))
            shutil.move(temp_path, new_path)
        # import into the collection
        ti = TextImporter(mw.col, path)
        ti.delimiter = '\t'
        ti.allowHTML = True
        ti.initMapping()
        MHTImportDialog(mw, ti)
        # Remove file
        os.remove(path)
    finally:
        os.rmdir(temp_dir)
def test_csv2():
    col = getEmptyCol()
    mm = col.models
    m = mm.current()
    field = mm.newField("Three")
    mm.addField(m, field)
    mm.save(m)
    n = col.newNote()
    n["Front"] = "1"
    n["Back"] = "2"
    n["Three"] = "3"
    col.addNote(n)
    # an update with unmapped fields should not clobber those fields
    file = str(os.path.join(testDir, "support", "text-update.txt"))
    i = TextImporter(col, file)
    i.initMapping()
    i.run()
    n.load()
    assert n["Front"] == "1"
    assert n["Back"] == "x"
    assert n["Three"] == "3"
    col.close()
def test_csv():
    deck = getEmptyDeck()
    file = unicode(os.path.join(testDir, "support/text-2fields.txt"))
    i = TextImporter(deck, file)
    i.initMapping()
    i.run()
    # four problems - too many & too few fields, a missing front, and a
    # duplicate entry
    assert len(i.log) == 5
    assert i.total == 5
    # if we run the import again, it should update instead
    i.run()
    assert len(i.log) == 5
    assert i.total == 5
    # but importing should not clobber tags if they're unmapped
    n = deck.getNote(deck.db.scalar("select id from notes"))
    n.addTag("test")
    n.flush()
    i.run()
    n.load()
    assert n.tags == ['test']
    # if add-only mode, count will be 0
    i.importMode = 1
    i.run()
    assert i.total == 0
    # and if dupes mode, will reimport everything
    assert deck.cardCount() == 5
    i.importMode = 2
    i.run()
    # includes repeated field
    assert i.total == 6
    assert deck.cardCount() == 11
    deck.close()
def test_csv():
    deck = getEmptyDeck()
    file = unicode(os.path.join(testDir, "support/text-2fields.txt"))
    i = TextImporter(deck, file)
    i.initMapping()
    i.run()
    # four problems - too many & too few fields, a missing front, and a
    # duplicate entry
    assert len(i.log) == 5
    assert i.total == 5
    # if we run the import again, it should update instead
    i.run()
    assert len(i.log) == 10
    assert i.total == 5
    # but importing should not clobber tags if they're unmapped
    n = deck.getNote(deck.db.scalar("select id from notes"))
    n.addTag("test")
    n.flush()
    i.run()
    n.load()
    assert n.tags == ['test']
    # if add-only mode, count will be 0
    i.importMode = 1
    i.run()
    assert i.total == 0
    # and if dupes mode, will reimport everything
    assert deck.cardCount() == 5
    i.importMode = 2
    i.run()
    # includes repeated field
    assert i.total == 6
    assert deck.cardCount() == 11
    deck.close()
def advimport():
    Log('-' * 80)
    filename = getFile(mw, "Select file to import", None, key="import")
    if len(filename) == 0:
        showText("invalid filename", mw, type="text", run=True)
        return
    lines = []
    n = 0
    with open(filename) as f:
        reader = unicode_csv_reader(f)
        for i in range(N_HEADER_LINES):
            n += 1
            reader.next()
        for row in reader:
            #print row
            n += 1
            lines.append((n, row))
    for n, line in lines:
        #Log("--"*5)
        data = []
        _chapt = line[0]
        _sect = line[1]
        _keywords = line[2]
        _question = line[3]
        _solution = line[4]
        _type = line[5]
        _subtype = line[6]
        _symb = SYMBOLS.get(_type, "")
        _rests = line[7:]
        print "L%03i:" % n,
        if not _type:
            print "!!! No type, skipping"
            continue
        elif _type == u"rule":
            print " Rule ",
            model = "Rule"
            key = _question
            data = [key, _question, _solution, _chapt, _sect, _type, _symb]
        elif _type == u"pron":
            print " Pronoun ",
            model = "Simple"
            key = _solution
            data = [key, _question, _solution, _chapt, _sect, _type, _symb]
        elif _type == u"wend":
            print " Sentence ",
            model = "Simple"
            key = _solution
            data = [key, _question, _solution, _chapt, _sect, _type, _symb]
        elif _type == u"prep":
            print " Prepos ",
            model = "Simple"
            key = _solution
            data = [key, _question, _solution, _chapt, _sect, _type, _symb]
        elif _type == u"adv":
            print " Adverb ",
            model = "Simple"
            key = _solution
            data = [key, _question, _solution, _chapt, _sect, _type, _symb]
        elif _type == u"nom":
            # Noun
            print " Noun ",
            model = "Noun"
            key = _solution
            lst = _solution.split(' ')
            art = lst.pop(0)
            noun = " ".join(lst)
            if not _subtype or _subtype == u"":
                if art == "el":
                    _subtype = u"♂"
                elif art == "la":
                    _subtype = u"♀"
                elif art == "los":
                    _subtype = u"♂♂/♂♀"
                elif art == "las":
                    _subtype = u"♀♀"
                elif art == "el/la":
                    _subtype = u"♂/♀"
            elif _subtype[0] in ["F", "f"]:
                _subtype = u"♀"
            elif _subtype[0] in ["M", "m"]:
                _subtype = u"♂"
            data = [key, _question, _solution, _chapt, _sect, _type, _subtype, _symb]
        elif _type == u"verb":
            print " Verb ",
            modus = _rests[0]
            temp = _rests[1]
            forms = _rests[2:]
            for ii, f in enumerate(forms):
                _ = f.split('|')
                if len(_) == 2:
                    for i, (c, e) in enumerate(zip(["stem", "ext"], _)):
                        _[i] = '<span class="%s">%s</span>' % (c, e)
                for i, x in enumerate(_):
                    _[i] = _[i].replace("[", '<span class="irr">')
                    _[i] = _[i].replace("]", '</span>')
                forms[ii] = "".join(_)
            model = "Verb"
            key = "%s (%s; %s)" % (_solution, modus, temp)
            jsforms = '''{'sg1':'%s','sg2':'%s','sg3':'%s','pl1':'%s','pl2':'%s','pl3':'%s'}''' % tuple(forms)
            #Log("JSF", jsforms)
            _question = _question.replace("[", '<span class="prp">')
            _question = _question.replace("]", '</span>')
            _solution = _solution.replace("[", '<span class="prp">')
            _solution = _solution.replace("]", '</span>')
            #print _question
            data = [key, _question, _solution, _chapt, _sect, _type, _subtype, _symb, modus, temp, jsforms]
        elif _type == u"adj":
            print " Adjective ",
            s = _solution
            def decline(stem, exts=['_o', '_a', '_os', '_as'], wrap=('<b>', '</b>')):
                return [stem + wrap[0] + _ + wrap[1] for _ in exts]
            if '[' in s:
                _subtype = 'IRR'
                i = s.find('[')
                stem = s[:i]
                exts = s[i+1:s.find(']')].split('|')
                #Log("ir1: ", i, stem, exts, len(exts))
                if len(exts) == 4:
                    pass
                elif len(exts) == 2:
                    exts = [exts[0], exts[0], exts[1], exts[1]]
                elif len(exts) == 3:
                    exts = [exts[0], exts[1], exts[2], exts[2]]
                else:
                    #TODO
                    exts = ['???'] * 4
            elif '|' in s:
                _subtype = 'IRR'
                stem = ''
                exts = s.split('|')
                if len(exts) == 4:
                    pass
                elif len(exts) == 2:
                    exts = [exts[0], exts[0], exts[1], exts[1]]
                elif len(exts) == 3:
                    exts = [exts[0], exts[1], exts[2], exts[2]]
                else:
                    exts = ['???'] * 4
            elif s[-1] == 'o':
                _subtype = '-o'
                stem = s[:-1]
                exts = ['_o', '_a', '_os', '_as']
            elif s[-1] == 'e':
                _subtype = '-e'
                stem = s[:-1]
                exts = ['e', 'e', '_es', '_es']
            elif s[-4:] == 'ista':
                _subtype = '-ista'
                stem = s[:-4]
                exts = ['ist_a', 'ist_a', 'ist_as', 'ist_as']
            elif s[-2:] == u'ón':
                _subtype = u'-ón'
                stem = s[:-2]
                exts = [u'*ón_', '*on_a', '*on_es', '*on_as']
            elif s[-5:] == 'erior':
                _subtype = '-erior'
                stem = s[:-5]
                exts = [u'erior', 'erior', 'erior_s', 'erior_s']
            elif s[-2:] == u'or':
                _subtype = '-or'
                stem = s[:-2]
                exts = [u'or', 'or_a', 'or_es', 'or_as']
            elif s[-1] == 'z':
                _subtype = '-z'
                stem = s[:-1]
                exts = [u'*z', '*z', '*c_es', '*c_es']
            else:
                # consonant at end:
                _subtype = '-CONS'
                stem = s
                exts = ['', '', '_es', '_es']
                print '!!!! >> check this:', stem, exts, "\n ",
            #decl = decline(stem, exts, wrap=('<span class="ext">', '</span>'))
            decl = decline(stem, exts, wrap=('', ''))
            #decl = [_.replace('_', '') for _ in decl]
            for i, d in enumerate(decl):
                while d.find('*') >= 0:
                    fi = d.find('*')
                    #print fi, d
                    d = d[:fi] + '<span class="irr">' + d[fi+1] + '</span>' + d[fi+2:]
                if '_' in d:
                    d = d.replace('_', '<span class="ext">') + '</span>'
                decl[i] = d
            #print decl
            #Log(stem, exts, decl)
            model = "Adjectiv"
            key = stem + exts[0]  # use masculine form sg as key
            key = key.replace('*', '').replace('_', '')
            jsforms = '''{'MSg':'%s','FSg':'%s','MPl':'%s','FPl':'%s'}''' % tuple(decl)
            data = [key, _question, key, _chapt, _sect, _type, _subtype, _symb, jsforms]
        else:
            print "!!! Unknown type, skipping"
            continue
        if len(data) > 0:
            print data[1], " | ", data[2]
            with codecs.open('multiimport.tsv', 'w', encoding='utf-8') as f:
                #data = [_.encode("utf8") for _ in data]
                s = "\t".join(data)
                #f.write(s.decode("utf8"))
                f.write(s)
                #print s
            did = mw.col.decks.byName(deck_name)['id']
            mw.col.decks.select(did)
            m = mw.col.models.byName(model)
            mw.col.conf['curModel'] = m['id']
            cdeck = mw.col.decks.current()
            cdeck['mid'] = m['id']
            mw.col.decks.save(cdeck)
            mw.col.models.setCurrent(m)
            m['did'] = did
            mw.col.models.save(m)
            mw.reset()
            ti = TextImporter(mw.col, 'multiimport.tsv')
            ti.delimiter = '\t'
            ti.allowHTML = True
            ti.initMapping()
            ti.run()
            #os.remove('multiimport.tsv')
    print('-' * 80)
def makeDeck(parent, prefix, deck):
    name = deck.csvname
    csvfile = "%s%s%s.csv" % (tmpPath, prefix, name)
    if not os.path.exists(csvfile) and deck.cardType is not None:
        print('Skipping deck "%s" because no file "%s" was found.' % (name, csvfile))
        return
        # raise Exception('No csv file "' + csvfile + '" found.')
    did = tcol.decks.id(parent + deck.name)
    d = tcol.decks.get(did)
    tcol.decks.select(did)
    confId = tcol.decks.confId(parent + deck.name, cloneFrom=deck.conf)
    if not deck.cardType:
        conf = tcol.decks.getConf(confId)
        conf['new']['perDay'] = 999
        tcol.decks.updateConf(conf)
    elif deck.perDay:
        conf = tcol.decks.getConf(confId)
        conf['new']['perDay'] = deck.perDay
        tcol.decks.updateConf(conf)
    tcol.decks.setConf(d, confId)
    if deck.cardType:
        ct = deck.cardType
        if not tcol.models.byName(ct.name):
            m = tcol.models.new(ct.name)
            m['req'] = [[0, 'all', [0]]]
            m['css'] = ct.css()
            m['tmpls'] = [
                {
                    'name': 'Card 1',
                    'qfmt': ct.front(),
                    'afmt': ct.back(),
                    'bfont': 'Lucida Sans Unicode',
                    'bamft': '',
                    'bqmft': '',
                    'ord': 0,
                    'did': None,
                    'bsize': 12
                }
            ]
            tcol.models.add(m)
            for i, field in enumerate(ct.fields):
                f = tcol.models.newField(field.anki_name)
                f['ord'] = i
                tcol.models.addField(m, f)
        else:
            m = tcol.models.byName(ct.name)
            # So that we can reuse already-present models
            # todo: this doesn't actually work but would be a big part of
            # updating
            # if m['id'] != ct.mid:
            #     m = tcol.models.get(m['id'])
            #     m['id'] = ct.mid
            #     m.save(m)
        tcol.save()
        m['did'] = did
        tcol.decks.select(did)
        ti = TextImporter(tcol, csvfile)
        ti.model = m
        ti.allowHTML = True
        ti.initMapping()
        ti.delimiter = "\t"
        ti.updateDelimiter()
        ti.run()
        tcol.save()
    for sd in deck.subdecks:
        makeDeck(parent + deck.name + '::', prefix + name + '-', sd)