Example #1
 def do_import(self, filename, extra_tag_names=""):
     FileFormat.do_import(self, filename, extra_tag_names)
     w = self.main_widget()
     w.set_progress_text(_("Importing cards..."))
     db = self.database()
     # The import process generates card log entries, which we will delete
     # in favour of those events that are recorded in the logs and which
     # capture the true timestamps. They also have new 2.0 ids, as opposed
     # to their old 1.x ids.
     log_index = db.current_log_index()
     try:
         self.read_items_from_mnemosyne1_mem(filename)
         self.create_cards_from_mnemosyne1(extra_tag_names)
     except MnemosyneError:
         w.close_progress()
         return
     db.remove_card_log_entries_since(log_index)
     self.import_logs(filename)
     # Force an ADDED_CARD log entry for those cards that did not figure in
     # the txt logs, e.g. due to missing or corrupt logs.
     db.add_missing_added_card_log_entries(
         set(item.id for item in self.items))
     # In 2.x, repetition events are used to update a card's last_rep and
     # next_rep during sync. In 1.x, there was no such information, and
     # calculating it from the logs will fail if they are incomplete.
     # Therefore, we force a card edit event for all cards.
     timestamp = int(time.time())
     for item in self.items:
         db.log_edited_card(timestamp, item.id)
     # Detect inverses.
     db.link_inverse_cards()
     w.close_progress()
     self.warned_about_missing_media = False
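
The pattern above is a checkpoint-and-replay: remember where the log ends, let the import generate throwaway entries, then delete them and replay the recorded history. A minimal sketch of just that skeleton, with the surrounding Mnemosyne machinery stubbed out as plain callables (function names invented for illustration):

 def import_with_true_history(db, run_import, replay_logs):
     # Remember where the log ends before importing.
     checkpoint = db.current_log_index()
     # The import itself appends entries with fresh timestamps and ids...
     run_import()
     # ...so discard them and replay the recorded events instead.
     db.remove_card_log_entries_since(checkpoint)
     replay_logs()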
Example #2
 def do_import(self, filename, extra_tag_names=None):
     FileFormat.do_import(self, filename, extra_tag_names)
     w = self.main_widget()
     w.set_progress_text(_("Importing cards..."))
     db = self.database()
     # The import process generates card log entries, which we will delete
     # in favour of those events that are recorded in the logs and which
     # capture the true timestamps. They also have new 2.0 ids, as opposed
     # to their old 1.x ids.
     log_index = db.current_log_index()
     try:
         self.read_items_from_mnemosyne1_mem(filename)
         self.create_cards_from_mnemosyne1(extra_tag_names)
     except MnemosyneError:
         w.close_progress()
         return
     db.remove_card_log_entries_since(log_index)
     self.import_logs(filename)
     # Force an ADDED_CARD log entry for those cards that did not figure in
     # the txt logs, e.g. due to missing or corrupt logs.
     db.add_missing_added_card_log_entries(
         set(item.id for item in self.items))
     # In 2.x, repetition events are used to update a card's last_rep and
     # next_rep during sync. In 1.x, there was no such information, and
     # calculating it from the logs will fail if they are incomplete.
     # Therefore, we force a card edit event for all cards.
     timestamp = int(time.time())
     for item in self.items:
         db.log_edited_card(timestamp, item.id)
     # Detect inverses.
     db.link_inverse_cards()
     w.close_progress()
     self.warned_about_missing_media = False
Example #3
 def do_import(self, filename, extra_tag_names=""):
     FileFormat.do_import(self, filename, extra_tag_names)
     w = self.main_widget()
     try:
         tree = cElementTree.parse(filename)
     except cElementTree.ParseError as e:
         w.show_error(_("Unable to parse file:") + str(e))
         return
     card_type = self.card_type_with_id("1")
     tag_names = [tag_name.strip() for \
         tag_name in extra_tag_names.split(",") if tag_name.strip()]
     for element in tree.getroot().findall("Card"):
         fact_data = {
             "f": element.attrib["Question"],
             "b": element.attrib["Answer"]
         }
         self.preprocess_media(fact_data, tag_names)
         card = self.controller().create_new_cards(
             fact_data,
             card_type,
             grade=-1,
             tag_names=tag_names,
             check_for_duplicates=False,
             save=False)[0]
         if _("MISSING_MEDIA") in tag_names:
             tag_names.remove(_("MISSING_MEDIA"))
     self.warned_about_missing_media = False
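
Judging from element.attrib["Question"] and element.attrib["Answer"], this importer expects one <Card> element per card, with the text stored in attributes. A hypothetical input file it would accept (the root tag name is not constrained by the code):

 <cards>
     <Card Question="Capital of France?" Answer="Paris"/>
     <Card Question="2 + 2" Answer="4"/>
 </cards>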
Example #4
 def do_import(self, filename, extra_tag_names=None):
     FileFormat.do_import(self, filename, extra_tag_names)
     w = self.main_widget()
     try:
         tree = cElementTree.parse(filename)
     except cElementTree.ParseError as e:
         w.show_error(_("Unable to parse file:") + str(e))
         return
Example #5
 def do_import(self, filename, extra_tag_names=""):
     FileFormat.do_import(self, filename, extra_tag_names)
     try:
         f = open(filename, encoding="utf-8")
     except:
         self.main_widget().show_error(_("Could not load file."))
         return
     facts_data = []
     line_number = 0
     for line in f:
         line_number += 1
         line = line.rstrip()
         # Parse HTML-style escaped unicode (e.g. &#33267;).
         for match in re0.finditer(line):
             # Integer part.
             u = chr(int(match.group(1)))
             # Integer part with &# and ;.
             line = line.replace(match.group(), u)
         if len(line) == 0:
             continue
         if line[0] == "\ufeff":  # Remove byte-order mark.
             line = line[1:]
         fields = line.split("\t")
         if len(fields) >= 3:  # Vocabulary card.
             if len(fields) >= 4:
                 facts_data.append({"f": fields[0], "p_1": fields[1],
                     "m_1": fields[2], "n": fields[3]})
             else:
                 facts_data.append({"f": fields[0], "p_1": fields[1],
                     "m_1": fields[2]})
         elif len(fields) == 2:  # Front-to-back only.
             facts_data.append({"f": fields[0], "b": fields[1]})
         else:  # Malformed line.
             self.main_widget().show_error(_("Badly formed input on line") \
                 + " " + str(line_number) + ":\n" + line)
             return
     # Now that we know all the data is well-formed, create the cards.
     tag_names = [tag_name.strip() for \
         tag_name in extra_tag_names.split(",") if tag_name.strip()]
     for fact_data in facts_data:
         if len(list(fact_data.keys())) == 2:
             card_type = self.card_type_with_id("1")
         else:
             card_type = self.card_type_with_id("3")
         self.preprocess_media(fact_data, tag_names)
         self.controller().create_new_cards(fact_data, card_type, grade=-1,
             tag_names=tag_names, check_for_duplicates=False, save=False)
         if _("MISSING_MEDIA") in tag_names:
             tag_names.remove(_("MISSING_MEDIA"))
     self.warned_about_missing_media = False
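
The module-level re0 is not shown in these snippets. Judging from the comments, a pattern along the following lines would make the loop work; the exact definition in the Mnemosyne source may differ:

 import re

 re0 = re.compile(r"&#(\d+);")  # HTML numeric entities, e.g. &#33267;

 line = "to arrive: &#33267;"
 for match in re0.finditer(line):
     line = line.replace(match.group(), chr(int(match.group(1))))
 # line is now "to arrive: 至"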
Example #6
 def do_import(self, filename, extra_tag_names=None, show_metadata=True):
     FileFormat.do_import(self, filename, extra_tag_names)
     if not extra_tag_names:
         extra_tags = []
     else:
         extra_tags = [self.database().get_or_create_tag_with_name(\
             tag_name.strip()) for tag_name in extra_tag_names.split(",")]
     self.database().set_extra_tags_on_import(extra_tags)
     # Extract zipfile.
     w = self.main_widget()
     w.set_progress_text(_("Decompressing..."))
     zip_file = zipfile.ZipFile(filename, "r")
     zip_file.extractall(self.database().media_dir())
     # Show metadata.
     metadata_filename = os.path.join(\
             self.database().media_dir(), "METADATA")
     if show_metadata:
         metadata = {}
         for line in open(metadata_filename, encoding="utf-8"):
             key, value = line.split(":", 1)
             metadata[key] = value.replace("<br>", "\n")
         self.controller().show_export_metadata_dialog(metadata,
                                                       read_only=True)
     # Parse XML.
     w.set_progress_text(_("Importing cards..."))
     self.database().card_types_to_instantiate_later = set()
     xml_filename = os.path.join(self.database().media_dir(), "cards.xml")
     element_loop = XMLFormat().parse_log_entries(\
         open(xml_filename, "r", encoding="utf-8"))
     number_of_entries = int(next(element_loop))
     if number_of_entries == 0:
         return
     w.set_progress_range(number_of_entries)
     w.set_progress_update_interval(number_of_entries / 20)
     for log_entry in element_loop:
         self.database().apply_log_entry(log_entry, importing=True)
         w.increase_progress(1)
     w.set_progress_value(number_of_entries)
     if len(self.database().card_types_to_instantiate_later) != 0:
         raise RuntimeError(_("Missing plugins for card types."))
     os.remove(xml_filename)
     os.remove(metadata_filename)
     w.close_progress()
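
The METADATA parsing above implies one key:value pair per line, with <br> standing in for embedded newlines; because of split(":", 1), later colons stay in the value. A hypothetical file that parses cleanly:

 author:Jane Doe
 notes:first line<br>second line: with a colon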
Example #7
 def do_import(self, filename, extra_tag_names=None, show_metadata=True):
     FileFormat.do_import(self, filename, extra_tag_names)
     if not extra_tag_names:
         extra_tags = []
     else:
         extra_tags = [self.database().get_or_create_tag_with_name(\
             tag_name.strip()) for tag_name in extra_tag_names.split(",")]
     self.database().set_extra_tags_on_import(extra_tags)
     # Extract zipfile. 
     w = self.main_widget()
     w.set_progress_text(_("Decompressing..."))
     zip_file = zipfile.ZipFile(filename, "r")
     zip_file.extractall(self.database().media_dir())
     # Show metadata.          
     metadata_filename = os.path.join(\
             self.database().media_dir(), "METADATA")
     if show_metadata:
         metadata = {}
         for line in open(metadata_filename, encoding="utf-8"):
             key, value = line.split(":", 1)
             metadata[key] = value.replace("<br>", "\n")
         self.controller().show_export_metadata_dialog(metadata, read_only=True)
     # Parse XML.
     w.set_progress_text(_("Importing cards..."))
     self.database().card_types_to_instantiate_later = set()
     xml_filename = os.path.join(self.database().media_dir(), "cards.xml")
     element_loop = XMLFormat().parse_log_entries(\
         open(xml_filename, "r", encoding="utf-8"))
     number_of_entries = int(next(element_loop))
     if number_of_entries == 0:
         return
     w.set_progress_range(number_of_entries)
     w.set_progress_update_interval(number_of_entries / 20)
     for log_entry in element_loop:
         self.database().apply_log_entry(log_entry, importing=True)
         w.increase_progress(1)
     w.set_progress_value(number_of_entries)
     if len(self.database().card_types_to_instantiate_later) != 0:
         raise RuntimeError(_("Missing plugins for card types."))
     os.remove(xml_filename)
     os.remove(metadata_filename)
     w.close_progress()
Example #8
 def do_import(self, filename, extra_tag_names=None):
     FileFormat.do_import(self, filename, extra_tag_names)
     w = self.main_widget()
     # The import process generates card log entries which have new 2.0
     # ids as opposed to their old 1.x ids, so we need to delete them
     # later.
     db = self.database()
     log_index = db.current_log_index()
     try:
         w.set_progress_text(_("Importing cards..."))
         self.read_items_from_mnemosyne1_xml(filename)
         self.create_cards_from_mnemosyne1(extra_tag_names)
     except MnemosyneError:
         w.close_progress()
         return
     db.remove_card_log_entries_since(log_index)
     # We now generate 'added card' events with the proper ids.
     timestamp = int(time.time())
     for item in self.items:
         db.log_added_card(timestamp, item.id)
     self.database().link_inverse_cards()
     w.close_progress()
     self.warned_about_missing_media = False
Example #9
 def do_import(self, filename, extra_tag_names=None):
     FileFormat.do_import(self, filename, extra_tag_names)
     # Open txt file in binary mode; each line is decoded below,
     # trying utf-8 first and falling back to latin-1.
     try:
         f = open(filename, "rb")
     except OSError:
         self.main_widget().show_error(_("Could not load file."))
         return
     # Parse txt file.
     facts_data = []
     line_number = 0
     for line in f:
         line_number += 1
         try:
             line = line.decode("utf-8")
         except UnicodeDecodeError:
             try:
                 line = line.decode("latin-1")
             except UnicodeDecodeError:
                 self.main_widget().show_error(\
                     _("Could not determine encoding."))
                 return
         line = line.rstrip()
         # Parse html style escaped unicode (e.g. &#33267;).
         for match in re0.finditer(line):
             # Integer part.
             u = chr(int(match.group(1)))
             # Integer part with &# and ;.
             line = line.replace(match.group(), u)
         if len(line) == 0:
             continue
         if line[0] == "\ufeff":  # Remove byte-order mark.
             line = line[1:]
         fields = line.split("\t")
         if len(fields) >= 3:  # Vocabulary card.
             if len(fields) >= 4:
                 facts_data.append({"f": fields[0], "p_1": fields[1],
                     "m_1": fields[2], "n": fields[3]})
             else:
                 facts_data.append({"f": fields[0], "p_1": fields[1],
                     "m_1": fields[2]})
         elif len(fields) == 2:  # Front-to-back only.
             facts_data.append({"f": fields[0], "b": fields[1]})               
         else:  # Malformed line.
             self.main_widget().show_error(_("Badly formed input on line") \
                 + " " + str(line_number) + ":\n" + line)
             return
     # Now that we know all the data is well-formed, create the cards.
     tag_names = []
     if extra_tag_names:
         tag_names += [tag_name.strip() for tag_name \
             in extra_tag_names.split(",")]
     for fact_data in facts_data:
         if len(fact_data.keys()) == 2:
             card_type = self.card_type_with_id("1")
         else:
             card_type = self.card_type_with_id("3")
         self.preprocess_media(fact_data, tag_names)
         self.controller().create_new_cards(fact_data, card_type, grade=-1,
             tag_names=tag_names, check_for_duplicates=False, save=False)
         if _("MISSING_MEDIA") in tag_names:
             tag_names.remove(_("MISSING_MEDIA"))
     self.warned_about_missing_media = False
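
Both tab-separated importers decide the card type from the field count: two fields become a front-to-back card (card type "1"), three or four a vocabulary card (card type "3"). Hypothetical input lines showing the accepted shapes (the keys suggest f = foreign word, p_1 = pronunciation, m_1 = meaning, n = notes, but that is an inference from the code):

 line_2_fields = "bonjour\thello"          # {"f": ..., "b": ...}
 line_3_fields = "inu\t[inu]\tdog"         # {"f": ..., "p_1": ..., "m_1": ...}
 line_4_fields = "neko\t[neko]\tcat\tnoun" # same, plus "n": "noun"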
Example #10
    def do_import(self, filename, extra_tag_names=""):
        self.main_widget().show_information(_(\
"Note that while you can edit imported cards, adding new cards to Anki's card types is currently not supported.\n\nAlso, in case you run into problems, don't hesitate to contact the developers."))
        FileFormat.do_import(self, filename, extra_tag_names)
        w = self.main_widget()
        db = self.database()
        # Preprocess apkg files.
        tmp_dir = None
        if filename.endswith(".apkg"):
            tmp_dir = self.extract_apkg(filename)
            filename = os.path.join(tmp_dir, "collection.anki2")
        # Set up tag cache.
        tag_with_name = TagCache(self.component_manager)
        # Open database.
        con = sqlite3.connect(filename)
        # Copy media directory.
        w.set_progress_text(_("Copying media files..."))
        src = filename.replace(".anki2", ".media")
        dst = db.media_dir()
        number_of_files = len(os.listdir(src))
        w.set_progress_range(number_of_files)
        w.set_progress_update_interval(number_of_files / 50)
        for item in os.listdir(src):
            shutil.copy(os.path.join(src, item), os.path.join(dst, item))
            w.increase_progress(1)
        # Import collection table.
        w.set_progress_text(_("Importing card types..."))
        # Too few in number to warrant counted progress bar.
        card_type_for_mid = {}  # mid: model id
        deck_name_for_did = {}  # did: deck id
        for id, crt, mod, scm, ver, dty, usn, ls, conf, models, decks, \
            dconf, tags in con.execute("""select id, crt, mod, scm, ver, dty,
            usn, ls, conf, models, decks, dconf, tags from col"""):
            # mod: modification time, ignore.
            # scm: schema modification time, ignore.
            # ver: schema version, ignore.
            # dty: no longer used according to Anki source.
            # usn: syncing related, ignore.
            # ls: last sync, ignore.
            # conf: configuration, ignore.
            # dconf: deck configuration, ignore.
            # tags: list of tags, but they turn up later in the notes, ignore.
            collection_creation_time = crt
            decks = json.loads(decks)
            # Decks will be converted to Tags when creating cards.
            for did in decks:
                deck_name_for_did[int(did)] = decks[did]["name"]
            # Models will be converted to CardTypes
            models = json.loads(models)
            for mid in models:  # mid: model id
                card_type_id = "7::" + mid
                card_type_already_imported = \
                    db.has_card_type_with_id(card_type_id)
                if card_type_already_imported:
                    card_type = self.component_manager.card_type_with_id[\
                        card_type_id]
                else:
                    card_type = MSided(self.component_manager)
                card_type.name = models[mid]["name"]
                card_type.id = card_type_id
                card_type.hidden_from_UI = False
                card_type_for_mid[int(mid)] = card_type
                vers = models[mid]["vers"]  # Version, ignore.
                tags = models[mid]["tags"]  # Seems empty, ignore.
                did = models[mid]["did"]  # Deck id, ignore.
                usn = models[mid]["usn"]  # Syncing related, ignore.
                if "req" in models[mid]:
                    required = models[mid]["req"]
                    # Cache for a calculation to determine which fields are
                    # required. "req": [[0, "all", [0]]]
                    # Not yet implemented.
                else:
                    required = []
                flds = models[mid]["flds"]
                flds.sort(key=lambda x: x["ord"])
                card_type.fact_keys_and_names = []
                for field in flds:
                    card_type.fact_keys_and_names.append(\
                        (str(field["ord"]), field["name"]))
                    media = field["media"]  # Reserved for future use, ignore.
                    sticky = field["sticky"]  # Sticky field, ignore.
                    rtl = field["rtl"]  # Text direction, ignore.
                    font_string = field["font"] + "," + str(field["size"]) + \
                        ",-1,5,50,0,0,0,0,0,Regular"
                    self.config().set_card_type_property(
                        "font", font_string, card_type, str(field["ord"]))
                sortf = models[mid]["sortf"]  # Sorting field, ignore.
                tmpls = models[mid]["tmpls"]
                tmpls.sort(key=lambda x: x["ord"])
                # Fact views.
                card_type.fact_views = []
                for template in tmpls:
                    fact_view_id = card_type.id + "." + str(template["ord"])
                    fact_view_already_imported = \
                        db.has_fact_view_with_id(fact_view_id)
                    if fact_view_already_imported:
                        fact_view = db.fact_view(\
                            fact_view_id, is_id_internal=False)
                        fact_view.name = template["name"]
                    else:
                        fact_view = FactView(template["name"], fact_view_id)
                    fact_view.extra_data["qfmt"] = template["qfmt"]
                    fact_view.extra_data["afmt"] = template["afmt"]
                    fact_view.extra_data["bqfmt"] = template["bqfmt"]
                    fact_view.extra_data["bafmt"] = template["bafmt"]
                    fact_view.extra_data["ord"] = template["ord"]
                    did = template["did"]  # Deck id, ignore.
                    card_type.fact_views.append(fact_view)
                    if fact_view_already_imported:
                        db.update_fact_view(fact_view)
                    else:
                        db.add_fact_view(fact_view)
                mod = models[mid]["mod"]  # Modification time, ignore.
                type_ = models[mid]["type"]  # 0: standard, 1: cloze.
                id = models[mid]["id"]
                css = models[mid]["css"]
                latex_preamble = models[mid]["latexPre"]  # Ignore.
                latex_postamble = models[mid]["latexPost"]  # Ignore.
                # Save to database.
                card_type.extra_data = {"css": css, "id": id, "type": type_}
                if card_type_already_imported:
                    db.update_card_type(card_type)
                else:
                    db.add_card_type(card_type)
        # nid are Anki-internal indices for notes, so we need to temporarily
        # store some information.
        tag_names_for_nid = {}
        card_type_for_nid = {}
        # Import facts and tags.
        w.set_progress_text(_("Importing notes..."))
        number_of_notes = con.execute(
            "select count() from notes").fetchone()[0]
        w.set_progress_range(number_of_notes)
        w.set_progress_update_interval(number_of_notes / 20)
        fact_for_nid = {}
        modification_time_for_nid = {}
        for id, guid, mid, mod, usn, tags, flds, sfld, csum, flags, data in \
            con.execute("""select id, guid, mid, mod, usn, tags, flds, sfld,
            csum, flags, data from notes"""):
            # usn: syncing related, ignore.
            # sfld: sorting field, ignore.
            # csum: checksum, ignore.
            # flags: seems empty, ignore.
            # data: seems empty, ignore.
            # Make compatible with openSM2sync:
            guid = guid.replace("`", "ap").replace("\"", "qu")
            guid = guid.replace("&", "am").replace("<",
                                                   "lt").replace(">", "gt")
            modification_time_for_nid[id] = mod
            card_type = card_type_for_mid[int(mid)]
            card_type_for_nid[id] = card_type
            fields = flds.split("\x1f")
            assert (len(fields) == len(card_type.fact_keys_and_names))
            fact_data = {}
            for i in range(len(fields)):
                fact_key = card_type.fact_keys_and_names[i][0]
                data = fields[i]
                # Deal with sound tags.
                for match in sound_re.finditer(data):
                    fname = match.group("fname")
                    data = data.replace(\
                        match.group(0), "<audio src=\"" + fname + "\">")
                # Deal with latex tags.
                data = data.replace("[latex]", "<latex>")
                data = data.replace("[/latex]", "</latex>")
                data = data.replace("[$]", "<$>")
                data = data.replace("[/$]", "</$>")
                data = data.replace("[$$]", "<$$>")
                data = data.replace("[/$$]", "</$$>")
                fact_data[fact_key] = data
            if db.has_fact_with_id(guid):
                fact = db.fact(guid, is_id_internal=False)
                fact.data = fact_data
                db.update_fact(fact)
            else:
                fact = Fact(fact_data, id=guid)
                db.add_fact(fact)
            fact_for_nid[id] = fact
            tag_names_for_nid[id] = tags
            w.increase_progress(1)
        # Import logs. This needs to happen before creating the cards,
        # otherwise, the sync protocol will use the scheduling data from the
        # latest repetition log, instead of the correct current one.
        w.set_progress_text(_("Importing logs..."))
        number_of_logs = con.execute(
            "select count() from revlog").fetchone()[0]
        w.set_progress_range(number_of_logs)
        w.set_progress_update_interval(number_of_logs / 20)
        for id, cid, usn, ease, ivl, lastIvl, factor, time, type_ in \
            con.execute("""select id, cid, usn, ease, ivl, lastIvl, factor,
            time, type from revlog"""):
            # usn: syncing related, ignore.
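            # id doubles as the review timestamp in epoch milliseconds.
            # ivl and lastIvl are in days when positive; Anki uses negative
            # values to mean seconds (learning steps), mapped to 0 below.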
            if type_ == 0:  # Acquisition phase.
                grade = 0
            else:  # Retention phase.
                grade = ease + 1  # Anki ease is from 1 to 4.
            timestamp = int(id / 1000)
            scheduled_interval = lastIvl * 86400 if lastIvl > 0 else 0
            new_interval = ivl * 86400 if ivl > 0 else 0
            next_rep = timestamp + new_interval
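            # Anki stores the ease factor in permille, e.g. 2500 -> 2.5.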
            easiness = factor / 1000 if factor else 2.5
            db.log_repetition(timestamp=timestamp,
                              card_id=cid,
                              grade=grade,
                              easiness=easiness,
                              acq_reps=0,
                              ret_reps=0,
                              lapses=0,
                              acq_reps_since_lapse=0,
                              ret_reps_since_lapse=0,
                              scheduled_interval=scheduled_interval,
                              actual_interval=scheduled_interval,
                              thinking_time=int(time / 1000),
                              next_rep=next_rep,
                              scheduler_data=0)
            w.increase_progress(1)
        # Import cards.
        w.set_progress_text(_("Importing cards..."))
        number_of_cards = con.execute(
            "select count() from cards").fetchone()[0]
        w.set_progress_range(number_of_cards)
        w.set_progress_update_interval(number_of_cards / 20)
        for id, nid, did, ord, mod, usn, type_, queue, due, ivl, factor, reps, \
            lapses, left, odue, odid, flags, data in con.execute("""select id,
            nid, did, ord, mod, usn, type, queue, due, ivl, factor, reps,
            lapses, left, odue, odid, flags, data from cards"""):
            # type: 0=new, 1=learning, 2=due
            # queue: same as above, and -1=suspended,
            #        -2=user buried, -3=sched buried
            # due is used differently for different queues.
            # - new queue: note id or random int
            # - rev queue: integer day
            # - lrn queue: integer timestamp
            # In Mnemosyne, type=2 / rev queue corresponds to grades >= 2.
            # mod: modification time, but gets updated on each answer.
            # usn: syncing related, ignore.
            # left: repetitions left to graduation, ignore.
            # odue: original due, related to filtered decks, ignore.
            # odid: original deck id, related to filtered decks, ignore.
            # flags: seems empty, ignore.
            # data: seems empty, ignore
            fact = fact_for_nid[nid]
            card_type = card_type_for_nid[nid]
            creation_time = int(nid / 1000)
            if card_type.extra_data["type"] == 0:
                fact_view = card_type.fact_views[ord]
            else:  # Cloze.
                fact_view = card_type.fact_views[0]
            already_imported = db.has_card_with_id(id)
            if already_imported:
                card = db.card(id, is_id_internal=False)
                card.card_type = card_type
                card.fact = fact
                card.fact_view = fact_view
                card.creation_time = creation_time
            else:
                card = Card(card_type,
                            fact,
                            fact_view,
                            creation_time=creation_time)
            card.id = id
            card.extra_data["ord"] = ord  # Needed separately for clozes.
            tag_names = [tag_name.strip() for \
                             tag_name in extra_tag_names.split(",")]
            tag_names += [tag_name.strip() for \
                             tag_name in tag_names_for_nid[nid].split(" ")]
            tag_names += [deck_name_for_did[did].strip().replace(",", ";")]
            for tag_name in tag_names:
                if tag_name:
                    card.tags.add(tag_with_name[tag_name])
            card.next_rep = collection_creation_time + due * 86400
            card.last_rep = card.next_rep - ivl * 86400
            card.easiness = factor / 1000 if factor else 2.5
            card.acq_reps = 1  # No information.
            card.ret_reps = reps
            card.lapses = lapses
            card.acq_reps_since_lapse = card.acq_reps  # No information.
            card.ret_reps_since_lapse = card.ret_reps  # No information.
            card.modification_time = modification_time_for_nid[nid]
            self.active = (queue >= 0)
            if type_ == 0:  # 'new', unseen.
                card.reset_learning_data()
            elif type_ == 1:  # 'learning', acquisition phase.
                card.grade = 0
                card.last_rep = mod
                card.next_rep = mod
            else:  # 'due', retention phase.
                card.grade = 4  # No information.
            if card.grade >= 2:
                assert card.ret_reps_since_lapse != 0  # Issue #93 on github.
            if already_imported:
                db.update_card(card)
            else:
                db.add_card(card)
            w.increase_progress(1)
        # Create criteria for 'database' tags.
        for deck_name in deck_name_for_did.values():
            deck_name = deck_name.strip().replace(",", ";")
            if deck_name in [criterion.name for criterion in db.criteria()]:
                continue
            tag = tag_with_name[deck_name]
            criterion = DefaultCriterion(\
                component_manager=self.component_manager)
            criterion.name = deck_name
            criterion._tag_ids_active.add(tag._id)
            criterion._tag_ids_forbidden = set()
            db.add_criterion(criterion)
        # Clean up.
        con.close()
        if tmp_dir:
            shutil.rmtree(tmp_dir)
        w.close_progress()
        self.warned_about_missing_media = False
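
sound_re is defined outside this snippet. A pattern along the following lines reproduces the substitution of Anki's [sound:...] markup shown above; the real definition in the importer may differ:

 import re

 sound_re = re.compile(r"\[sound:(?P<fname>[^\]]+)\]")

 data = "Listen: [sound:fox.mp3]"
 for match in sound_re.finditer(data):
     data = data.replace(match.group(0),
                         "<audio src=\"" + match.group("fname") + "\">")
 # data is now: Listen: <audio src="fox.mp3">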
Example #11
 def do_import(self, filename, extra_tag_names=""):
     FileFormat.do_import(self, filename, extra_tag_names)
     w = self.main_widget()
     try:
         tree = cElementTree.parse(filename)
     except cElementTree.ParseError as e:
         w.show_error(_("Unable to parse file:") + str(e))
         return
     card_type = self.card_type_with_id("1")
     tag_names = [tag_name.strip() for \
         tag_name in extra_tag_names.split(",") if tag_name.strip()]
     for element in tree.find("cards").findall("card"):
         category = element.attrib["category"]
         commit = not (element.attrib["commit"] == "0")
         for field in element.find("card_fields").findall("card_field"):
             if field.attrib["idx"] == "1":
                 question = field.text
             else:
                 answer = field.text
         card_other = element.find("card_other")
         if card_other is None:
             difficulty = 40
             difficulty_prev = 40
         else:
             difficulty = int(card_other.attrib["difficulty"])
             difficulty_prev = int(card_other.attrib["difficulty_prev"])
         # Grades are 0-5. In SM for Palm there are committed and
         # uncommitted cards. Uncommitted cards go to grade -1.
         # Otherwise, try to extrapolate something from the difficulty in SM.
         # I have implemented guess_grade such that the distribution of
         # grades looks reasonable for my test database of 4000 entries.
         # By "reasonable" I mean that most of the entries should be
         # at grade 4. I've been learning that database for 4 years, so the
         # cards should have converged by now.
         if not commit:
             grade = -1
         # Very easy items are scarce in SM and must be easiest grade.
         elif difficulty < 10:
             grade = 5
         # Assign passing grades, based upon whether the difficulty has
         # changed.
         elif difficulty > difficulty_prev:
             grade = 2
         elif difficulty == difficulty_prev:
             grade = 3
         elif difficulty < difficulty_prev:
             grade = 4
         # If the interval becomes shorter, it must have been a failure.
         if card_other is None:
             interval = 0
             interval_prev = 0
         else:
             interval = int(card_other.attrib["interval"]) * DAY
             interval_prev = int(card_other.attrib["interval_prev"]) * DAY
         if interval < interval_prev:
             grade = 0
         # Construct card.
         fact_data = {"f": question, "b": answer}
         self.preprocess_media(fact_data, tag_names)
         card = self.controller().create_new_cards(
             fact_data,
             card_type,
             grade=grade,
             tag_names=tag_names + [category],
             check_for_duplicates=False,
             save=False)[0]
         if _("MISSING_MEDIA") in tag_names:
             tag_names.remove(_("MISSING_MEDIA"))
         if card_other is not None:
             card.creation_time = int(time.mktime(time.strptime(\
                 card_other.attrib["datecreate"], "%Y-%m-%d")))
             card.modification_time = int(time.mktime(time.strptime(\
                 card_other.attrib["datecommit"], "%Y-%m-%d")))
             card.next_rep = self.scheduler().midnight_UTC(int(time.mktime(\
                 time.strptime(card_other.attrib["datenexttest"],
                 "%Y-%m-%d"))))
             card.last_rep = card.next_rep - interval
             card.lapses = int(card_other.attrib["lapses"])
             # Try to fill acquisition reps and retention reps.
             # Since SM statistics are only available for committed
             # cards, I take acq_reps = 0 and ret_reps = lapses + recalls.
             card.ret_reps = card.lapses + int(card_other.attrib["recalls"])
             # Try to derive an easiness factor EF in [1.3 .. 3.2] from
             # difficulty d in [1% .. 100%].
             # The math below is set to translate
             # difficulty=100% --> easiness = 1.3
             # difficulty=40% --> easiness = 2.5
             # difficulty=1% --> easiness = 3.2
             dp = difficulty * 0.01
             # Small values should be easy, large ones hard.
             if dp > 0.4:
                 card.easiness = 1.28 - 1.32 * math.log(dp)
             else:
                 card.easiness = 4.2 - 1.139 * math.exp(dp)
             self.database().update_card(card)
     self.warned_about_missing_media = False
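
As a sanity check on the easiness formula above, a standalone sketch evaluating both branches at the anchor difficulties quoted in the comments:

 import math

 for difficulty in (100, 40, 1):
     dp = difficulty * 0.01
     if dp > 0.4:
         easiness = 1.28 - 1.32 * math.log(dp)
     else:
         easiness = 4.2 - 1.139 * math.exp(dp)
     print(difficulty, round(easiness, 2))
 # 100 -> 1.28, 40 -> 2.5, 1 -> 3.05: close to the stated
 # 1.3 / 2.5 / 3.2 anchors, with the most drift at the easy end.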