Example No. 1
    def test_rename_associated_file(self, setup: Any,
                                    preserve_files: bool) -> None:
        """Test removing associated files.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            preserve_files: argument to `EditCommand`.
        """
        try:
            config.commands.edit.editor = "sed -i 's/einstein:/dummy:/'"

            with tempfile.TemporaryDirectory() as tmpdirname:
                path = RelPath(tmpdirname + "/einstein.pdf")
                open(  # pylint: disable=consider-using-with
                    path.path,
                    "w",
                    encoding="utf-8").close()

                Database()["einstein"].file = str(path)

                args = ["einstein"]
                if preserve_files:
                    args.insert(2, "--preserve-files")
                EditCommand().execute(args)
                assert "dummy" in Database().keys()

                target = RelPath(tmpdirname + "/dummy.pdf")
                if preserve_files:
                    assert path.path.exists()
                else:
                    assert target.path.exists()
        finally:
            config.defaults()
Example No. 2
    def file(self, file: Union[str, List[str]]) -> None:
        # noqa: D402 (we skip this error because "file(s)" triggers a false positive)
        """Sets the associated file(s) of this entry.

        Args:
            file: can be either a single path (`str`) or a list thereof. In either case, the strings
                will be converted to paths relative to the user's home directory. Internally, this
                field will always be stored as a list.
        """
        if isinstance(file, list):
            paths = [RelPath(f) for f in file]
        else:
            paths = [
                RelPath(f) for f in file.split(
                    config.database.stringify.list_separator.file)
            ]
            if len(paths) > 1:
                LOGGER.info(
                    "Converted the field '%s' of entry '%s' to a list. You can consider storing it "
                    "as such directly.",
                    "file",
                    self.label,
                    extra={
                        "entry": self.label,
                        "field": "file"
                    },
                )
        self.data["file"] = [str(p) for p in paths]
        LOGGER.debug("Adding '%s' as the file to '%s'.", self.data["file"],
                     self.label)
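
A minimal usage sketch of this setter, assuming `Entry` can be imported from `cobib.database` (as suggested by the logger names in the tests below) and constructed from a label and a data dictionary:

from cobib.database import Entry  # assumed public import path

entry = Entry("example", {"ENTRYTYPE": "article"})

# a single string is split on the configured separator and normalized to a list
entry.file = "~/papers/example.pdf"  # illustrative path
# a list is normalized element-wise; both forms are stored as a list of strings
entry.file = ["~/papers/example.pdf", "/tmp/notes.pdf"]
print(entry.data["file"])  # ['~/papers/example.pdf', '/tmp/notes.pdf']
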
Example No. 3
    def read(cls) -> None:
        """Reads the database file.

        The YAML database file pointed to by the configuration file is read in and parsed.
        This uses `cobib.parsers.YAMLParser` to parse the data.
        This function clears the contents of the singleton `Database` instance and resets
        `Database._unsaved_entries` to an empty dictionary. Thus, a call to this function
        *irreversibly* synchronizes the state of the runtime `Database` instance to the actually
        written contents of the database file on disc.
        """
        if cls._instance is None:
            cls()
            return
        _instance = cls._instance

        file = RelPath(config.database.file).path
        try:
            LOGGER.info("Loading database file: %s", file)
            # pylint: disable=import-outside-toplevel
            from cobib.parsers.yaml import YAMLParser

            _instance.clear()
            _instance.update(YAMLParser().parse(file))
        except FileNotFoundError:
            LOGGER.critical(
                "The database file %s does not exist! Please run `cobib init`!",
                file)
            sys.exit(1)

        cls._unsaved_entries.clear()
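
A short sketch of how this method is typically driven, assuming `config` and `Database` are importable as shown (the database path is illustrative):

from cobib.config import config      # assumed import path
from cobib.database import Database  # assumed import path

config.database.file = "~/.local/share/cobib/literature.yaml"  # illustrative path

Database().read()               # (re-)parses the file from disc
print(list(Database().keys()))  # the singleton now mirrors the on-disc contents
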
Example No. 4
    def test_add_skip_download(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test adding a new entry and skipping the automatic download.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        path = RelPath("/tmp/Cao2018.pdf")
        try:
            args = ["-a", "1812.09976", "--skip-download"]

            AddCommand().execute(args)

            if (
                "cobib.parsers.arxiv",
                logging.ERROR,
                "An Exception occurred while trying to query the arXiv ID: 1812.09976.",
            ) in caplog.record_tuples:
                pytest.skip("The requests API encountered an error. Skipping test.")

            entry = Database()["Cao2018"]
            assert entry.label == "Cao2018"
            assert entry.data["archivePrefix"] == "arXiv"
            assert entry.data["arxivid"].startswith("1812.09976")
            assert "_download" not in entry.data.keys()
            assert not os.path.exists(path.path)
        finally:
            try:
                os.remove(path.path)
            except FileNotFoundError:
                pass
Example No. 5
    def load(
        configpath: Optional[Union[str, Path, TextIO, io.TextIOWrapper]] = None
    ) -> None:
        """Loads another configuration object at runtime.

        WARNING: The new Python-like configuration allows essentially arbitrary Python code, so it
        is the user's responsibility to treat this with care!

        Args:
            configpath: the path to the configuration.
        """
        if configpath is not None:
            if isinstance(configpath, (TextIO, io.TextIOWrapper)):
                configpath = configpath.name
        elif "COBIB_CONFIG" in os.environ:
            configpath_env = os.environ["COBIB_CONFIG"]
            if configpath_env.lower() in ("", "0", "f", "false", "nil",
                                          "none"):
                LOGGER.info(
                    "Skipping configuration loading because negative COBIB_CONFIG environment "
                    "variable was detected.")
                return
            configpath = RelPath(configpath_env).path
        elif RelPath(Config.XDG_CONFIG_FILE).exists():
            configpath = RelPath(Config.XDG_CONFIG_FILE).path
        else:  # pragma: no cover
            return  # pragma: no cover
        LOGGER.info("Loading configuration from default location: %s",
                    configpath)

        spec = importlib.util.spec_from_file_location("config", configpath)
        if spec is None:
            LOGGER.error(
                "The config at %s could not be interpreted as a Python module.",
                configpath)
            sys.exit(1)
        else:
            cfg = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(cfg)  # type: ignore

        try:
            # validate config
            config.validate()
        except RuntimeError as exc:
            LOGGER.error(exc)
            sys.exit(1)
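
Because the configuration is plain Python, a user configuration file is simply a module that mutates the global `config` object. A hedged sketch of such a file and of loading it explicitly (paths are illustrative; only settings that appear elsewhere in these examples are used):

# contents of an illustrative config file, e.g. ~/.config/cobib/config.py
from cobib.config import config  # assumed import path

config.database.git = True
config.commands.edit.editor = "vim"

# loading it at runtime; without an argument the lookup order above applies
# (explicit path -> COBIB_CONFIG -> XDG default)
from pathlib import Path
config.load(Path.home() / ".config" / "cobib" / "config.py")
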
Example No. 6
def test_relative_path() -> None:
    """Test a path which is relative to the user's home directory."""
    path = Path.home() / "dummy.txt"
    rel_path = RelPath(path)

    # the string is relative
    assert str(rel_path) == "~/dummy.txt"
    # the path property is fully-resolved
    assert rel_path.path == path
    # getting a Path-property resolves the path first
    assert rel_path.parent == Path.home()
Example No. 7
def test_absolute_path() -> None:
    """Test a path which is not relative to the user's home directory and therefore absolute."""
    path = Path("/tmp/dummy.txt")
    abs_path = RelPath(path)

    # the string is absolute
    assert str(abs_path) == "/tmp/dummy.txt"
    # the path property is fully-resolved
    assert abs_path.path == path
    # getting a Path-property resolves the path first
    assert abs_path.parent == Path("/tmp")
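
The two tests above pin down the `RelPath` contract: the string form is contracted against the user's home directory where possible, while `.path` (and any forwarded `Path` attribute) is always fully resolved. A compact sketch, assuming `RelPath` lives in `cobib.utils.rel_path`:

from pathlib import Path
from cobib.utils.rel_path import RelPath  # assumed module location

rel = RelPath(Path.home() / "dummy.txt")
assert str(rel) == "~/dummy.txt"               # contracted string form
assert rel.path == Path.home() / "dummy.txt"   # fully-resolved Path form

abs_ = RelPath("/tmp/dummy.txt")
assert str(abs_) == "/tmp/dummy.txt"           # paths outside $HOME stay absolute
assert abs_.parent == Path("/tmp")             # Path attributes resolve first
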
Example No. 8
    def _open_url(url: ParseResult) -> None:
        """Opens a URL."""
        opener = config.commands.open.command
        try:
            url_str: str = url.geturl() if url.scheme else str(RelPath(url.geturl()).path)
            LOGGER.debug('Opening "%s" with %s.', url_str, opener)
            with open(os.devnull, "w", encoding="utf-8") as devnull:
                subprocess.Popen(  # pylint: disable=consider-using-with
                    [opener, url_str], stdout=devnull, stderr=devnull, stdin=devnull, close_fds=True
                )
        except FileNotFoundError as err:
            LOGGER.error(err)
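
The opener is not hard-coded; it is read from `config.commands.open.command` and receives either the URL itself or, for plain file paths, the resolved `RelPath`. A hedged sketch of pointing it at a different program (the `xdg-open` value is an assumption, not a documented default):

from cobib.config import config  # assumed import path

# assumption: any program accepting a single URL/path argument will do
config.commands.open.command = "xdg-open"
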
Example No. 9
    def test_event_post_git_commit(self, setup: Any) -> None:
        """Test the PostGitCommit event.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
        """
        @Event.PostGitCommit.subscribe
        def hook(root: Path, file: Path) -> None:
            file.unlink()

        assert Event.PostGitCommit.validate()

        DummyCommand().execute([])

        assert not RelPath(config.database.file).path.exists()
Example No. 10
    def test_rename_associated_file(self, setup: Any, preserve_files: bool) -> None:
        """Test removing associated files.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            preserve_files: argument to `ModifyCommand`.
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            path = RelPath(tmpdirname + "/knuthwebsite.pdf")
            open(path.path, "w", encoding="utf-8").close()  # pylint: disable=consider-using-with

            Database()["knuthwebsite"].file = str(path)

            args = ["label:dummy", "-s", "--", "knuthwebsite"]
            if preserve_files:
                args.insert(2, "--preserve-files")
            ModifyCommand().execute(args)
            assert "dummy" in Database().keys()

            target = RelPath(tmpdirname + "/dummy.pdf")
            if preserve_files:
                assert path.path.exists()
            else:
                assert target.path.exists()
Example No. 11
    def _get_cached_oauth_tokens() -> Dict[str, str]:
        """Obtain the OAuth authentication tokens for the Zotero API from coBib's cache.

        Returns:
            A dictionary containing the authentication information. Refer to
            `_get_fresh_oauth_tokens` for more specific details on the dictionary contents.
        """
        LOGGER.info("Attempting to load cached OAuth tokens for Zotero.")
        cache_path = RelPath(config.logging.cache).path
        try:
            with open(cache_path, "r", encoding="utf-8") as cache:
                cached_data = json.load(cache)
        except FileNotFoundError:
            cached_data = {}
        return cached_data.get("Zotero", {})  # type: ignore[no-any-return]
Example No. 12
    def test_add_with_download(
        self,
        folder: Optional[str],
        setup: Any,
        capsys: pytest.CaptureFixture[str],
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """Test adding a new entry with an associated file automatically downloaded.

        Args:
            folder: the folder for the downloaded file.
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            capsys: the built-in pytest fixture.
            caplog: the built-in pytest fixture.
        """
        path = RelPath(f"{'/tmp' if folder is None else folder}/Cao2018.pdf")
        try:
            # ensure file does not exist yet
            os.remove(path.path)
        except FileNotFoundError:
            pass
        try:
            args = ["-a", "1812.09976"]
            if folder:
                args += ["-p", folder]

            AddCommand().execute(args)

            if (
                "cobib.parsers.arxiv",
                logging.ERROR,
                "An Exception occurred while trying to query the arXiv ID: 1812.09976.",
            ) in caplog.record_tuples:
                pytest.skip("The requests API encountered an error. Skipping test.")

            entry = Database()["Cao2018"]
            assert entry.label == "Cao2018"
            assert entry.data["archivePrefix"] == "arXiv"
            assert entry.data["arxivid"].startswith("1812.09976")
            assert "_download" not in entry.data.keys()
            assert f"Successfully downloaded {path}" in capsys.readouterr().out
            assert os.path.exists(path.path)
        finally:
            try:
                os.remove(path.path)
            except FileNotFoundError:
                pass
Example No. 13
    def git(self, args: Optional[Dict[str, Any]] = None, force: bool = False) -> None:
        """Generates a git commit to track the commands changes.

        This function only has an effect when `config.database.git` is enabled *and* the database
        has been initialized correctly with `cobib.commands.init.InitCommand`.
        Otherwise, a warning will be printed and no commit will be generated.
        Nonetheless, the changes applied by the command will have taken effect in the database.

        Args:
            args: a dictionary containing the *parsed* command arguments.
            force: whether to ignore the configuration setting. This option is mainly used by the
                `cobib.commands.init.InitCommand`.
        """
        git_tracked = config.database.git
        if not git_tracked and not force:
            return

        file = RelPath(config.database.file).path
        root = file.parent

        if not (root / ".git").exists():
            if git_tracked:
                msg = (
                    "You have configured coBib to track your database with git."
                    "\nPlease run `cobib init --git`, to initialize this tracking."
                )
                LOGGER.warning(msg)
                return

        msg = f"Auto-commit: {self.name.title()}Command"
        if args:
            msg += "\n\n"
            msg += json.dumps(args, indent=2, default=str)

        msg = Event.PreGitCommit.fire(msg, args) or msg

        commands = [
            f"cd {root}",
            f"git add -- {file}",
            f"git commit --no-gpg-sign --quiet --message {shlex.quote(msg)}",
        ]
        LOGGER.debug("Auto-commit to git from %s command.", self.name)
        os.system("; ".join(commands))

        Event.PostGitCommit.fire(root, file)
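
Because the commit message is passed through `Event.PreGitCommit.fire(msg, args)`, a subscriber can rewrite it before the commit is created. A minimal sketch following the subscription pattern used in the tests above; the exact parameter annotations are inferred from the `fire` call, not taken from documentation:

from typing import Any, Dict, Optional
from cobib.config import Event  # assumed import path

@Event.PreGitCommit.subscribe
def tag_commit_message(msg: str, args: Optional[Dict[str, Any]]) -> Optional[str]:
    # returning a string replaces the auto-generated message; returning None keeps it
    return "[cobib] " + msg
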
Example No. 14
def test_entry_set_file(files: List[str], caplog: pytest.LogCaptureFixture) -> None:
    """Test file setting.

    Args:
        files: a list of paths to files.
        caplog: the built-in pytest fixture.
    """
    entry = Entry("Cao_2019", EXAMPLE_ENTRY_DICT)
    entry.file = files[0] if len(files) == 1 else files  # type: ignore
    expected = [str(RelPath(file)) for file in files]
    assert entry.file == expected
    # check lint logging
    if len(files) > 1:
        entry.file = ", ".join(files)  # type: ignore
        assert entry.file == expected
        assert (
            "cobib.database.entry",
            20,
            "Converted the field 'file' of entry 'Cao_2019' to a list. You can consider storing it "
            "as such directly.",
        ) in caplog.record_tuples
Example No. 15
    def _store_oauth_tokens(tokens: Dict[str, str]) -> None:
        """Stores the OAuth authentication tokens for the Zotero API in coBib's cache.

        Args:
            tokens: the dictionary containing the authentication information. Refer to
                `_get_fresh_oauth_tokens` for more specific details on the dictionary contents.
        """
        LOGGER.info("Storing OAuth tokens for Zotero in cache.")
        cache_path = RelPath(config.logging.cache).path
        try:
            with open(cache_path, "r", encoding="utf-8") as cache:
                cached_data = json.load(cache)
        except FileNotFoundError:
            cached_data = {}

        if "Zotero" not in cached_data.keys():
            cached_data["Zotero"] = {}
        cached_data["Zotero"].update(tokens)

        if not cache_path.parent.exists():
            cache_path.parent.mkdir(parents=True)

        with open(cache_path, "w", encoding="utf-8") as cache:
            json.dump(cached_data, cache)
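
Together with `_get_cached_oauth_tokens` above, this implies the cache file is a single JSON object keyed by importer name (e.g. "Zotero"). A sketch of reading it back, assuming the same import locations as before:

import json
from cobib.config import config  # assumed import path
from cobib.utils.rel_path import RelPath  # assumed module location

with open(RelPath(config.logging.cache).path, "r", encoding="utf-8") as cache:
    zotero_tokens = json.load(cache).get("Zotero", {})  # {} if never stored
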
Example No. 16
class TestAddCommand(CommandTest, TUITest):
    """Tests for coBib's AddCommand."""

    def get_command(self) -> Type[cobib.commands.base_command.Command]:
        # noqa: D102
        return AddCommand

    def _assert(self, extra_filename: str) -> None:
        """Common assertion utility method.

        Args:
            extra_filename: path to an additional file whose contents are to be added to the
                expected lines.
        """
        # compare with reference file
        with open(EXAMPLE_LITERATURE, "r", encoding="utf-8") as expected:
            true_lines = expected.readlines()
        with open(extra_filename, "r", encoding="utf-8") as extra:
            true_lines += extra.readlines()
        with open(config.database.file, "r", encoding="utf-8") as file:
            # we use zip_longest to ensure that we don't have more than we expect
            for line, truth in zip_longest(file, true_lines):
                assert line == truth

    def _assert_entry(self, label: str, **kwargs) -> None:  # type: ignore
        """An additional assertion utility to check specific entry fields.

        Args:
            label: the label of the entry.
            kwargs: additional keyword arguments whose contents are checked against the Entry's
                `data` contents.
        """
        entry = Database()[label]
        for key, value in kwargs.items():
            assert entry.data.get(key, None) == value

    @pytest.mark.parametrize(
        ["setup"],
        [
            [{"git": False}],
            [{"git": True}],
        ],
        indirect=["setup"],
    )
    @pytest.mark.parametrize(
        ["more_args", "entry_kwargs"],
        [
            [[], {}],
            [
                ["-f", "test/debug.py"],
                {"file": [str(RelPath("test/debug.py"))]},
            ],
            [["-l", "dummy_label"], {}],
            [["tag"], {"tags": ["tag"]}],
            [["tag", "tag2"], {"tags": ["tag", "tag2"]}],
        ],
    )
    def test_command(self, setup: Any, more_args: List[str], entry_kwargs: Dict[str, Any]) -> None:
        """Test the command itself.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            more_args: additional arguments to be passed to the command.
            entry_kwargs: the expected contents of the resulting `Entry`.
        """
        git = setup.get("git", False)

        try:
            label = more_args[more_args.index("-l") + 1]
        except ValueError:
            label = "example_multi_file_entry"
        args = ["-b", EXAMPLE_MULTI_FILE_ENTRY_BIB] + more_args

        AddCommand().execute(args)

        assert Database()[label]

        if entry_kwargs or label != "example_multi_file_entry":
            self._assert_entry(label, **entry_kwargs)
        else:
            # the files will only match when we don't use extra arguments
            self._assert(EXAMPLE_MULTI_FILE_ENTRY_YAML)

        if git:
            # assert the git commit message
            # Note: we do not assert the arguments, because they depend on the available parsers
            self.assert_git_commit_message("add", None)

    def test_add_new_entry(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test adding a new plain entry.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        AddCommand().execute(["-l", "dummy"])
        assert (
            "cobib.commands.add",
            30,
            "No input to parse. Creating new entry 'dummy' manually.",
        ) in caplog.record_tuples

        with open(config.database.file, "r", encoding="utf-8") as file:
            lines = file.readlines()
            dummy_start = lines.index("dummy:\n")
            assert dummy_start > 0
            assert lines[dummy_start - 1] == "---\n"
            assert lines[dummy_start + 1] == "  ENTRYTYPE: article\n"
            assert lines[dummy_start + 2] == "...\n"

    @pytest.mark.parametrize("folder", [None, "."])
    def test_add_with_download(
        self,
        folder: Optional[str],
        setup: Any,
        capsys: pytest.CaptureFixture[str],
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """Test adding a new entry with an associated file automatically downloaded.

        Args:
            folder: the folder for the downloaded file.
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            capsys: the built-in pytest fixture.
            caplog: the built-in pytest fixture.
        """
        path = RelPath(f"{'/tmp' if folder is None else folder}/Cao2018.pdf")
        try:
            # ensure file does not exist yet
            os.remove(path.path)
        except FileNotFoundError:
            pass
        try:
            args = ["-a", "1812.09976"]
            if folder:
                args += ["-p", folder]

            AddCommand().execute(args)

            if (
                "cobib.parsers.arxiv",
                logging.ERROR,
                "An Exception occurred while trying to query the arXiv ID: 1812.09976.",
            ) in caplog.record_tuples:
                pytest.skip("The requests API encountered an error. Skipping test.")

            entry = Database()["Cao2018"]
            assert entry.label == "Cao2018"
            assert entry.data["archivePrefix"] == "arXiv"
            assert entry.data["arxivid"].startswith("1812.09976")
            assert "_download" not in entry.data.keys()
            assert f"Successfully downloaded {path}" in capsys.readouterr().out
            assert os.path.exists(path.path)
        finally:
            try:
                os.remove(path.path)
            except FileNotFoundError:
                pass

    def test_add_skip_download(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test adding a new entry and skipping the automatic download.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        path = RelPath("/tmp/Cao2018.pdf")
        try:
            args = ["-a", "1812.09976", "--skip-download"]

            AddCommand().execute(args)

            if (
                "cobib.parsers.arxiv",
                logging.ERROR,
                "An Exception occurred while trying to query the arXiv ID: 1812.09976.",
            ) in caplog.record_tuples:
                pytest.skip("The requests API encountered an error. Skipping test.")

            entry = Database()["Cao2018"]
            assert entry.label == "Cao2018"
            assert entry.data["archivePrefix"] == "arXiv"
            assert entry.data["arxivid"].startswith("1812.09976")
            assert "_download" not in entry.data.keys()
            assert not os.path.exists(path.path)
        finally:
            try:
                os.remove(path.path)
            except FileNotFoundError:
                pass

    @pytest.mark.parametrize(
        ["setup"],
        [
            [{"git": False}],
            [{"git": True}],
        ],
        indirect=["setup"],
    )
    def test_add_with_update(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test update option of AddCommand.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        git = setup.get("git", False)
        AddCommand().execute(["-a", "1812.09976", "--skip-download"])

        if (
            "cobib.parsers.arxiv",
            logging.ERROR,
            "An Exception occurred while trying to query the arXiv ID: 1812.09976.",
        ) in caplog.record_tuples:
            pytest.skip("The requests API encountered an error. Skipping test.")

        # assert initial state
        entry = Database()["Cao2018"]

        assert entry.data["author"].startswith("Yudong Cao")
        assert entry.data["title"].startswith("Quantum Chemistry in the Age of Quantum Computing")
        assert entry.data["arxivid"].startswith("1812.09976")
        assert entry.data["doi"] == "10.1021/acs.chemrev.8b00803"
        assert entry.data["primaryClass"] == "quant-ph"
        assert entry.data["archivePrefix"] == "arXiv"
        assert entry.data["abstract"] != ""
        assert entry.data["year"] == 2018

        assert "journal" not in entry.data.keys()
        assert "month" not in entry.data.keys()
        assert "number" not in entry.data.keys()
        assert "pages" not in entry.data.keys()
        assert "volume" not in entry.data.keys()

        args = ["-d", "10.1021/acs.chemrev.8b00803", "-l", "Cao2018", "--skip-download", "--update"]
        AddCommand().execute(args)

        if (
            "cobib.parsers.doi",
            logging.ERROR,
            "An Exception occurred while trying to query the DOI: 10.1021/acs.chemrev.8b00803.",
        ) in caplog.record_tuples:
            pytest.skip("The requests API encountered an error. Skipping test.")

        # assert final state
        entry = Database()["Cao2018"]

        assert entry.data["author"].startswith("Yudong Cao")
        assert entry.data["title"].startswith("Quantum Chemistry in the Age of Quantum Computing")
        assert entry.data["arxivid"].startswith("1812.09976")
        assert entry.data["primaryClass"] == "quant-ph"
        assert entry.data["archivePrefix"] == "arXiv"
        assert entry.data["abstract"] != ""

        assert entry.data["journal"] == "Chemical Reviews"
        assert entry.data["doi"] == "10.1021/acs.chemrev.8b00803"
        assert entry.data["month"] == "aug"
        assert entry.data["number"] == 19
        assert entry.data["pages"] == "10856--10915"
        assert entry.data["volume"] == 119
        assert entry.data["year"] == 2019

        if git:
            # assert the git commit message
            # Note: we do not assert the arguments, because they depend on the available parsers
            self.assert_git_commit_message("add", None)

    def test_skip_manual_add_if_exists(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test manual addition is skipped if the label exists already.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        AddCommand().execute(["-l", "einstein"])
        assert (
            "cobib.commands.add",
            30,
            "You tried to add a new entry 'einstein' which already exists!",
        ) in caplog.record_tuples
        assert (
            "cobib.commands.add",
            30,
            "Please use `cobib edit einstein` instead!",
        ) in caplog.record_tuples

    def test_continue_after_skip_exists(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test entry addition continues after skipping over existing entry.

        Regression test against #83.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        with tempfile.NamedTemporaryFile("w") as file:
            with open(EXAMPLE_DUPLICATE_ENTRY_BIB, "r", encoding="utf-8") as existing:
                file.writelines(existing.readlines())
            file.writelines(["@article{dummy,\nauthor = {Dummy},\n}"])
            file.flush()
            AddCommand().execute(["--skip-existing", "-b", file.name])
        assert (
            "cobib.commands.add",
            30,
            "You tried to add a new entry 'einstein' which already exists!",
        ) in caplog.record_tuples
        assert (
            "cobib.commands.add",
            30,
            "Please use `cobib edit einstein` instead!",
        ) in caplog.record_tuples
        assert (
            "cobib.database.database",
            10,
            "Updating entry dummy",
        ) in caplog.record_tuples

    def test_warning_missing_label(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test warning for missing label and any other input.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        AddCommand().execute([""])
        assert (
            "cobib.commands.add",
            40,
            "Neither an input to parse nor a label for manual creation specified!",
        ) in caplog.record_tuples

    @pytest.mark.parametrize(
        ["setup"],
        [
            [{"git": False}],
            [{"git": True}],
        ],
        indirect=["setup"],
    )
    def test_overwrite_label(self, setup: Any) -> None:
        """Test add command while specifying a label manually.

        Regression test against #4.

        The duplicate entry has been adapted to also assert the expansion of journal abbreviations.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
        """
        config.utils.journal_abbreviations = [("Annalen der Physik", "Ann. Phys.")]
        git = setup.get("git", False)
        # add potentially duplicate entry
        AddCommand().execute(["-b", EXAMPLE_DUPLICATE_ENTRY_BIB, "--label", "duplicate_resolver"])

        assert Database()["duplicate_resolver"]

        self._assert(EXAMPLE_DUPLICATE_ENTRY_YAML)

        if git:
            # assert the git commit message
            self.assert_git_commit_message("add", None)

    @pytest.mark.parametrize(
        ["setup"],
        [
            [{"git": False}],
            [{"git": True}],
        ],
        indirect=["setup"],
    )
    def test_configured_label_default(self, setup: Any) -> None:
        """Test add command when a `label_default` is pre-configured.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
        """
        config.database.format.label_default = "{author.split()[1]}{year}"
        git = setup.get("git", False)

        AddCommand().execute(["-b", EXAMPLE_DUPLICATE_ENTRY_BIB])

        assert Database()["Einstein1905"]

        if git:
            # assert the git commit message
            self.assert_git_commit_message("add", None)

    @pytest.mark.parametrize(
        ["setup"],
        [
            [{"git": False}],
            [{"git": True}],
        ],
        indirect=["setup"],
    )
    def test_disambiguate_label(self, setup: Any, caplog: pytest.LogCaptureFixture) -> None:
        """Test label disambiguation if the provided one already exists.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            caplog: the built-in pytest fixture.
        """
        git = setup.get("git", False)

        AddCommand().execute(["-b", EXAMPLE_DUPLICATE_ENTRY_BIB])

        assert (
            "cobib.commands.add",
            30,
            "You tried to add a new entry 'einstein' which already exists!",
        ) in caplog.record_tuples
        assert (
            "cobib.commands.add",
            30,
            "The label will be disambiguated based on the configuration option: "
            "config.database.format.label_suffix",
        ) in caplog.record_tuples

        assert Database()["einstein_a"]

        if git:
            # assert the git commit message
            self.assert_git_commit_message("add", None)

    @pytest.mark.parametrize(
        ["setup"],
        [
            [{"git": False}],
        ],
        indirect=["setup"],
    )
    # other variants are already covered by test_command
    def test_cmdline(self, setup: Any, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test the command-line access of the command.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
            monkeypatch: the built-in pytest fixture.
        """
        self.run_module(monkeypatch, "main", ["cobib", "add", "-b", EXAMPLE_MULTI_FILE_ENTRY_BIB])
        self._assert(EXAMPLE_MULTI_FILE_ENTRY_YAML)

    def test_tui(self, setup: Any) -> None:
        """Test the TUI access of the command.

        Args:
            setup: the `tests.commands.command_test.CommandTest.setup` fixture.
        """

        def assertion(screen, logs, **kwargs):  # type: ignore
            self._assert(EXAMPLE_MULTI_FILE_ENTRY_YAML)

            assert "example_multi_file_entry" in screen.display[1]

            expected_log = [
                ("cobib.commands.add", 10, "Add command triggered from TUI."),
                ("cobib.commands.add", 10, "Starting Add command."),
                (
                    "cobib.commands.add",
                    10,
                    "Adding entries from bibtex: '" + EXAMPLE_MULTI_FILE_ENTRY_BIB + "'.",
                ),
                ("cobib.commands.add", 20, "'example_multi_file_entry' was added to the database."),
                ("cobib.commands.add", 10, "Updating list after Add command."),
            ]
            assert [log for log in logs if log[0] == "cobib.commands.add"] == expected_log

        keys = "a-b " + EXAMPLE_MULTI_FILE_ENTRY_BIB + "\n"
        self.run_tui(keys, assertion, {})

    def test_event_pre_add_command(self, setup: Any) -> None:
        """Tests the PreAddCommand event."""

        @Event.PreAddCommand.subscribe
        def hook(largs: Namespace) -> None:
            largs.label = "dummy"

        assert Event.PreAddCommand.validate()

        AddCommand().execute(["-b", EXAMPLE_DUPLICATE_ENTRY_BIB])

        assert "dummy" in Database().keys()

    def test_event_post_add_command(self, setup: Any) -> None:
        """Tests the PostAddCommand event."""

        @Event.PostAddCommand.subscribe
        def hook(new_entries: Dict[str, Entry]) -> None:
            new_entries["dummy"] = new_entries.pop("einstein_a")

        assert Event.PostAddCommand.validate()

        AddCommand().execute(["-b", EXAMPLE_DUPLICATE_ENTRY_BIB])

        assert "dummy" in Database().keys()
Example No. 17
    def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
        """Redoes the last undone change.

        This command is *only* available if coBib's git-integration has been enabled via
        `config.database.git` *and* initialized properly (see `cobib.commands.init.InitCommand`).
        If that is the case, this command will re-apply the changes *of a previously undone* command
        (see `cobib.commands.undo.UndoCommand`).

        Args:
            args: a sequence of additional arguments used for the execution. The following values
                are allowed for this command:
                    * **no** additional arguments are required for this subcommand!
            out: the output IO stream. This defaults to `sys.stdout`.
        """
        git_tracked = config.database.git
        if not git_tracked:
            msg = (
                "You must enable coBib's git-tracking in order to use the `Redo` command."
                "\nPlease refer to the man-page for more information on how to do so."
            )
            LOGGER.error(msg)
            return

        file = RelPath(config.database.file).path
        root = file.parent
        if not (root / ".git").exists():
            msg = (
                "You have configured, but not initialized coBib's git-tracking."
                "\nPlease consult `cobib init --help` for more information on how to do so."
            )
            LOGGER.error(msg)
            return

        LOGGER.debug("Starting Redo command.")
        parser = ArgumentParser(prog="redo",
                                description="Redo subcommand parser.")

        try:
            # pylint: disable=unused-variable
            largs = parser.parse_args(args)
        except argparse.ArgumentError as exc:
            LOGGER.error(exc.message)
            return

        Event.PreRedoCommand.fire(largs)

        LOGGER.debug("Obtaining git log.")
        lines = subprocess.check_output([
            "git",
            "--no-pager",
            "-C",
            f"{root}",
            "log",
            "--oneline",
            "--no-decorate",
            "--no-abbrev",
        ])
        redone_shas = set()
        for commit in lines.decode().strip().split("\n"):
            LOGGER.debug("Processing commit %s", commit)
            sha, *message = commit.split()
            if message[0] == "Redo":
                # Store already redone commit sha
                LOGGER.debug("Storing redone commit sha: %s", message[-1])
                redone_shas.add(message[-1])
                continue
            if sha in redone_shas:
                LOGGER.info("Skipping %s as it was already redone", sha)
                continue
            if message[0] == "Undo":
                LOGGER.debug("Attempting to redo %s.", sha)
                commands = [
                    f"git -C {root} revert --no-commit {sha}",
                    f"git -C {root} commit --no-gpg-sign --quiet --message 'Redo {sha}'",
                ]
                with subprocess.Popen("; ".join(commands),
                                      shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE) as redo:
                    redo.communicate()
                    if redo.returncode != 0:
                        LOGGER.error(  # pragma: no cover
                            "Redo was unsuccessful. Please consult the logs and git history of your"
                            " database for more information.")
                    else:
                        # update Database
                        Database().read()
                break
        else:
            msg = "Could not find a commit to redo. You must have undone something first!"
            LOGGER.warning(msg)
            sys.exit(1)

        Event.PostRedoCommand.fire(root, sha)
Example No. 18
    def test_unify_labels(self, git: bool) -> None:
        # pylint: disable=no-self-use
        """Test actual changes of label unification.

        Args:
            git: whether or not git-tracking should be enabled.
        """
        tmp_dir = Path(tempfile.gettempdir()).resolve()
        cobib_test_dir = tmp_dir / "cobib_unify_label_test"
        cobib_test_dir.mkdir(parents=True, exist_ok=True)
        cobib_test_dir_git = cobib_test_dir / ".git"

        database_file = RelPath(cobib_test_dir / "database.yaml")

        copyfile(TestUnifyLabels.REL_PATH.path, database_file.path)
        config.database.file = str(database_file)
        config.database.git = git

        if git:
            commands = [
                f"cd {cobib_test_dir}",
                "git init",
                "git add -- database.yaml",
                "git commit --no-gpg-sign --quiet --message 'Initial commit'",
            ]
            os.system("; ".join(commands))

        try:
            # apply label unification
            shell_helper.unify_labels(["--apply"])

            # assert unified database
            with open(database_file.path, "r", encoding="utf-8") as file:
                with open(
                        RelPath(get_resource("unified_database.yaml",
                                             "utils")).path,
                        "r",
                        encoding="utf-8",
                ) as expected:
                    for line, truth in zip_longest(file.readlines(),
                                                   expected.readlines()):
                        assert line == truth

            # assert git message
            if git:
                with subprocess.Popen(
                    [
                        "git",
                        "-C",
                        cobib_test_dir_git,
                        "show",
                        "--format=format:%B",
                        "--no-patch",
                        "HEAD",
                    ],
                        stdout=subprocess.PIPE,
                ) as proc:
                    message, _ = proc.communicate()
                    # decode it
                    split_msg = message.decode("utf-8").split("\n")
                    if split_msg is None:
                        return
                    # assert subject line
                    assert "Auto-commit: ModifyCommand" in split_msg[0]

        finally:
            rmtree(cobib_test_dir)
            Database().clear()
Example No. 19
    def test_lint_auto_format(self, git: bool) -> None:
        """Test automatic lint formatter.

        Args:
            git: whether or not git-tracking should be enabled.
        """
        tmp_dir = Path(tempfile.gettempdir()).resolve()
        cobib_test_dir = tmp_dir / "cobib_lint_test"
        cobib_test_dir.mkdir(parents=True, exist_ok=True)
        cobib_test_dir_git = cobib_test_dir / ".git"

        database_file = RelPath(cobib_test_dir / "database.yaml")

        copyfile(TestLintDatabase.REL_PATH.path, database_file.path)
        config.database.file = str(database_file)
        config.database.git = git

        if git:
            commands = [
                f"cd {cobib_test_dir}",
                "git init",
                "git add -- database.yaml",
                "git commit --no-gpg-sign --quiet --message 'Initial commit'",
            ]
            os.system("; ".join(commands))

        try:
            # apply linting with formatting and check for the expected lint messages
            args: List[str] = ["--format"]
            pre_lint_messages = shell_helper.lint_database(args)
            expected_messages = [
                "The following lint messages have successfully been resolved:"
            ] + self.EXPECTED
            for msg, truth in zip_longest(pre_lint_messages,
                                          expected_messages):
                if msg.strip() and truth:
                    assert msg == truth.replace(str(TestLintDatabase.REL_PATH),
                                                str(database_file))

            # assert auto-formatted database
            with open(database_file.path, "r", encoding="utf-8") as file:
                with open(get_resource("fixed_database.yaml", "utils"),
                          "r",
                          encoding="utf-8") as expected:
                    for line, truth in zip_longest(file.readlines(),
                                                   expected.readlines()):
                        assert line == truth

            # assert git message
            if git:
                with subprocess.Popen(
                    [
                        "git",
                        "-C",
                        cobib_test_dir_git,
                        "show",
                        "--format=format:%B",
                        "--no-patch",
                        "HEAD",
                    ],
                        stdout=subprocess.PIPE,
                ) as proc:
                    message, _ = proc.communicate()
                    # decode it
                    split_msg = message.decode("utf-8").split("\n")
                    if split_msg is None:
                        return
                    # assert subject line
                    assert "Auto-commit: LintCommand" in split_msg[0]

            # recheck linting and assert no lint messages
            post_lint_messages = shell_helper.lint_database([])
            for msg, exp in zip_longest(
                    post_lint_messages,
                ["Congratulations! Your database triggers no lint messages."]):
                if msg.strip() and exp:
                    assert msg == exp

        finally:
            rmtree(cobib_test_dir)
            Database().clear()
Example No. 20
    def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
        """Opens an entry for manual editing.

        This command opens a `cobib.database.Entry` in YAML format for manual editing.
        The editor program can be configured via `config.commands.edit.editor`.
        By default, this setting will respect your `$EDITOR` environment variable, but fall back to
        using `vim` if that variable is not set.

        Args:
            args: a sequence of additional arguments used for the execution. The following values
                are allowed for this command:
                    * `label`: the label of the entry to edit.
                    * `-a`, `--add`: if specified, allows adding new entries for non-existent
                      labels. The default entry type of this new entry can be configured via
                      `config.commands.edit.default_entry_type`.
                    * `--preserve-files`: if specified, associated files will not be renamed when
                      the entry label changes.
            out: the output IO stream. This defaults to `sys.stdout`.
        """
        LOGGER.debug("Starting Edit command.")
        parser = ArgumentParser(prog="edit",
                                description="Edit subcommand parser.")
        parser.add_argument("label", type=str, help="label of the entry")
        parser.add_argument(
            "-a",
            "--add",
            action="store_true",
            help="if specified, will add a new entry for unknown labels",
        )
        parser.add_argument("--preserve-files",
                            action="store_true",
                            help="do not rename associated files")

        if not args:
            parser.print_usage(sys.stderr)
            sys.exit(1)

        try:
            largs = parser.parse_args(args)
        except argparse.ArgumentError as exc:
            LOGGER.error(exc.message)
            return

        Event.PreEditCommand.fire(largs)

        yml = YAMLParser()

        bib = Database()

        try:
            entry = bib[largs.label]
            prv = yml.dump(entry)
            if largs.add:
                LOGGER.warning(
                    "Entry '%s' already exists! Ignoring the `--add` argument.",
                    largs.label)
                largs.add = False
        except KeyError:
            # No entry for given label found
            if largs.add:
                # add a new entry for the unknown label
                entry = Entry(
                    largs.label,
                    {"ENTRYTYPE": config.commands.edit.default_entry_type},
                )
                prv = yml.dump(entry)
            else:
                msg = (
                    f"No entry with the label '{largs.label}' could be found."
                    "\nUse `--add` to add a new entry with this label.")
                LOGGER.error(msg)
                return
        if prv is None:
            # No entry found to be edited. This should never occur unless the YAMLParser experiences
            # an unexpected error.
            return

        LOGGER.debug("Creating temporary file.")
        with tempfile.NamedTemporaryFile(mode="w+",
                                         prefix="cobib-",
                                         suffix=".yaml") as tmp_file:
            tmp_file_name = tmp_file.name
            tmp_file.write(prv)
            tmp_file.flush()
            LOGGER.debug('Starting editor "%s".', config.commands.edit.editor)
            status = os.system(config.commands.edit.editor + " " +
                               tmp_file.name)
            assert status == 0
            LOGGER.debug("Editor finished successfully.")
            new_entries = YAMLParser().parse(tmp_file.name)
            new_entry = list(new_entries.values())[0]
        assert not Path(tmp_file_name).exists()
        if entry == new_entry and not largs.add:
            LOGGER.info("No changes detected.")
            return

        bib.update({new_entry.label: new_entry})
        if new_entry.label != largs.label:
            bib.rename(largs.label, new_entry.label)
            if not largs.preserve_files:
                new_files = []
                for file in new_entry.file:
                    path = RelPath(file)
                    if path.path.stem == largs.label:
                        LOGGER.info("Also renaming associated file '%s'.",
                                    str(path))
                        target = RelPath(path.path.parent /
                                         f"{new_entry.label}.pdf")
                        if target.path.exists():
                            LOGGER.warning(
                                "Found conflicting file, not renaming '%s'.",
                                str(path))
                        else:
                            path.path.rename(target.path)
                            new_files.append(str(target))
                            continue
                    new_files.append(file)
                new_entry.file = new_files

        Event.PostEditCommand.fire(new_entry)
        bib.save()

        self.git(args=vars(largs))

        msg = f"'{largs.label}' was successfully edited."
        LOGGER.info(msg)
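
As the first example in this collection shows, the configured editor does not have to be interactive: any command that rewrites the temporary YAML file in place works. A sketch of driving the command non-interactively (labels are illustrative; the module path mirrors the logger names used elsewhere):

from cobib.config import config  # assumed import path
from cobib.commands.edit import EditCommand  # assumed module path

# use sed as a non-interactive "editor" that rewrites the label in place
config.commands.edit.editor = "sed -i 's/old_label:/new_label:/'"
EditCommand().execute(["old_label"])
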
Example No. 21
    def save(cls) -> None:
        """Saves all unsaved entries.

        This uses `cobib.parsers.YAMLParser` to save all entries in `Database._unsaved_entries` to
        disc. In doing so, this function preserves the order of the entries in the database file by
        overwriting changed entries in-place and appending new entries to the end of the file.

        The method of determining whether an entry was added, changed or removed is the following:
        1. we read in the current database as written to disc.
        2. we iterate all lines and determine the label of the entry we are currently on.
        3. if this label is not in `Database._unsaved_entries` we continue.
        4. Otherwise we query the runtime `Database` instance for the new contents of the unsaved
           (and therefore changed) entry and remove the label from `Database._unsaved_entries`.
        5. Using `Entry.save` and a `cobib.parsers.YAMLParser` we overwrite the previous lines of
           the changed entry.
        6. Finally, all labels still left in `Database._unsaved_entries` are newly added entries and
           can simply be appended to the file.

        In order to optimize performance and IO access, all of the above is done with a single call
        to `write`.
        """
        if cls._instance is None:
            cls()
        _instance = cast(Database, cls._instance)

        # pylint: disable=import-outside-toplevel
        from cobib.parsers.yaml import YAMLParser

        yml = YAMLParser()

        file = RelPath(config.database.file).path
        with open(file, "r", encoding="utf-8") as bib:
            lines = bib.readlines()

        label_regex = re.compile(r"^([^:]+):$")

        overwrite = False
        cur_label: str = ""
        buffer: List[str] = []
        for line in lines:
            try:
                matches = label_regex.match(line)
                if matches is None:
                    raise AttributeError
                new_label = matches.groups()[0]
                if new_label in cls._unsaved_entries:
                    LOGGER.debug(
                        'Entry "%s" found. Starting to replace lines.',
                        new_label)
                    overwrite = True
                    cur_label = new_label
                    continue
            except AttributeError:
                pass
            if overwrite and line.startswith("..."):
                LOGGER.debug('Reached end of entry "%s".', cur_label)
                overwrite = False

                new_label = cls._unsaved_entries.pop(cur_label)
                entry = _instance.get(new_label, None)
                if entry:
                    LOGGER.debug('Writing modified entry "%s".', new_label)
                    entry_str = entry.save(parser=yml)
                    buffer.append("\n".join(entry_str.split("\n")[1:]))
                else:
                    # Entry has been deleted. Pop the previous `---` line.
                    LOGGER.debug('Deleting entry "%s".', new_label)
                    buffer.pop()
                # we pop `new_label` too, because in case of a rename it differs from `cur_label`
                if new_label is not None:
                    cls._unsaved_entries.pop(new_label, None)
            elif not overwrite:
                # keep previous line
                buffer.append(line)

        if cls._unsaved_entries:
            for label in cls._unsaved_entries.copy().values():
                if label is None:
                    # should never occur but we avoid a type exception
                    continue
                LOGGER.debug('Adding new entry "%s".', label)
                entry_str = _instance[label].save(parser=yml)
                buffer.append(entry_str)
                cls._unsaved_entries.pop(label)

        with open(file, "w", encoding="utf-8") as bib:
            for line in buffer:
                bib.write(line)
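
A sketch of the typical write path that ends in this method, based on the calls visible in the `EditCommand.execute` example above: mutate the singleton, then persist.

from cobib.database import Database, Entry  # assumed public import path

bib = Database()
bib.update({"dummy": Entry("dummy", {"ENTRYTYPE": "article"})})  # marks 'dummy' as unsaved
bib.save()  # rewrites changed entries in place and appends new ones
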
Example No. 22
    def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
        """Undoes the last change.

        This command is *only* available if coBib's git-integration has been enabled via
        `config.database.git` *and* initialized properly (see `cobib.commands.init.InitCommand`).
        If that is the case, this command will undo the changes of a previous command.
        Note that this *only* applies to commands whose changes have been committed by coBib
        *automatically*.
        This is a safety measure which you can bypass by passing the `--force` argument.

        Args:
            args: a sequence of additional arguments used for the execution. The following values
                are allowed for this command:
                    * `-f`, `--force`: if specified, this will also revert changes which have *not*
                      been auto-committed by coBib.
            out: the output IO stream. This defaults to `sys.stdout`.
        """
        git_tracked = config.database.git
        if not git_tracked:
            msg = (
                "You must enable coBib's git-tracking in order to use the `Undo` command."
                "\nPlease refer to the man-page for more information on how to do so."
            )
            LOGGER.error(msg)
            return

        file = RelPath(config.database.file).path
        root = file.parent
        if not (root / ".git").exists():
            msg = (
                "You have configured, but not initialized coBib's git-tracking."
                "\nPlease consult `cobib init --help` for more information on how to do so."
            )
            LOGGER.error(msg)
            return

        LOGGER.debug("Starting Undo command.")
        parser = ArgumentParser(prog="undo",
                                description="Undo subcommand parser.")
        parser.add_argument("-f",
                            "--force",
                            action="store_true",
                            help="allow undoing non auto-committed changes")

        try:
            largs = parser.parse_args(args)
        except argparse.ArgumentError as exc:
            LOGGER.error(exc.message)
            return

        Event.PreUndoCommand.fire(largs)

        LOGGER.debug("Obtaining git log.")
        lines = subprocess.check_output([
            "git",
            "--no-pager",
            "-C",
            f"{root}",
            "log",
            "--oneline",
            "--no-decorate",
            "--no-abbrev",
        ])
        undone_shas = set()
        for commit in lines.decode().strip().split("\n"):
            LOGGER.debug("Processing commit %s", commit)
            sha, *message = commit.split()
            if message[0] == "Undo":
                # Store already undone commit sha
                LOGGER.debug("Storing undone commit sha: %s", message[-1])
                undone_shas.add(message[-1])
                continue
            if sha in undone_shas:
                LOGGER.info("Skipping %s as it was already undone", sha)
                continue
            if largs.force or (message[0] == "Auto-commit:"
                               and message[-1] != "InitCommand"):
                # we undo a commit if and only if:
                #  - the `force` argument is specified OR
                #  - the commit is an `auto-committed` change which is NOT from `InitCommand`
                LOGGER.debug("Attempting to undo %s.", sha)
                commands = [
                    f"git -C {root} revert --no-commit {sha}",
                    f"git -C {root} commit --no-gpg-sign --quiet --message 'Undo {sha}'",
                ]
                with subprocess.Popen("; ".join(commands),
                                      shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE) as undo:
                    undo.communicate()
                    if undo.returncode != 0:
                        LOGGER.error(  # pragma: no cover
                            "Undo was unsuccessful. Please consult the logs and git history of your"
                            " database for more information.")
                    else:
                        # update Database
                        Database().read()
                break
        else:
            msg = "Could not find a commit to undo. Please commit something first!"
            LOGGER.warning(msg)
            sys.exit(1)

        Event.PostUndoCommand.fire(root, sha)
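
# A brief usage sketch (an assumption, not part of the original example): invoking the Undo
# command programmatically. This presumes that `config.database.git` is enabled and that the
# repository was initialized beforehand, e.g. via `cobib init --git`.
from cobib.commands.undo import UndoCommand

# Revert the most recent auto-committed change; add "--force" to also revert manually
# committed changes.
UndoCommand().execute([])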
Exemplo n.º 23
0
    def search(self,
               query: str,
               context: int = 1,
               ignore_case: bool = False) -> List[List[str]]:
        """Search entry contents for the query string.

        The entry will *always* be converted to a searchable string using the
        `cobib.parsers.BibtexParser.dump` method. This text will then be searched for `query`,
        which will be interpreted as a regex pattern.
        If a `file` is associated with this entry, the search will try its best to recursively
        query its contents, too. However, the success of this depends heavily on the configured
        search tool, `config.commands.search.grep`.

        Args:
            query: the text to search for.
            context: the number of context lines to provide for each match. This behaves similarly
                to the *Context Line Control* available for the UNIX `grep` command (`--context`).
            ignore_case: if True, the search will be case-*in*sensitive.

        Returns:
            A list of lists containing the context for each match associated with this entry.
        """
        LOGGER.debug("Searching entry %s for %s.", self.label, query)
        matches: List[List[str]] = []
        # pylint: disable=import-outside-toplevel,cyclic-import
        from cobib.parsers.bibtex import BibtexParser

        bibtex = BibtexParser().dump(self).split("\n")
        re_flags = re.IGNORECASE if ignore_case else 0
        re_compiled = re.compile(rf"{query}", flags=re_flags)
        for idx, line in enumerate(bibtex):
            if re_compiled.search(line):
                # add new match
                matches.append([])
                # upper context; (we iterate in reverse in order to ensure that we abort on the
                # first previous occurrence of the query pattern)
                for string in reversed(
                        bibtex[max(idx - context, 0):min(idx, len(bibtex))]):
                    if re_compiled.search(string):
                        break
                    matches[-1].insert(0, string)
                # matching line itself
                matches[-1].append(line)
                # lower context
                for string in bibtex[max(idx + 1, 0):min(idx + context + 1, len(bibtex))]:
                    if re_compiled.search(string):
                        break
                    matches[-1].append(string)

        for file_ in self.file:
            grep_prog = config.commands.search.grep
            LOGGER.debug("Searching associated file %s with %s", file_,
                         grep_prog)
            with subprocess.Popen(
                [
                    grep_prog,
                    *config.commands.search.grep_args,
                    f"-C{context}",
                    query,
                    RelPath(file_).path,
                ],
                    stdout=subprocess.PIPE,
            ) as grep:
                if grep.stdout is None:
                    continue
                stdout = grep.stdout
                # extract results
                results = stdout.read().decode().split("\n--\n")
            for match in results:
                if match:
                    matches.append([
                        line.strip() for line in match.split("\n")
                        if line.strip()
                    ])

        return matches
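
# A brief usage sketch (an assumption, not part of the original example): searching a single
# entry for a regex pattern with two lines of context, ignoring case. The label and query
# string are illustrative only.
from cobib.database import Database

matches = Database()["einstein"].search("Elektrodynamik", context=2, ignore_case=True)
for match in matches:
    print("\n".join(match))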
Exemplo n.º 24
0
class TestLintDatabase(ShellHelperTest):
    """Tests for the shell helper which lints the users database."""

    COMMAND = "lint_database"
    REL_PATH = RelPath(get_resource("linting_database.yaml", "utils"))
    EXPECTED = [
        f"{REL_PATH}:5 Converted the field 'file' of entry 'dummy' to a list. You can consider "
        "storing it as such directly.",
        f"{REL_PATH}:6 Converting field 'month' of entry 'dummy' from '8' to 'aug'.",
        f"{REL_PATH}:7 Converting field 'number' of entry 'dummy' to integer: 1.",
        f"{REL_PATH}:8 Converted the field 'tags' of entry 'dummy' to a list. You can consider "
        "storing it as such directly.",
        f"{REL_PATH}:9 Converted the field 'url' of entry 'dummy' to a list. You can consider "
        "storing it as such directly.",
        f"{REL_PATH}:4 The field 'ID' of entry 'dummy' is no longer required. It will be inferred "
        "from the entry label.",
    ]

    @staticmethod
    @pytest.fixture(autouse=True)
    def setup() -> None:
        """Set linting database path.

        This fixture is automatically enabled for all tests in this class.
        """
        config.defaults()
        config.database.file = str(TestLintDatabase.REL_PATH)

    def _assert(self, out: str) -> None:
        for msg, truth in zip_longest(out.split("\n"), self.EXPECTED):
            if msg.strip() and truth:
                assert msg == truth

    # pylint: disable=no-self-use
    def test_no_lint_warnings(self) -> None:
        """Test the case of no raised lint warnings."""
        config.load(get_resource("debug.py"))
        args: List[str] = []
        lint_messages = shell_helper.lint_database(args)
        for msg, exp in zip_longest(
                lint_messages,
            ["Congratulations! Your database triggers no lint messages."]):
            if msg.strip() and exp:
                assert msg == exp

    @pytest.mark.parametrize("git", [False, True])
    def test_lint_auto_format(self, git: bool) -> None:
        """Test automatic lint formatter.

        Args:
            git: whether or not git-tracking should be enabled.
        """
        tmp_dir = Path(tempfile.gettempdir()).resolve()
        cobib_test_dir = tmp_dir / "cobib_lint_test"
        cobib_test_dir.mkdir(parents=True, exist_ok=True)
        cobib_test_dir_git = cobib_test_dir / ".git"

        database_file = RelPath(cobib_test_dir / "database.yaml")

        copyfile(TestLintDatabase.REL_PATH.path, database_file.path)
        config.database.file = str(database_file)
        config.database.git = git

        if git:
            commands = [
                f"cd {cobib_test_dir}",
                "git init",
                "git add -- database.yaml",
                "git commit --no-gpg-sign --quiet --message 'Initial commit'",
            ]
            os.system("; ".join(commands))

        try:
            # apply linting with formatting and check for the expected lint messages
            args: List[str] = ["--format"]
            pre_lint_messages = shell_helper.lint_database(args)
            expected_messages = [
                "The following lint messages have successfully been resolved:"
            ] + self.EXPECTED
            for msg, truth in zip_longest(pre_lint_messages,
                                          expected_messages):
                if msg.strip() and truth:
                    assert msg == truth.replace(str(TestLintDatabase.REL_PATH),
                                                str(database_file))

            # assert auto-formatted database
            with open(database_file.path, "r", encoding="utf-8") as file:
                with open(get_resource("fixed_database.yaml", "utils"),
                          "r",
                          encoding="utf-8") as expected:
                    for line, truth in zip_longest(file.readlines(),
                                                   expected.readlines()):
                        assert line == truth

            # assert git message
            if git:
                with subprocess.Popen(
                    [
                        "git",
                        "-C",
                        cobib_test_dir_git,
                        "show",
                        "--format=format:%B",
                        "--no-patch",
                        "HEAD",
                    ],
                        stdout=subprocess.PIPE,
                ) as proc:
                    message, _ = proc.communicate()
                    # decode it
                    split_msg = message.decode("utf-8").split("\n")
                    if split_msg is None:
                        return
                    # assert subject line
                    assert "Auto-commit: LintCommand" in split_msg[0]

            # recheck linting and assert no lint messages
            post_lint_messages = shell_helper.lint_database([])
            for msg, exp in zip_longest(
                    post_lint_messages,
                ["Congratulations! Your database triggers no lint messages."]):
                if msg.strip() and exp:
                    assert msg == exp

        finally:
            rmtree(cobib_test_dir)
            Database().clear()
Exemplo n.º 25
0
    def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
        """Modifies multiple entries in bulk.

        This command allows bulk modification of multiple entries.
        It takes a modification in the form `<field>:<value>` and will overwrite the `field` of all
        matching entries with the new `value`.
        The entries can be specified as a manual selection (when using `--selection` or the visual
        selection of the TUI) or through filters (see also `cobib.commands.list`).

        Args:
            args: a sequence of additional arguments used for the execution. The following values
                are allowed for this command:
                    * `modification`: a string conforming to `<field>:<value>` indicating the
                      modification that should be applied to all matching entries. By default, the
                      modification will overwrite any existing data in the specified `field` with
                      the new `value`. For more information about formatting options of `<value>`
                      refer to the module documentation or the man-page.
                    * `--dry`: run in "dry"-mode which lists modifications without applying them.
                    * `-a`, `--add`: when specified, the modification's value will be added to the
                      entry's field rather than overwrite it. If the field in question is numeric,
                      the numbers will be added.
                    * `-s`, `--selection`: when specified, the positional arguments will *not* be
                      interpreted as filters but rather as a direct list of entry labels. This can
                      be used on the command-line but is mainly meant for the TUI's visual
                      selection interface (hence the name).
                    * `--preserve-files`: when specified, associated files will *not* be renamed
                      when an entry's label changes.
                    * in addition to the above, you can add `filters` to specify a subset of your
                      database for modification. For more information refer to `cobib.commands.list`.
            out: the output IO stream. This defaults to `sys.stdout`.
        """
        LOGGER.debug("Starting Modify command.")
        parser = ArgumentParser(prog="modify",
                                description="Modify subcommand parser.")
        parser.add_argument(
            "modification",
            type=self.field_value_pair,
            help="Modification to apply to the specified entries."
            "\nThis argument must be a string formatted as <field>:<value> where field can be any "
            "field of the entries and value can be any string which should be placed in that "
            "field. Be sure to escape this field-value pair properly, especially if the value "
            "contains spaces.",
        )
        parser.add_argument(
            "--dry",
            action="store_true",
            help=
            "Run in 'dry'-mode, listing modifications without actually applying them.",
        )
        parser.add_argument(
            "-a",
            "--add",
            action="store_true",
            help="Adds to the modified field rather than overwriting it.",
        )
        parser.add_argument(
            "-s",
            "--selection",
            action="store_true",
            help=
            "When specified, the `filter` argument will be interpreted as a list of entry "
            "labels rather than arguments for the `list` command.",
        )
        parser.add_argument(
            "filter",
            nargs="+",
            help=
            "You can specify filters as used by the `list` command in order to select a "
            "subset of labels to be modified. To ensure this works as expected you should add the "
            "pseudo-argument '--' before the list of filters. See also `list --help` for more "
            "information.",
        )
        parser.add_argument("--preserve-files",
                            action="store_true",
                            help="do not rename associated files")

        if not args:
            parser.print_usage(sys.stderr)
            sys.exit(1)

        try:
            largs = parser.parse_intermixed_args(args)
        except argparse.ArgumentError as exc:
            LOGGER.error(exc.message)
            return

        Event.PreModifyCommand.fire(largs)

        info_handler: logging.Handler
        if largs.dry:
            info_handler = get_stream_handler(logging.INFO)

            class ModifyInfoFilter(logging.Filter):
                """A logging filter to only print ModifyCommand INFO messages."""
                def filter(self, record: logging.LogRecord) -> bool:
                    return record.name == "cobib.commands.modify" and record.levelname == "INFO"

            info_handler.addFilter(ModifyInfoFilter())
            LOGGER.addHandler(info_handler)

        if largs.selection:
            LOGGER.info(
                "Selection given. Interpreting `filter` as a list of labels")
            labels = largs.filter
        else:
            LOGGER.debug("Gathering filtered list of entries to be modified.")
            with open(os.devnull, "w", encoding="utf-8") as devnull:
                labels = ListCommand().execute(largs.filter, out=devnull)

        field, value = largs.modification

        bib = Database()

        for label in labels:  # pylint: disable=too-many-nested-blocks
            try:
                entry = bib[label]
                local_value = evaluate_as_f_string(value, {
                    "label": label,
                    **entry.data.copy()
                })

                if hasattr(entry, field):
                    prev_value = getattr(entry, field, None)
                else:
                    prev_value = entry.data.get(field, None)

                if not largs.add:
                    new_value = local_value
                    if local_value.isnumeric():
                        new_value = int(local_value)  # type: ignore
                else:
                    try:
                        if prev_value is None:
                            new_value = local_value
                        elif isinstance(prev_value, str):
                            new_value = prev_value + local_value
                        elif isinstance(prev_value, list):
                            new_value = prev_value + [local_value]  # type: ignore
                        elif isinstance(prev_value, int):
                            if local_value.isnumeric():
                                new_value = prev_value + int(local_value)  # type: ignore
                            else:
                                raise TypeError
                        else:
                            raise TypeError
                    except TypeError:
                        LOGGER.warning(
                            "Encountered an unexpected field type to add to. Converting the field "
                            "'%s' of entry '%s' to a simple string: '%s'.",
                            field,
                            label,
                            str(prev_value) + local_value,
                        )
                        new_value = str(prev_value) + local_value

                # guard against overwriting existing data if label gets changed
                if field == "label":
                    new_value = bib.disambiguate_label(new_value, entry)

                if new_value == prev_value:
                    LOGGER.info(
                        "New and previous values match. Skipping modification of entry '%s'.",
                        label)
                    continue

                if hasattr(entry, field):
                    if largs.dry:
                        LOGGER.info(
                            "%s: changing field '%s' from %s to %s",
                            entry.label,
                            field,
                            getattr(entry, field),
                            new_value,
                        )
                    setattr(entry, field, new_value)
                else:
                    if largs.dry:
                        LOGGER.info(
                            "%s: adding field '%s' = %s",
                            entry.label,
                            field,
                            new_value,
                        )
                    entry.data[field] = new_value

                bib.update({entry.label: entry})

                if entry.label != label:
                    bib.rename(label, entry.label)
                    if not largs.preserve_files:
                        new_files = []
                        for file in entry.file:
                            path = RelPath(file)
                            if path.path.stem == label:
                                LOGGER.info(
                                    "Also renaming associated file '%s'.",
                                    str(path))
                                target = RelPath(path.path.parent /
                                                 f"{entry.label}.pdf")
                                if target.path.exists():
                                    LOGGER.warning(
                                        "Found conflicting file, not renaming '%s'.",
                                        str(path))
                                else:
                                    if largs.dry:
                                        LOGGER.info(
                                            "%s: renaming associated file '%s' to '%s'",
                                            entry.label,
                                            path.path,
                                            target.path,
                                        )
                                    else:
                                        path.path.rename(target.path)
                                        new_files.append(str(target))
                                    continue
                            if not largs.dry:
                                new_files.append(file)
                        if not largs.dry and new_files:
                            entry.file = new_files

                if not largs.dry:
                    msg = f"'{label}' was modified."
                    LOGGER.info(msg)
            except KeyError:
                msg = f"No entry with the label '{label}' could be found."
                LOGGER.warning(msg)

        Event.PostModifyCommand.fire(labels, largs.dry)

        if largs.dry:
            LOGGER.removeHandler(info_handler)
            # read also functions as a restoring method
            bib.read()
        else:
            bib.save()
            self.git(args=vars(largs))
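
# A brief usage sketch (an assumption, not part of the original example): appending a tag to
# all entries whose year is 2020, previewing the changes with `--dry` first. The filter syntax
# follows that of `cobib list` (see `cobib.commands.list`).
from cobib.commands.modify import ModifyCommand

ModifyCommand().execute(["tags:reviewed", "--add", "--dry", "--", "++year", "2020"])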
Exemplo n.º 26
0
class TestUnifyLabels(ShellHelperTest):
    """Tests for the shell helper which unifies all database labels."""

    COMMAND = "unify_labels"
    REL_PATH = RelPath(get_resource("unifying_database.yaml", "utils"))
    EXPECTED: List[str] = [
        "[INFO] einstein: changing field 'label' from einstein to Einstein1905_a",
        "[INFO] latexcompanion: changing field 'label' from latexcompanion to Goossens1993",
        "[INFO] knuthwebsite: changing field 'label' from knuthwebsite to Knuth",
        "[INFO] Einstein_1905: changing field 'label' from Einstein_1905 to Einstein1905_b",
        "[INFO] New and previous values match. Skipping modification of entry 'Einstein1905'.",
        "[INFO] einstein_2: changing field 'label' from einstein_2 to Einstein1905_c",
        "[INFO] New and previous values match. Skipping modification of entry 'Author2021'.",
    ]

    @staticmethod
    @pytest.fixture(autouse=True)
    def setup() -> None:
        """Set linting database path.

        This fixture is automatically enabled for all tests in this class.
        """
        config.defaults()
        config.database.format.label_default = "{author.split()[1]}{year}"
        config.database.file = str(TestUnifyLabels.REL_PATH)

    def _assert(self, out: str) -> None:
        filtered = [
            line for line in out.split("\n") if line.startswith("[INFO]")
        ]
        for msg, truth in zip_longest(filtered, self.EXPECTED):
            if msg.strip() and truth:
                assert msg == truth

    @pytest.mark.parametrize("git", [False, True])
    def test_unify_labels(self, git: bool) -> None:
        # pylint: disable=no-self-use
        """Test actual changes of label unification.

        Args:
            git: whether or not git-tracking should be enabled.
        """
        tmp_dir = Path(tempfile.gettempdir()).resolve()
        cobib_test_dir = tmp_dir / "cobib_unify_label_test"
        cobib_test_dir.mkdir(parents=True, exist_ok=True)
        cobib_test_dir_git = cobib_test_dir / ".git"

        database_file = RelPath(cobib_test_dir / "database.yaml")

        copyfile(TestUnifyLabels.REL_PATH.path, database_file.path)
        config.database.file = str(database_file)
        config.database.git = git

        if git:
            commands = [
                f"cd {cobib_test_dir}",
                "git init",
                "git add -- database.yaml",
                "git commit --no-gpg-sign --quiet --message 'Initial commit'",
            ]
            os.system("; ".join(commands))

        try:
            # apply label unification
            shell_helper.unify_labels(["--apply"])

            # assert unified database
            with open(database_file.path, "r", encoding="utf-8") as file:
                with open(
                        RelPath(get_resource("unified_database.yaml",
                                             "utils")).path,
                        "r",
                        encoding="utf-8",
                ) as expected:
                    for line, truth in zip_longest(file.readlines(),
                                                   expected.readlines()):
                        assert line == truth

            # assert git message
            if git:
                with subprocess.Popen(
                    [
                        "git",
                        "-C",
                        cobib_test_dir_git,
                        "show",
                        "--format=format:%B",
                        "--no-patch",
                        "HEAD",
                    ],
                        stdout=subprocess.PIPE,
                ) as proc:
                    message, _ = proc.communicate()
                    # decode it
                    split_msg = message.decode("utf-8").split("\n")
                    if split_msg is None:
                        return
                    # assert subject line
                    assert "Auto-commit: ModifyCommand" in split_msg[0]

        finally:
            rmtree(cobib_test_dir)
            Database().clear()
Exemplo n.º 27
0
    def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
        """Initializes the database.

        Initializes the YAML database in the location specified by `config.database.file`.
        If you have enabled `config.database.git` *and* you specify the `--git` command-line
        argument, the git-integration will be initialized, too.

        Args:
            args: a sequence of additional arguments used for the execution. The following values
                are allowed for this command:
                    * `-g`, `--git`: initializes the git-integration.
            out: the output IO stream. This defaults to `sys.stdout`.
        """
        LOGGER.debug("Starting Init command.")
        parser = ArgumentParser(prog="init",
                                description="Init subcommand parser.")
        parser.add_argument("-g",
                            "--git",
                            action="store_true",
                            help="initialize git repository")

        try:
            largs = parser.parse_args(args)
        except argparse.ArgumentError as exc:
            LOGGER.error(exc.message)
            return

        Event.PreInitCommand.fire(largs)

        file = RelPath(config.database.file).path
        root = file.parent

        file_exists = file.exists()
        git_tracked = (root / ".git").exists()

        if file_exists:
            if git_tracked:
                msg = (
                    "Database file already exists and is being tracked by git. There is nothing "
                    "else to do.")
                LOGGER.info(msg)
                return

            if not git_tracked and not largs.git:
                msg = "Database file already exists! Use --git to start tracking it with git."
                LOGGER.warning(msg)
                return

        else:
            LOGGER.debug('Creating path for database file: "%s"', root)
            root.mkdir(parents=True, exist_ok=True)

            LOGGER.debug('Creating empty database file: "%s"', file)
            open(file, "w", encoding="utf-8").close()  # pylint: disable=consider-using-with

        if largs.git:
            if not config.database.git:
                msg = (
                    "You are about to initialize the git tracking of your database, but this will "
                    "only take effect if you also enable the `config.database.git` setting in your "
                    "configuration file!")
                LOGGER.warning(msg)
            # First, check whether git is configured correctly.
            print("Checking `git config --get user.name`:",
                  end=" ",
                  flush=True)
            name_set = os.system("git config --get user.name")
            print()
            print("Checking `git config --get user.email`:",
                  end=" ",
                  flush=True)
            email_set = os.system("git config --get user.email")
            print()
            if name_set != 0 or email_set != 0:
                msg = (
                    "In order to use git you must configure your name and email first! For more "
                    "information please consult `man gittutorial`.")
                LOGGER.warning(msg)
                sys.exit(1)
            LOGGER.debug('Initializing git repository in "%s"', root)
            os.system(f"git init {root}")
            self.git(args=vars(largs), force=True)

        Event.PostInitCommand.fire(root, file)
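
# A brief usage sketch (an assumption, not part of the original example): initializing the
# database file at `config.database.file` together with its git-integration.
from cobib.commands.init import InitCommand

InitCommand().execute(["--git"])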
Exemplo n.º 28
0
    def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
        """Exports the database.

        This command exports the database (or a selected subset of entries).
        You can choose from the following export formats:
        * BibLaTeX (via the `--bibtex` argument)
        * Zip archive (via the `--zip` argument)

        Args:
            args: a sequence of additional arguments used for the execution. The following values
                are allowed for this command:
                    * `-b`, `--bibtex`: specifies a BibLaTeX filename into which to export.
                    * `-z`, `--zip`: specifies a Zip-filename into which to export associated files.
                    * `-a`, `--abbreviate`: abbreviate the journal names before exporting. See also
                      `config.utils.journal_abbreviations`.
                    * `--dotless`: remove punctuation from the journal abbreviations.
                    * `-s`, `--selection`: when specified, the positional arguments will *not* be
                      interpreted as filters but rather as a direct list of entry labels. This can
                      be used on the command-line but is mainly meant for the TUI's visual
                      selection interface (hence the name).
                    * in addition to the above, you can add `filters` to specify a subset of your
                      database for exporting. For more information refer to `cobib.commands.list`.
            out: the output IO stream. This defaults to `sys.stdout`.
        """
        LOGGER.debug("Starting Export command.")
        parser = ArgumentParser(prog="export", description="Export subcommand parser.")
        parser.add_argument(
            "-b", "--bibtex", type=argparse.FileType("a"), help="BibLaTeX output file"
        )
        parser.add_argument("-z", "--zip", type=argparse.FileType("a"), help="zip output file")
        parser.add_argument(
            "-s",
            "--selection",
            action="store_true",
            help="When specified, the `filter` argument will be interpreted as a list of entry "
            "labels rather than arguments for the `list` command.",
        )
        parser.add_argument(
            "filter",
            nargs="*",
            help="You can specify filters as used by the `list` command in order to select a "
            "subset of labels to be modified. To ensure this works as expected you should add the "
            "pseudo-argument '--' before the list of filters. See also `list --help` for more "
            "information.",
        )
        parser.add_argument(
            "-a", "--abbreviate", action="store_true", help="Abbreviate journal names"
        )
        parser.add_argument(
            "--dotless", action="store_true", help="Remove punctuation from journal abbreviations"
        )

        if not args:
            parser.print_usage(sys.stderr)
            sys.exit(1)

        try:
            largs = parser.parse_intermixed_args(args)
        except argparse.ArgumentError as exc:
            LOGGER.error(exc.message)
            return

        Event.PreExportCommand.fire(largs)

        if largs.bibtex is None and largs.zip is None:
            msg = "No output file specified!"
            LOGGER.error(msg)
            return
        if largs.zip is not None:
            largs.zip = ZipFile(largs.zip.name, "w")  # pylint: disable=consider-using-with

        if largs.selection:
            LOGGER.info("Selection given. Interpreting `filter` as a list of labels")
            labels = largs.filter
        else:
            LOGGER.debug("Gathering filtered list of entries to be exported.")
            with open(os.devnull, "w", encoding="utf-8") as devnull:
                labels = ListCommand().execute(largs.filter, out=devnull)

        bibtex_parser = BibtexParser()

        bib = Database()

        for label in labels:
            try:
                LOGGER.info('Exporting entry "%s".', label)
                entry = bib[label]
                if largs.bibtex is not None:
                    if largs.abbreviate and "journal" in entry.data.keys():
                        entry.data["journal"] = JournalAbbreviations.abbreviate(
                            entry.data["journal"], dotless=largs.dotless
                        )
                    entry_str = bibtex_parser.dump(entry)
                    largs.bibtex.write(entry_str)
                if largs.zip is not None:
                    if "file" in entry.data.keys() and entry.file is not None:
                        files = entry.file
                        if not isinstance(files, list):
                            files = [files]
                        for file in files:
                            path = RelPath(file).path
                            LOGGER.debug(
                                'Adding "%s" associated with "%s" to the zip file.', path, label
                            )
                            largs.zip.write(path, path.name)
            except KeyError:
                msg = f"No entry with the label '{label}' could be found."
                LOGGER.warning(msg)

        Event.PostExportCommand.fire(labels, largs)

        if largs.zip is not None:
            largs.zip.close()
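
# A brief usage sketch (an assumption, not part of the original example): exporting a manually
# selected entry to a BibLaTeX file and bundling its associated files into a zip archive. The
# file names and label are illustrative only.
from cobib.commands.export import ExportCommand

ExportCommand().execute(["--bibtex", "out.bib", "--zip", "files.zip", "-s", "--", "einstein"])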
Exemplo n.º 29
0
    def execute(self, args: List[str], out: IO[Any] = sys.stdout) -> None:
        """Deletes an entry.

        This command deletes one (or multiple) entries from the database.

        Args:
            args: a sequence of additional arguments used for the execution. The following values
                are allowed for this command:
                    * `labels`: one (or multiple) labels of the entries to be deleted.
                    * `--preserve-files`: when specified, associated files will *not* be deleted.
            out: the output IO stream. This defaults to `sys.stdout`.
        """
        LOGGER.debug("Starting Delete command.")
        parser = ArgumentParser(prog="delete",
                                description="Delete subcommand parser.")
        parser.add_argument("labels",
                            type=str,
                            nargs="+",
                            help="labels of the entries")
        parser.add_argument("--preserve-files",
                            action="store_true",
                            help="do not delete associated files")

        if not args:
            parser.print_usage(sys.stderr)
            sys.exit(1)

        try:
            largs = parser.parse_args(args)
        except argparse.ArgumentError as exc:
            LOGGER.error(exc.message)
            return

        Event.PreDeleteCommand.fire(largs)

        deleted_entries = set()

        bib = Database()
        for label in largs.labels:
            try:
                LOGGER.debug("Attempting to delete entry '%s'.", label)
                entry = bib.pop(label)
                if not largs.preserve_files:
                    for file in entry.file:
                        path = RelPath(file)
                        try:
                            LOGGER.debug(
                                "Attempting to remove associated file '%s'.",
                                str(path))
                            os.remove(path.path)
                        except FileNotFoundError:
                            pass

                deleted_entries.add(label)
            except KeyError:
                pass

        Event.PostDeleteCommand.fire(deleted_entries)
        bib.save()

        self.git(args=vars(largs))

        for label in deleted_entries:
            msg = f"'{label}' was removed from the database."
            LOGGER.info(msg)
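
# A brief usage sketch (an assumption, not part of the original example): deleting an entry
# while keeping its associated files on disk.
from cobib.commands.delete import DeleteCommand

DeleteCommand().execute(["einstein", "--preserve-files"])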