Example #1
def test_prepare_package_bad_file():
    "Tests that the prepare_package function fails for unknown files"

    err = ErrorBundle()
    submain.prepare_package(err, "tests/resources/main/foo.bar")

    assert err.failed()
Example #2
def test_prepare_package_webapp(fake_webapp_validator):
    err = ErrorBundle()
    package = "tests/resources/main/mozball.webapp"
    submain.prepare_package(err, package)
    assert not err.failed()

    fake_webapp_validator.assert_called_with(err, package)
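The fake_webapp_validator fixture used in this example is not shown on this page. Below is a minimal sketch of how such a fixture could be written with pytest and mock, consistent with the assert_called_with check above; the patched dotted path is an assumption for illustration, not necessarily where the project actually keeps its webapp validator:

import pytest
from mock import patch  # use unittest.mock's patch on Python 3

@pytest.fixture
def fake_webapp_validator():
    # Swap out the webapp validator for a MagicMock so the test can check
    # how prepare_package invoked it. The dotted path below is hypothetical
    # and only stands in for wherever the real validator is imported.
    with patch("appvalidator.submain.detect_webapp") as fake:
        yield fake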
def test_prepare_package_missing():
    "Tests that the prepare_package function fails when file is not found"

    err = ErrorBundle()
    submain.prepare_package(err, "foo/bar/asdf/qwerty.xyz")

    assert err.failed()
Example #4
def test_prepare_package_missing():
    "Tests that the prepare_package function fails when file is not found"

    err = ErrorBundle()
    submain.prepare_package(err, "foo/bar/asdf/qwerty.xyz")

    assert err.failed()
def _test_xul_raw(data, path, should_fail=False, should_fail_csp=None,
                  type_=None):
    filename = path.split("/")[-1]
    extension = filename.split(".")[-1]

    err = ErrorBundle()
    if type_:
        err.set_type(type_)

    parser = markuptester.MarkupParser(err, debug=True)
    parser.process(filename, data, extension)

    print err.print_summary(verbose=True)

    if should_fail:
        assert any(m for m in (err.errors + err.warnings) if
                   m["id"][0] != "csp")
    else:
        assert not any(m for m in (err.errors + err.warnings) if
                       m["id"][0] != "csp")

    if should_fail_csp == True:
        assert any(m for m in (err.errors + err.warnings) if
                   m["id"][0] == "csp")
    elif should_fail_csp == False:
        assert not any(m for m in (err.errors + err.warnings) if
                       m["id"][0] == "csp")

    return err
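A call to this helper might look like the following sketch; the markup string and filename are purely illustrative:

# Hypothetical call to the helper above: a trivial, well-formed document
# that should produce no non-CSP errors or warnings.
_test_xul_raw("<window></window>", "chrome/content/foo.xul",
              should_fail=False)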
def test_prepare_package_bad_file():
    "Tests that the prepare_package function fails for unknown files"

    err = ErrorBundle()
    submain.prepare_package(err, "tests/resources/main/foo.bar")

    assert err.failed()
def test_prepare_package_webapp(fake_webapp_validator):
    err = ErrorBundle()
    package = "tests/resources/main/mozball.webapp"
    submain.prepare_package(err, package)
    assert not err.failed()

    fake_webapp_validator.assert_called_with(err, package)
def test_version_control():
    """Test that version control in a package are caught."""

    package = MockXPI({".git/foo/bar": None})

    err = ErrorBundle()
    packagelayout.test_blacklisted_files(err, package)
    assert err.failed()
Example #9
def test_boring():
    """Test that boring output strips out color sequences."""

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()
    bundle.error((), "<<BLUE>><<GREEN>><<YELLOW>>")
    bundle.print_summary(no_color=True)

    sys.stdout.seek(0)
    eq_(sys.stdout.getvalue().count("<<GREEN>>"), 0)
def test_duplicate_files():
    """Test that duplicate files in a package are caught."""

    package = MagicMock()
    package.zf = zf = MagicMock()
    zf.namelist.return_value = ["foo.bar", "foo.bar"]

    err = ErrorBundle()
    packagelayout.test_layout_all(err, package)
    assert err.failed()
Example #11
def test_duplicate_files():
    """Test that duplicate files in a package are caught."""

    package = MagicMock()
    package.subpackage = False
    zf = MagicMock()
    zf.namelist.return_value = ["foo.bar", "foo.bar"]
    package.zf = zf

    err = ErrorBundle()
    packagelayout.test_layout_all(err, package)
    assert err.failed()
Example #12
    def setup_err(self):
        """
        Instantiate the error bundle object. Use the `instant` parameter to
        have it output errors as they're generated. `for_appversions` may be set
        to target the test cases at a specific Gecko version range.

        An existing error bundle will be overwritten with a fresh one that has
        the state that the test case was set up with.
        """
        self.err = ErrorBundle(instant=True,
                               listed=getattr(self, "listed", True))
        self.err.handler = OutputHandler(sys.stdout, True)
def test_spaces_in_names():
    """Test that spaces in filenames are errors."""

    package = MockXPI({
        "foo/bar/foo.bar ": None,
        "foo/bar/ foo.bar": None,
    })

    err = ErrorBundle()
    packagelayout.test_blacklisted_files(err, package)
    assert err.failed()
    assert len(err.errors) == 2
Example #14
def _do_test(path, should_fail=False):

    data = open(path).read()
    err = ErrorBundle()

    csstester.test_css_file(err, "css.css", data)
    err.print_summary(True)

    if should_fail:
        assert err.failed()
    else:
        assert not err.failed()

    return err
def _test_xul_raw(data,
                  path,
                  should_fail=False,
                  should_fail_csp=None,
                  type_=None):
    filename = path.split("/")[-1]
    extension = filename.split(".")[-1]

    err = ErrorBundle()
    err.save_resource("app_type", "certified")
    if type_:
        err.set_type(type_)

    parser = markuptester.MarkupParser(err, debug=True)
    parser.process(filename, data, extension)

    print err.print_summary(verbose=True)

    if should_fail:
        assert any(m for m in (err.errors + err.warnings)
                   if m["id"][0] != "csp")
    else:
        assert not any(
            m for m in (err.errors + err.warnings) if m["id"][0] != "csp")

    if should_fail_csp == True:
        assert any(m for m in (err.errors + err.warnings)
                   if m["id"][0] == "csp")
    elif should_fail_csp == False:
        assert not any(
            m for m in (err.errors + err.warnings) if m["id"][0] == "csp")

    return err
Example #16
def test_notice_friendly():
    """
    Test notice-related human-friendly text output functions of the error
    bundler.
    """

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()

    bundle.notice((), "foobar")

    # Load the JSON output as an object.
    output = bundle.print_summary(verbose=True, no_color=True)
    print output

    assert output.count("foobar")
Example #17
def test_json():
    """Test the JSON output capability of the error bundler."""

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle() # No color since no output
    bundle.set_tier(4)
    bundle.set_tier(3)

    bundle.error((), "error", "description")
    bundle.warning((), "warning", "description")
    bundle.notice((), "notice", "description")

    results = json.loads(bundle.render_json())

    eq_(len(results["messages"]), 3)
    assert not results["success"]
    eq_(results["ending_tier"], 4)
    def test_initializer(self):
        """Test that the __init__ paramaters are doing their jobs."""

        e = ErrorBundle()
        assert e.determined
        assert e.get_resource("listed")

        e = ErrorBundle(determined=False)
        assert not e.determined
        assert e.get_resource("listed")

        e = ErrorBundle(listed=False)
        assert e.determined
        assert not e.get_resource("listed")
Example #19
def _do_test(path, test, failure=True, set_type=0, listed=False, xpi_mode="r"):

    package_data = open(path, "rb")
    package = ZipPackage(package_data, mode=xpi_mode, name=path)
    err = ErrorBundle()
    if listed:
        err.save_resource("listed", True)

    # Populate in the dependencies.
    if set_type:
        err.set_type(set_type)  # Conduit test requires type

    test(err, package)

    print err.print_summary(verbose=True)
    assert err.failed() if failure else not err.failed()

    return err
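A caller would typically hand this helper a fixture package and the check to run against it; a sketch of such a call follows (the fixture path is a placeholder):

# Hypothetical call to the helper above: run a package-layout check
# against a fixture package and expect it to register a failure.
_do_test("tests/resources/packagelayout/blacklist.xpi",
         packagelayout.test_blacklisted_files,
         failure=True)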
Example #20
    def setup_err(self):
        """
        Instantiate the error bundle object. Use the `instant` parameter to
        have it output errors as they're generated. `for_appversions` may be set
        to target the test cases at a specific Gecko version range.

        An existing error bundle will be overwritten with a fresh one that has
        the state that the test case was set up with.
        """
        self.err = ErrorBundle(instant=True,
                               listed=getattr(self, "listed", True))
        self.err.handler = OutputHandler(sys.stdout, True)
Example #21
def test_notice():
    """Test notice-related functions of the error bundler."""

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()

    bundle.notice((), "")

    # Load the JSON output as an object.
    output = json.loads(bundle.render_json())

    # Run some basic tests
    assert len(output["messages"]) == 1

    print output

    has_ = False

    for message in output["messages"]:
        print message

        if message["type"] == "notice":
            has_ = True

    assert has_
    assert not bundle.failed()
    assert not bundle.failed(True)
Example #22
def _do_test(path, test, failure=True, set_type=0,
             listed=False, xpi_mode="r"):

    package_data = open(path, "rb")
    package = ZipPackage(package_data, mode=xpi_mode, name=path)
    err = ErrorBundle()
    if listed:
        err.save_resource("listed", True)

    # Populate in the dependencies.
    if set_type:
        err.set_type(set_type) # Conduit test requires type

    test(err, package)

    print err.print_summary(verbose=True)
    assert err.failed() if failure else not err.failed()

    return err
Example #23
def test_message_completeness():
    """Test we're fully expecting all of the values for a message."""

    bundle = ErrorBundle()

    bundle.error(
        ("id", ),
        "error",
        "description",
        "file",
        123,  # line
        456  # column
    )

    results = json.loads(bundle.render_json())
    eq_(len(results["messages"]), 1, "Unexpected number of messages.")

    message = results["messages"][0]
    eq_(message["id"], ["id"])
    eq_(message["message"], "error")
    eq_(message["description"], "description")
    eq_(message["file"], "file")
    eq_(message["line"], 123)
    eq_(message["column"], 456)
def test_script_scraping():
    """Test that the scripts in a document are collected properly."""

    err = ErrorBundle()
    parser = markuptester.MarkupParser(err, debug=True)
    parser.process(
        "foo.xul", """
    <doc>
    <!-- One to be ignored -->
    <script type="text/javascript">
    eval("asdf");
    </script>
    </doc>
    """, "xul")

    assert err.errors
def test_local_url_detector():
    "Tests that local URLs can be detected."

    err = ErrorBundle()
    mp = markuptester.MarkupParser(err)
    tester = mp._is_url_local

    assert tester("chrome://xyz/content/abc")
    assert tester("chrome://whatever/")
    assert tester("local.xul")
    assert not tester("http://foo.bar/")
    assert not tester("https://abc.def/")

    assert tester(u"chrome://xyz/content/abc")
    assert tester(u"chrome://whatever/")
    assert tester(u"local.xul")
    assert not tester(u"http://foo.bar/")
    assert not tester(u"https://abc.def/")
def _do_test(path, should_fail=False):

    data = open(path).read()
    err = ErrorBundle()

    csstester.test_css_file(err, "css.css", data)
    err.print_summary(True)

    if should_fail:
        assert err.failed()
    else:
        assert not err.failed()

    return err
Example #27
    def test_path(self):
        """Test that paths are tested properly for allowances."""

        s = WebappSpec("{}", ErrorBundle())

        eq_(s._path_valid("*"), False)
        eq_(s._path_valid("*", can_be_asterisk=True), True)
        eq_(s._path_valid("/foo/bar"), False)
        eq_(s._path_valid("/foo/bar", can_be_absolute=True), True)
        eq_(s._path_valid("//foo/bar"), False)
        eq_(s._path_valid("//foo/bar", can_be_absolute=True), False)
        eq_(s._path_valid("//foo/bar", can_be_relative=True), False)
        eq_(s._path_valid("http://asdf/"), False)
        eq_(s._path_valid("https://asdf/"), False)
        eq_(s._path_valid("ftp://asdf/"), False)
        eq_(s._path_valid("http://asdf/", can_have_protocol=True), True)
        eq_(s._path_valid("https://asdf/", can_have_protocol=True), True)
        # No FTP for you!
        eq_(s._path_valid("ftp://asdf/", can_have_protocol=True), False)
        eq_(s._path_valid("data:asdf"), False)
        eq_(s._path_valid("data:asdf", can_be_data=True), True)
def test_prepare_package():
    "Tests that the prepare_package function passes for valid data"

    err = ErrorBundle()
    eq_(submain.prepare_package(err, "tests/resources/main/foo.xpi"), err)
    assert not err.failed()
Example #29
                  help="Path to test, or directory containing tests. "
                       "Directories will be recursively searched."
parser.add_option("-v", "--verbose", dest="verbose",
                  default=False,
                  action="store_true",
                  help="Use verbose mode for output.")

opt, args = parser.parse_args()

if os.path.isfile(opt.test):
    files = [opt.test]
else:
    files = []
    for root, folders, fileList in os.walk(opt.test):
        for f in fileList:
            files.append(os.path.join(root,f))

bundle = ErrorBundle()
for name in files:
    print name
    f = open(name, 'r')
    if name.endswith('.js'):
        test_js_file(bundle, name, f.read())
    else:
        soup = BeautifulSoup(f.read())
        for script in soup.find_all('script'):
            test_js_snippet(bundle, script.renderContents(), name)
    f.close()

print bundle.print_summary(verbose=opt.verbose)
Example #30
def test_prepare_package():
    "Tests that the prepare_package function passes for valid data"

    err = ErrorBundle()
    eq_(submain.prepare_package(err, "tests/resources/main/foo.xpi"), err)
    assert not err.failed()
def test_tempfiles_are_not_used_when_not_needed(run_with_tempfile):
    run_with_tempfile.return_value = "{}"
    err = ErrorBundle()
    scripting.test_js_file(err, "foo.js", "var x = [123, 456];")
    assert not run_with_tempfile.called
Example #32
def test_json_constructs():
    """This tests some of the internal JSON stuff so we don't break zamboni."""

    e = ErrorBundle()
    e.error(("a", "b", "c"), "Test")
    e.error(("a", "b", "foo"), "Test")
    e.error(("a", "foo", "c"), "Test")
    e.error(("a", "foo", "c"), "Test")
    e.error(("b", "foo", "bar"), "Test")
    e.warning((), "Context test",
              context=ContextGenerator("x\ny\nz\n"),
              line=2, column=0)
    e.notice((), "none")
    e.notice((), "line", line=1)
    e.notice((), "column", column=0)
    e.notice((), "line column", line=1, column=1)

    results = e.render_json()
    print results
    j = json.loads(results)

    assert "messages" in j
    for m in j["messages"]:
        if m["type"] == "warning":
            assert m["context"] == ["x", "y", "z"]

    for m in (m for m in j["messages"] if m["type"] == "notice"):
        if "line" in m["message"]:
            assert m["line"] is not None
            assert isinstance(m["line"], int)
            assert m["line"] > 0
        else:
            assert m["line"] is None

        if "column" in m["message"]:
            assert m["column"] is not None
            assert isinstance(m["column"], int)
            assert m["column"] > -1
        else:
            assert m["column"] is None
Example #33
class TestCase(object):
    def setUp(self):
        self.err = None
        self.is_bootstrapped = False
        self.detected_type = None
        self.listed = True

    def reset(self):
        """
        Reset the test case so that it can be run a second time (ideally with
        different parameters).
        """
        self.err = None

    def setup_err(self):
        """
        Instantiate the error bundle object. Use the `instant` parameter to
        have it output errors as they're generated. `for_appversions` may be set
        to target the test cases at a specific Gecko version range.

        An existing error bundle will be overwritten with a fresh one that has
        the state that the test case was set up with.
        """
        self.err = ErrorBundle(instant=True,
                               listed=getattr(self, "listed", True))
        self.err.handler = OutputHandler(sys.stdout, True)

    def assert_failed(self, with_errors=False, with_warnings=None):
        """
        First, asserts that the error bundle registers a failure (taking into
        account whether warnings count as failures). Second, if with_errors is
        True, the presence of errors is asserted; if it is False (the default),
        it is asserted that no errors are present. If with_warnings is not
        None, the presence (or absence) of warnings is tested the same way.
        """
        assert self.err.failed(fail_on_warnings=with_warnings or
                                                with_warnings is None), \
                "Test did not fail; failure was expected."

        if with_errors:
            assert self.err.errors, "Errors were expected."
        elif self.err.errors:
            raise AssertionError("Tests found unexpected errors: %s" %
                                 self.err.print_summary(verbose=True))

        if with_warnings is not None:
            if with_warnings:
                assert self.err.warnings, "Warnings were expected."
            elif self.err.warnings:
                raise ("Tests found unexpected warnings: %s" %
                       self.err.print_summary())

    def assert_notices(self):
        """
        Assert that notices have been generated during the validation process.
        """
        assert self.err.notices, "Notices were expected."

    def assert_passes(self, warnings_pass=False):
        """
        Assert that no errors have been raised. If warnings_pass is True, also
        assert that there are no warnings.
        """
        assert not self.err.failed(fail_on_warnings=not warnings_pass), \
                ("Test was intended to pass%s, but it did not." %
                     (" with warnings" if warnings_pass else ""))

    def assert_silent(self):
        """
        Assert that no messages (errors, warnings, or notices) have been
        raised.
        """
        assert not self.err.errors, 'Got these: %s' % self.err.errors
        assert not self.err.warnings, 'Got these: %s' % self.err.warnings
        assert not self.err.notices, 'Got these: %s' % self.err.notices

    def assert_got_errid(self, errid):
        """
        Assert that a message with the given errid has been generated during
        the validation process.
        """
        assert any(msg["id"] == errid for msg in
                   (self.err.errors + self.err.warnings + self.err.notices)), \
                "%s was expected, but it was not found." % repr(errid)

    def assert_has_feature(self, name):
        assert name in self.err.feature_profile, (
            '"%s" not found in feature profile (%s)' %
            (name, ', '.join(self.err.feature_profile)))
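As a rough sketch of how this base class is meant to be used (the subclass below is hypothetical, assembled from the MockXPI and packagelayout examples elsewhere on this page), a test creates a fresh bundle with setup_err(), runs a check against self.err, and then uses the assertion helpers:

class TestSpacesInNames(TestCase):
    # Hypothetical subclass, shown only to illustrate the helpers above.
    def test_space_in_filename_is_an_error(self):
        self.setup_err()
        package = MockXPI({"foo/bar/ foo.bar": None})
        packagelayout.test_blacklisted_files(self.err, package)
        self.assert_failed(with_errors=True)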
Example #34
def test_scripting_enabled():
    err = ErrorBundle()
    err.save_resource("SPIDERMONKEY", None)
    assert scripting.test_js_file(err, "abc def", "foo bar") is None
Example #35
#!/usr/bin/env python

import sys
import os

from appvalidator.constants import SPIDERMONKEY_INSTALLATION
from appvalidator.errorbundle import ErrorBundle
from appvalidator.errorbundle.outputhandlers.shellcolors import OutputHandler
import appvalidator.testcases.scripting as scripting
import appvalidator.testcases.javascript.traverser
from appvalidator.testcases.javascript.predefinedentities import GLOBAL_ENTITIES
import appvalidator.testcases.javascript.spidermonkey as spidermonkey
appvalidator.testcases.javascript.traverser.DEBUG = True

if __name__ == '__main__':
    err = ErrorBundle(instant=True)
    err.handler = OutputHandler(sys.stdout, False)
    err.supported_versions = {}
    if len(sys.argv) > 1:
        path = sys.argv[1]
        script = open(path).read()
        scripting.test_js_file(err=err,
                               filename=path,
                               data=script)
    else:
        trav = appvalidator.testcases.javascript.traverser.Traverser(err, "stdin")
        trav._push_context()

        def do_inspect(wrapper, arguments, traverser):
            print "~" * 50
            for arg in arguments:
Example #36
def test_validation_timeout():
    err = ErrorBundle()
    submain.prepare_package(err, "tests/resources/main/foo.xpi", timeout=0.1)
    assert len(err.errors) == 1
Example #37
import sys
import os

from appvalidator.constants import SPIDERMONKEY_INSTALLATION
from appvalidator.errorbundle import ErrorBundle
from appvalidator.errorbundle.outputhandlers.shellcolors import OutputHandler
import appvalidator.testcases.scripting as scripting
import appvalidator.testcases.javascript.traverser
from appvalidator.testcases.javascript.predefinedentities import GLOBAL_ENTITIES
import appvalidator.testcases.javascript.spidermonkey as spidermonkey

appvalidator.testcases.javascript.traverser.DEBUG = True

if __name__ == "__main__":
    err = ErrorBundle(instant=True)
    err.handler = OutputHandler(sys.stdout, False)
    err.supported_versions = {}
    if len(sys.argv) > 1:
        path = sys.argv[1]
        script = open(path).read()
        scripting.test_js_file(err=err, filename=path, data=script)
    else:
        trav = appvalidator.testcases.javascript.traverser.Traverser(err, "stdin")
        trav._push_context()

        def do_inspect(wrapper, arguments, traverser):
            print "~" * 50
            for arg in arguments:
                if arg["type"] == "Identifier":
                    print 'Identifier: "%s"' % arg["name"]
Example #38
class TestCase(object):
    def setUp(self):
        self.err = None
        self.is_bootstrapped = False
        self.detected_type = None
        self.listed = True

    def reset(self):
        """
        Reset the test case so that it can be run a second time (ideally with
        different parameters).
        """
        self.err = None

    def setup_err(self):
        """
        Instantiate the error bundle object. Use the `instant` parameter to
        have it output errors as they're generated. `for_appversions` may be set
        to target the test cases at a specific Gecko version range.

        An existing error bundle will be overwritten with a fresh one that has
        the state that the test case was set up with.
        """
        self.err = ErrorBundle(instant=True,
                               listed=getattr(self, "listed", True))
        self.err.handler = OutputHandler(sys.stdout, True)

    def assert_failed(self, with_errors=False, with_warnings=None):
        """
        First, asserts that the error bundle registers a failure (taking into
        account whether warnings count as failures). Second, if with_errors is
        True, the presence of errors is asserted; if it is False (the default),
        it is asserted that no errors are present. If with_warnings is not
        None, the presence (or absence) of warnings is tested the same way.
        """
        assert self.err.failed(fail_on_warnings=with_warnings or
                                                with_warnings is None), \
                "Test did not fail; failure was expected."

        if with_errors:
            assert self.err.errors, "Errors were expected."
        elif self.err.errors:
            raise AssertionError("Tests found unexpected errors: %s" %
                                self.err.print_summary(verbose=True))

        if with_warnings is not None:
            if with_warnings:
                assert self.err.warnings, "Warnings were expected."
            elif self.err.warnings:
                raise ("Tests found unexpected warnings: %s" %
                           self.err.print_summary())

    def assert_notices(self):
        """
        Assert that notices have been generated during the validation process.
        """
        assert self.err.notices, "Notices were expected."

    def assert_passes(self, warnings_pass=False):
        """
        Assert that no errors have been raised. If warnings_pass is True, also
        assert that there are no warnings.
        """
        assert not self.err.failed(fail_on_warnings=not warnings_pass), \
                ("Test was intended to pass%s, but it did not." %
                     (" with warnings" if warnings_pass else ""))

    def assert_silent(self):
        """
        Assert that no messages (errors, warnings, or notices) have been
        raised.
        """
        assert not self.err.errors, 'Got these: %s' % self.err.errors
        assert not self.err.warnings, 'Got these: %s' % self.err.warnings
        assert not self.err.notices, 'Got these: %s' % self.err.notices

    def assert_got_errid(self, errid):
        """
        Assert that a message with the given errid has been generated during
        the validation process.
        """
        assert any(msg["id"] == errid for msg in
                   (self.err.errors + self.err.warnings + self.err.notices)), \
                "%s was expected, but it was not found." % repr(errid)
Example #39
def test_prepare_package_webapp(fake_webapp_validator):
    fake_webapp_validator.expects_call().with_arg_count(2)

    err = ErrorBundle()
    submain.prepare_package(err, "tests/resources/main/mozball.webapp")
    assert not err.failed()
Example #40
def test_scripting_enabled():
    err = ErrorBundle()
    err.save_resource("SPIDERMONKEY", None)
    assert scripting.test_js_file(err, "abc def", "foo bar") is None
Example #41
def test_prepare_package_webapp(fake_webapp_validator):
    fake_webapp_validator.expects_call().with_arg_count(2)

    err = ErrorBundle()
    submain.prepare_package(err, "tests/resources/main/mozball.webapp")
    assert not err.failed()
Example #42
def test_scripting_disabled():
    """Ensures that Spidermonkey is not run if it is set to be disabled."""
    err = ErrorBundle()
    assert scripting.test_js_file(err, "abc def", "foo bar") is None
def test_crazy_unicode():
    err = ErrorBundle()
    with open('tests/resources/spidermonkey_unicode.js', 'r') as f:
        scripting.test_js_file(err, "foo.js", f.read())
    assert not err.failed(), err.errors + err.warnings
Example #44
def test_file_structure():
    """
    Test the means by which file names and line numbers are stored in errors,
    warnings, and messages.
    """

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()

    # Populate the bundle with some test data.
    bundle.error((), "error", "", "file1", 123)
    bundle.error((), "error", "", "file2")

    bundle.warning((), "warning", "", "file4", 123)
    bundle.warning((), "warning", "", "file5")
    bundle.warning((), "warning")

    # Load the JSON output as an object.
    output = json.loads(bundle.render_json())

    # Do the same for friendly output
    output2 = bundle.print_summary(verbose=False)

    # Do the same for verbose friendly output
    output3 = bundle.print_summary(verbose=True)

    # Run some basic tests
    eq_(len(output["messages"]), 5)
    assert len(output2) < len(output3)

    messages = ["file1", "file2", "", "file4", "file5"]

    for message in output["messages"]:
        print message

        assert message["file"] in messages
        messages.remove(message["file"])

        if isinstance(message["file"], list):
            pattern = message["file"][:]
            pattern.pop()
            pattern.append("")
            file_merge = " > ".join(pattern)
            print file_merge
            assert output3.count(file_merge)
        else:
            assert output3.count(message["file"])

    assert not messages