Example #1
def _do_test(path,
             test,
             failure=True,
             require_install=False,
             set_type=0,
             listed=False,
             xpi_mode='r'):

    package_data = open(path, 'rb')
    package = XPIManager(package_data, mode=xpi_mode, name=path)
    err = ErrorBundle()
    if listed:
        err.save_resource('listed', True)

    # Populate in the dependencies.
    if set_type:
        err.detected_type = set_type  # Conduit test requires type
    if require_install:
        err.save_resource('has_install_rdf', True)
        rdf_data = package.read('install.rdf')
        install_rdf = RDFParser(err, rdf_data)
        err.save_resource('install_rdf', install_rdf)

    populate_chrome_manifest(err, package)

    test(err, package)

    print err.print_summary(verbose=True)

    if failure:
        assert err.failed()
    else:
        assert not err.failed()

    return err
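
A hypothetical caller for this helper might look like the sketch below; the fixture path and the no-op check are placeholders rather than tests from the original suite.

def test_example_package():
    # Placeholder check standing in for a real validator test such as
    # content.test_packed_packages; the .xpi path is also hypothetical,
    # so the failure=False expectation is only illustrative.
    def noop_check(err, package):
        pass
    _do_test('tests/resources/example/empty.xpi', noop_check, failure=False)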
Example #2
def test_webapp_pass():
    """Test that a bland webapp file throws no errors."""

    err = ErrorBundle()
    _detect(err, _get_json())
    print err.print_summary(verbose=True)
    assert not err.failed()
def _run_test_raw(data,
                  failure=True,
                  detected_type=0,
                  listed=True,
                  overrides=None,
                  compat=False):
    'Runs a test on an install.rdf snippet'

    data = data.strip()

    err = ErrorBundle()
    err.detected_type = detected_type
    err.save_resource('listed', listed)
    err.overrides = overrides

    if compat:
        err.save_resource('is_compat_test', True)

    err.save_resource('has_install_rdf', True)
    err.save_resource('install_rdf', RDFParser(err, data))
    installrdf.test_install_rdf_params(err)

    print err.print_summary(verbose=True)

    if failure:  # pragma: no cover
        assert err.failed() or err.notices
    else:
        assert not err.failed() and not err.notices

    return err
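
The sketch below shows how a test might feed an install.rdf fragment through `_run_test_raw`; the markup and the expected outcome are illustrative, not taken from the original suite.

def test_install_rdf_snippet():
    # Minimal, hypothetical install.rdf fragment; whether it passes or
    # fails depends on the validator's rules, so failure=True here is
    # only illustrative.
    _run_test_raw("""
<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
     xmlns:em="http://www.mozilla.org/2004/em-rdf#">
  <Description about="urn:mozilla:install-manifest">
    <em:id>test@example.com</em:id>
    <em:version>1.0</em:version>
    <em:name>Example Add-on</em:name>
  </Description>
</RDF>
""", failure=True)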
Example #4
def _do_test(path, test, failure=True,
             require_install=False, set_type=0,
             listed=False, xpi_mode="r"):

    package_data = open(path, "rb")
    package = XPIManager(package_data, mode=xpi_mode, name=path)
    err = ErrorBundle()
    if listed:
        err.save_resource("listed", True)

    # Populate in the dependencies.
    if set_type:
        err.detected_type = set_type # Conduit test requires type
    if require_install:
        err.save_resource("has_install_rdf", True)
        rdf_data = package.read("install.rdf")
        install_rdf = RDFParser(err, rdf_data)
        err.save_resource("install_rdf", install_rdf)

    populate_chrome_manifest(err, package)

    test(err, package)

    print err.print_summary(verbose=True)

    if failure:
        assert err.failed()
    else:
        assert not err.failed()

    return err
Example #5
 def test_structure(structure):
     err = ErrorBundle()
     err.supported_versions = {}
     mock_package = MockXPI(structure)
     content.test_packed_packages(err, mock_package)
     print err.print_summary(verbose=True)
     assert err.failed()
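
In its original context this helper is nested inside a test, so the line below only illustrates the call shape, borrowing the hidden-file mapping that appears in later examples (in-package path on the left, local fixture on the right).

# Illustrative call shape only; the '.hidden' entry mirrors the MockXPI
# mapping used by test_hidden_files further down.
test_structure({'.hidden': 'tests/resources/content/junk.xpi'})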
Example #7
def test_blacklisted_files():
    """
    Tests the validator's ability to hash each individual file and (based on
    this information) determine whether the addon passes or fails the
    validation process.
    """

    package_data = open('tests/resources/libraryblacklist/blocked.xpi')
    package = XPIManager(package_data, mode='r', name='blocked.xpi')
    err = ErrorBundle()

    test_content.test_packed_packages(err, package)

    print err.print_summary()

    assert err.notices
    assert not err.failed()
    eq_(err.metadata.get('identified_files'),
        {'test.js': {'path': 'This file is a false script to facilitate '
                             'testing of library blacklisting.'}})
Example #8
def _do_test(path, test, failure=True,
             require_install=False, set_type=0):
    
    package_data = open(path, "rb")
    package = XPIManager(package_data, path)
    contents = package.get_file_data()
    err = ErrorBundle()
    
    # Populate in the dependencies.
    if set_type:
        err.set_type(set_type) # Conduit test requires type
    if require_install:
        err.save_resource("has_install_rdf", True)
        rdf_data = package.read("install.rdf")
        install_rdf = RDFParser(rdf_data)
        err.save_resource("install_rdf", install_rdf)
    
    test(err, contents, package)
    
    print err.print_summary(verbose=True)
    
    if failure:
        assert err.failed()
    else:
        assert not err.failed()
    
    return err
def _run_test_raw(data, failure=True, detected_type=0, listed=True,
                  overrides=None, compat=False):
    "Runs a test on an install.rdf snippet"

    data = data.strip()

    err = ErrorBundle()
    err.detected_type = detected_type
    err.save_resource("listed", listed)
    err.overrides = overrides

    if compat:
        err.save_resource("is_compat_test", True)

    err.save_resource("has_install_rdf", True)
    err.save_resource("install_rdf", RDFParser(err, data))
    installrdf.test_install_rdf_params(err)

    print err.print_summary(verbose=True)

    if failure:  # pragma: no cover
        assert err.failed() or err.notices
    else:
        assert not err.failed() and not err.notices

    return err
def test_webapp_pass():
    """Test that a bland webapp file throws no errors."""

    err = ErrorBundle()
    validator.webapp.detect_webapp(err, json.dumps(_get_json()))
    print err.print_summary(verbose=True)
    assert not err.failed()
Example #11
def test_hidden_files(test_input):
    """Tests that hidden files are reported."""

    err = ErrorBundle()
    err.supported_versions = {}
    mock_package = MockXPI(test_input)
    content.test_packed_packages(err, mock_package)
    print err.print_summary(verbose=True)
    assert err.failed()
Example #12
def test_webapp_no_default_locale():
    """Test that locales require default_locale."""

    err = ErrorBundle()
    data = _get_json()
    del data["default_locale"]
    _detect(err, data)
    print err.print_summary(verbose=True)
    assert err.failed()
def test_hidden_files(test_input):
    """Tests that hidden files are reported."""

    err = ErrorBundle()
    err.supported_versions = {}
    mock_package = MockXPI(test_input)
    content.test_packed_packages(err, mock_package)
    print err.print_summary(verbose=True)
    assert err.failed()
Example #14
 def test(versions):
     err = ErrorBundle()
     err.supported_versions = versions
     parser = MarkupParser(err)
     parser.process(name,
                    data,
                    name.split(".")[-1])
     print err.print_summary(verbose=True)
     assert not err.failed()
     return err
def _do_test_raw(rdf, listed=True, overrides=None):
    err = ErrorBundle(listed=listed)
    err.overrides = overrides
    rdf = RDFParser(err, rdf.strip())
    err.save_resource("has_install_rdf", True)
    err.save_resource("install_rdf", rdf)

    targetapp.test_targetedapplications(err)
    print err.print_summary()
    return err
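
Unlike the helpers above, `_do_test_raw` returns the bundle without asserting, so callers inspect it themselves; a minimal hedged sketch follows, with an illustrative install.rdf fragment (the GUID is Firefox's application ID).

def test_targetapp_example():
    err = _do_test_raw("""
<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
     xmlns:em="http://www.mozilla.org/2004/em-rdf#">
  <Description about="urn:mozilla:install-manifest">
    <em:targetApplication>
      <Description>
        <em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
        <em:minVersion>3.6</em:minVersion>
        <em:maxVersion>4.0</em:maxVersion>
      </Description>
    </em:targetApplication>
  </Description>
</RDF>
""")
    # The expected outcome depends on the validator's approved-application
    # data; a real test would assert err.failed() or not accordingly.
    return err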
Example #17
def test_skip_blacklisted_file():
    """Ensure blacklisted files are skipped for processing."""

    package_data = open('tests/resources/libraryblacklist/errors.xpi')
    package = XPIManager(package_data, mode='r', name='errors.xpi')
    err = ErrorBundle()

    test_content.test_packed_packages(err, package)

    print err.print_summary()
    assert err.notices
    assert not err.failed()
def test_skip_blacklisted_file():
    """Ensure blacklisted files are skipped for processing."""

    package_data = open("tests/resources/libraryblacklist/errors.xpi")
    package = XPIManager(package_data, mode="r", name="errors.xpi")
    err = ErrorBundle()

    test_content.test_packed_packages(err, package)

    print err.print_summary()
    assert err.notices
    assert not err.failed()
def test_package_corrupt():
    "Tests the test_package function fails with a non-zip"
    
    tip = submain.test_inner_package
    submain.test_inner_package = lambda x, y, z: "success"
    
    name = "tests/resources/junk.xpi"
    err = ErrorBundle(None, True)
    
    result = submain.test_package(err, name, name)
    submain.test_inner_package = tip
    
    err.print_summary(True)
    assert err.failed()
def _do_test(path, should_fail=False):

    data = open(path).read()
    err = ErrorBundle()

    csstester.test_css_file(err, "css.css", data)
    err.print_summary(True)

    if should_fail:
        assert err.failed()
    else:
        assert not err.failed()

    return err
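
A hedged usage sketch for the CSS helper; the fixture path is a placeholder.

def test_css_fixture():
    # Hypothetical CSS fixture; should_fail mirrors the expectation for
    # that particular file, so False here is only illustrative.
    _do_test('tests/resources/csstester/example.css', should_fail=False)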
def test_package_corrupt():
    "Tests the test_package function fails with a corrupt file"

    tip = submain.test_inner_package
    submain.test_inner_package = lambda x, z, for_appversions: "success"

    name = "tests/resources/corrupt.xpi"
    err = ErrorBundle()

    result = submain.test_package(err, name, name)
    submain.test_inner_package = tip

    err.print_summary(True)
    assert err.failed()
def test_package_corrupt():
    "Tests the test_package function fails with a corrupt file"

    tip = submain.test_inner_package
    submain.test_inner_package = lambda x, z, for_appversions: "success"

    name = "tests/resources/corrupt.xpi"
    err = ErrorBundle()

    result = submain.test_package(err, name, name)
    submain.test_inner_package = tip

    err.print_summary(True)
    assert err.failed()
def test_blacklisted_files():
    """Tests the validator's ability to hash each individual file and
    (based on this information) determine whether the addon passes or
    fails the validation process."""

    package_data = open("tests/resources/libraryblacklist/blocked.xpi")
    package = XPIManager(package_data, "blocked.xpi")
    contents = package.get_file_data()
    err = ErrorBundle()

    libblacklist.test_library_blacklist(err, contents, package)

    print err.print_summary()

    assert err.notices
def test_boring():
    """Test that boring output strips out color sequences."""

    stdout = sys.stdout
    sys.stdout = StringIO()

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()
    bundle.error((), "<<BLUE>><<GREEN>><<YELLOW>>")
    bundle.print_summary(no_color=True)

    outbuffer = sys.stdout
    sys.stdout = stdout
    outbuffer.seek(0)

    assert outbuffer.getvalue().count("<<GREEN>>") == 0
def test_boring():
    """Test that boring output strips out color sequences."""

    stdout = sys.stdout
    sys.stdout = StringIO()

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()
    bundle.error((), '<<BLUE>><<GREEN>><<YELLOW>>')
    bundle.print_summary(no_color=True)

    outbuffer = sys.stdout
    sys.stdout = stdout
    outbuffer.seek(0)

    assert outbuffer.getvalue().count('<<GREEN>>') == 0
def _do_test(path, should_fail=False, detected_type=None):

    data = open(path).read()
    err = ErrorBundle()
    if detected_type is not None:
        err.detected_type = detected_type

    csstester.test_css_file(err, "css.css", data)
    err.print_summary(True)

    if should_fail:
        assert err.failed()
    else:
        assert not err.failed()

    return err
def _do_test(path, should_fail=False, detected_type=None):

    data = open(path).read()
    err = ErrorBundle()
    if detected_type is not None:
        err.detected_type = detected_type

    csstester.test_css_file(err, 'css.css', data)
    err.print_summary(True)

    if should_fail:
        assert err.failed()
    else:
        assert not err.failed()

    return err
def test_password_in_defaults_prefs():
    """
    Tests that passwords aren't stored in the defaults/preferences/*.js files
    for bug 647109.
    """

    password_js = open("tests/resources/content/password.js").read()
    assert not _do_test_raw(password_js).failed()

    err = ErrorBundle()
    err.supported_versions = {}
    mock_package = MockXPI({"defaults/preferences/foo.js":
                                "tests/resources/content/password.js"})

    content.test_packed_packages(err, mock_package)
    print err.print_summary()
    assert err.failed()
def test_blacklisted_files():
    """
    Tests the validator's ability to hash each individual file and (based on
    this information) determine whether the addon passes or fails the
    validation process.
    """

    package_data = open("tests/resources/libraryblacklist/blocked.xpi")
    package = XPIManager(package_data, mode="r", name="blocked.xpi")
    err = ErrorBundle()

    test_content.test_packed_packages(err, package)

    print err.print_summary()

    assert err.notices
    assert not err.failed()
def test_hidden_files():
    """Tests that hidden files are reported."""

    err = ErrorBundle()
    err.supported_versions = {}
    mock_package = MockXPI({".hidden": "tests/resources/content/junk.xpi"})

    content.test_packed_packages(err, mock_package)
    print err.print_summary(verbose=True)
    assert err.failed()

    err = ErrorBundle()
    mock_package_mac = MockXPI({"dir/__MACOSX/foo":
                                          "tests/resources/content/junk.xpi"})
    content.test_packed_packages(err, mock_package_mac)
    print err.print_summary(verbose=True)
    assert err.failed()
Example #31
def test_password_in_defaults_prefs():
    """
    Tests that passwords aren't stored in the defaults/preferences/*.js files
    for bug 647109.
    """

    password_js = open("tests/resources/content/password.js").read()
    assert not _do_test_raw(password_js).failed()

    err = ErrorBundle()
    err.supported_versions = {}
    mock_package = MockXPI(
        {"defaults/preferences/foo.js": "tests/resources/content/password.js"})

    content._process_file(err, mock_package, "defaults/preferences/foo.js",
                          password_js, "foo.js")
    print err.print_summary()
    assert err.failed()
Example #32
def _run_test(filename, failure=True, detected_type=0):
    "Runs a test on an install.rdf file"
    
    err = ErrorBundle()
    err.detected_type = detected_type
    
    data = open(filename).read()
    
    parser = RDFParser(data)
    installrdf._test_rdf(err, parser)
    
    print err.print_summary()

    if failure: # pragma: no cover
        assert err.failed() or err.notices
    else:
        assert not err.failed() and not err.notices
    
    return err
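
Callers point `_run_test` at an install.rdf fixture on disk; a hedged sketch with a placeholder filename follows.

def test_install_rdf_fixture():
    # Hypothetical fixture path; failure=True expects errors or notices
    # from the file under test.
    _run_test('tests/resources/installrdf/missing_version.rdf', failure=True)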
def test_blacklisted_files():
    """
    Tests the validator's ability to hash each individual file and (based on
    this information) determine whether the addon passes or fails the
    validation process.
    """

    package_data = open("tests/resources/libraryblacklist/blocked.xpi")
    package = XPIManager(package_data, mode="r", name="blocked.xpi")
    err = ErrorBundle()

    test_content.test_packed_packages(err, package)

    print err.print_summary()

    assert err.notices
    assert not err.failed()
    eq_(err.metadata.get("identified_files"),
        {'test.js': {'path': 'This file is a false script to facilitate '
                             'testing of library blacklisting.'}})
def _test_xul_raw(data, path, should_fail=False, type_=None):
    filename = path.split("/")[-1]
    extension = filename.split(".")[-1]

    err = ErrorBundle()
    err.supported_versions = {}
    if type_:
        err.detected_type = type_

    parser = markuptester.MarkupParser(err, debug=True)
    parser.process(filename, data, extension)

    print err.print_summary(verbose=True)

    if should_fail:
        assert err.failed()
    else:
        assert not err.failed(fail_on_warnings=False)

    return err
def _do_simulated_test(function, structure, failure=False, ff4=False):
    """"Performs a test on a function or set of functions without
    generating a full package."""

    dict_structure = {'__MACOSX/foo.bar': True}
    for item in structure:
        dict_structure[item] = True

    err = ErrorBundle()
    err.save_resource('ff4', ff4)
    function(err, structure)

    err.print_summary(True)

    if failure:
        assert err.failed()
    else:
        assert not err.failed()

    return err
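
A hedged sketch of how `_do_simulated_test` might be driven; the check function below is a stand-in for a real validator test and uses only calls that appear elsewhere in these examples.

def _flag_everything(err, structure):
    # Placeholder check: flag every path it is handed so that the
    # failure=True branch of _do_simulated_test is exercised.
    for path in structure:
        err.error((), 'flagged', description='', filename=path)

def test_simulated_flagging():
    # Hypothetical structure list; any path will be flagged by the
    # placeholder above.
    _do_simulated_test(_flag_everything, ['foo.dll', 'bar.so'], failure=True)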
def _test_xul_raw(data, path, should_fail=False, type_=None):
    filename = path.split("/")[-1]
    extension = filename.split(".")[-1]

    err = ErrorBundle()
    err.supported_versions = {}
    if type_:
        err.detected_type = type_

    parser = markuptester.MarkupParser(err, debug=True)
    parser.process(filename, data, extension)

    print err.print_summary(verbose=True)

    if should_fail:
        assert err.failed()
    else:
        assert not err.failed(fail_on_warnings=False)

    return err
def _do_simulated_test(function, structure, failure=False, ff4=False):
    """"Performs a test on a function or set of functions without
    generating a full package."""
    
    dict_structure = {"__MACOSX/foo.bar": True}
    for item in structure:
        dict_structure[item] = True
    
    err = ErrorBundle(None, True)
    err.save_resource("ff4", ff4)
    function(err, dict_structure, None)
    
    err.print_summary(True)
    
    if failure:
        assert err.failed()
    else:
        assert not err.failed()
    
    return err
Example #38
def test_linked_manifest_recursion():
    """Test that recursive linked manifests are flagged properly."""

    err = ErrorBundle()
    package = MockXPI({
        "chrome.manifest": "tests/resources/submain/linkman/base1.manifest",
        "submanifest.manifest":
            "tests/resources/submain/linkman/recurse.manifest"})

    submain.populate_chrome_manifest(err, package)
    chrome = err.get_resource("chrome.manifest")
    assert chrome

    print err.print_summary(verbose=True)

    assert err.failed()
    assert not err.notices

    # From the base file:
    assert list(chrome.get_triples(subject="foo"))
    # From the linked manifest:
    assert not list(chrome.get_triples(subject="zap"))
def _run_test_raw(data, failure=True, detected_type=0, listed=True,
                  overrides=None):
    "Runs a test on an install.rdf snippet"

    data = data.strip()

    err = ErrorBundle()
    err.detected_type = detected_type
    err.save_resource("listed", listed)
    err.overrides = overrides

    parser = RDFParser(data)
    installrdf._test_rdf(err, parser)

    print err.print_summary(verbose=True)

    if failure: # pragma: no cover
        assert err.failed() or err.notices
    else:
        assert not err.failed() and not err.notices

    return err
def test_linked_manifest_recursion():
    """Test that recursive linked manifests are flagged properly."""

    err = ErrorBundle()
    package = MockXPI({
        'chrome.manifest': 'tests/resources/submain/linkman/base1.manifest',
        'submanifest.manifest':
            'tests/resources/submain/linkman/recurse.manifest'})

    submain.populate_chrome_manifest(err, package)
    chrome = err.get_resource('chrome.manifest')
    assert chrome

    print err.print_summary(verbose=True)

    assert err.failed()
    assert not err.notices

    # From the base file:
    assert list(chrome.get_triples(subject='foo'))
    # From the linked manifest:
    assert not list(chrome.get_triples(subject='zap'))
Example #41
def test_notice_friendly():
    """Tests notice-related human-friendly text output functions of the
    error bundler."""
    
    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()
    
    bundle.notice((), "foobar")
    
    # Load the JSON output as an object.
    output = bundle.print_summary(verbose=True, no_color=True)
    print output
    
    assert output.count("foobar")
Example #42
def _do_test(path, test, failure=True,
             require_install=False, set_type=0,
             listed=False, xpi_mode='r'):

    package_data = open(path, 'rb')
    package = XPIManager(package_data, mode=xpi_mode, name=path)
    err = ErrorBundle()
    if listed:
        err.save_resource('listed', True)

    # Populate in the dependencies.
    if set_type:
        err.detected_type = set_type # Conduit test requires type
    if require_install:
        if 'install.rdf' in package:
            err.save_resource('has_install_rdf', True)
            rdf_data = package.read('install.rdf')
            install_rdf = RDFParser(err, rdf_data)
            err.save_resource('install_rdf', install_rdf)
        elif 'manifest.json' in package:
            err.save_resource('has_manifest_json', True)
            manifest_data = package.read('manifest.json')
            manifest_json = ManifestJsonParser(err, manifest_data)
            err.save_resource('install_rdf', manifest_json)

    populate_chrome_manifest(err, package)

    test(err, package)

    print err.print_summary(verbose=True)

    if failure:
        assert err.failed()
    else:
        assert not err.failed()

    return err
def _do_test(path, should_fail=False, type_=None):
    
    markup_file = open(path)
    data = markup_file.read()
    markup_file.close()
    
    filename = path.split("/")[-1]
    extension = filename.split(".")[-1]
    
    err = ErrorBundle(None, True)
    if type_:
        err.set_type(type_)
    
    parser = markuptester.MarkupParser(err, True)
    parser.process(filename, data, extension)
    
    err.print_summary(True)
    
    if should_fail:
        assert err.failed()
    else:
        assert not err.failed()
    
    return err
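
A hypothetical caller for the markup helper; the fixture path is a placeholder.

def test_markup_fixture():
    # Placeholder markup fixture; real suites point at .xul or .xml
    # resources and choose should_fail per file.
    _do_test('tests/resources/markup/example.xul', should_fail=False)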
Example #44
def test_linked_manifest_recursion():
    """Test that recursive linked manifests are flagged properly."""

    err = ErrorBundle()
    package = MockXPI({
        'chrome.manifest':
        'tests/resources/submain/linkman/base1.manifest',
        'submanifest.manifest':
        'tests/resources/submain/linkman/recurse.manifest'
    })

    submain.populate_chrome_manifest(err, package)
    chrome = err.get_resource('chrome.manifest')
    assert chrome

    print err.print_summary(verbose=True)

    assert not err.failed()
    assert not err.notices

    # From the base file:
    assert list(chrome.get_entries('foo'))
    # From the linked manifest:
    assert not list(chrome.get_entries('zap'))
Example #45
def test_linked_manifest_recursion():
    """Test that recursive linked manifests are flagged properly."""

    err = ErrorBundle()
    package = MockXPI(
        {
            "chrome.manifest": "tests/resources/submain/linkman/base1.manifest",
            "submanifest.manifest": "tests/resources/submain/linkman/recurse.manifest",
        }
    )

    submain.populate_chrome_manifest(err, package)
    chrome = err.get_resource("chrome.manifest")
    assert chrome

    print err.print_summary(verbose=True)

    assert not err.failed()
    assert not err.notices

    # From the base file:
    assert list(chrome.get_entries("foo"))
    # From the linked manifest:
    assert not list(chrome.get_entries("zap"))
def test_notice_friendly():
    """
    Test notice-related human-friendly text output functions of the error
    bundler.
    """

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle()

    bundle.notice((), 'foobar')

    # Load the JSON output as an object.
    output = bundle.print_summary(verbose=True, no_color=True)
    print output

    assert output.count('foobar')
def test_file_structure():
    """
    Test the means by which file names and line numbers are stored in errors,
    warnings, and messages.
    """

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle(True)  # No color since no output

    # Populate the bundle with some test data.
    bundle.error((), "error", description="", filename="file1", column=123)
    bundle.error((), "error", description="", filename="file2")
    bundle.error((), "error")

    # Push a state
    bundle.push_state("foo")

    bundle.warning((), "warning", description="", filename="file4", column=123)
    bundle.warning((), "warning", description="", filename="file5")
    bundle.warning((), "warning")

    bundle.pop_state()

    # Load the JSON output as an object.
    output = json.loads(bundle.render_json())

    # Do the same for friendly output
    output2 = bundle.print_summary(verbose=False)

    # Do the same for verbose friendly output
    output3 = bundle.print_summary(verbose=True)

    # Run some basic tests
    assert len(output["messages"]) == 6
    assert len(output2) < len(output3)

    print output
    print "*" * 50
    print output2
    print "*" * 50
    print output3
    print "*" * 50

    messages = [
        "file1", "file2", "", ["foo", "file4"], ["foo", "file5"], ["foo", ""]
    ]

    for message in output["messages"]:
        print message

        assert message["file"] in messages
        messages.remove(message["file"])

        if isinstance(message["file"], list):
            pattern = message["file"][:]
            pattern.pop()
            pattern.append("")
            file_merge = " > ".join(pattern)
            print file_merge
            assert output3.count(file_merge)
        else:
            assert output3.count(message["file"])

    assert not messages
Example #48
class TestCase(object):
    def setUp(self):
        self.err = None
        self.is_jetpack = False
        self.is_bootstrapped = False
        self.detected_type = None
        self.listed = True

    def reset(self):
        """
        Reset the test case so that it can be run a second time (ideally with
        different parameters).
        """
        self.err = None

    def setup_err(self, for_appversions=None):
        """
        Instantiate the error bundle object. Use the `instant` parameter to
        have it output errors as they're generated. `for_appversions` may be set
        to target the test cases at a specific Gecko version range.

        An existing error bundle will be overwritten with a fresh one that has
        the state that the test case was set up with.
        """
        self.err = ErrorBundle(instant=True,
                               for_appversions=for_appversions or {},
                               listed=self.listed)
        self.err.handler = OutputHandler(sys.stdout, True)

        if self.is_jetpack:
            self.err.metadata["is_jetpack"] = True
        if self.is_bootstrapped:
            self.err.save_resource("em:bootstrap", True)
        if self.detected_type is not None:
            self.err.detected_type = self.detected_type

    def assert_failed(self, with_errors=False, with_warnings=None):
        """First, asserts that the error bundle registers a failure
        (recognizing whether warnings are acknowledged). Second, if
        `with_errors` is True, the presence of errors is asserted. If it is not
        true (default), it is tested that errors are not present. If
        `with_warnings` is not None, the presence of warnings is tested just
        like `with_errors`.

        """
        assert self.err.failed(
            fail_on_warnings=with_warnings or with_warnings is None), \
                "Test did not fail; failure was expected."

        if with_errors:
            assert self.err.errors, "Errors were expected."
        elif self.err.errors:
            raise AssertionError("Tests found unexpected errors: %s" %
                                 self.err.print_summary(verbose=True))

        if with_warnings is not None:
            if with_warnings:
                assert self.err.warnings, "Warnings were expected."
            elif self.err.warnings:
                raise ("Tests found unexpected warnings: %s" %
                       self.err.print_summary())

    def assert_notices(self):
        """Assert that notices have been generated during the validation
        process.

        """
        assert self.err.notices, "Notices were expected."

    def assert_passes(self, warnings_pass=False):
        """Assert that no errors have been raised. If `warnings_pass` is True,
        also assert that there are no warnings.

        """
        assert not self.err.failed(fail_on_warnings=not warnings_pass), \
                ("Test was intended to pass%s, but it did not." %
                     (" with warnings" if warnings_pass else ""))

    def assert_silent(self):
        """
        Assert that no messages (errors, warnings, or notices) have been
        raised.
        """
        assert not self.err.errors, 'Got these: %s' % self.err.errors
        assert not self.err.warnings, 'Got these: %s' % self.err.warnings
        assert not self.err.notices, 'Got these: %s' % self.err.notices
        assert not any(self.err.compat_summary.values()), \
                "Found compatibility messages."

    def assert_got_errid(self, errid):
        """
        Assert that a message with the given errid has been generated during
        the validation process.
        """
        assert any(msg["id"] == errid for msg in
                   (self.err.errors + self.err.warnings + self.err.notices)), \
                "%s was expected, but it was not found." % repr(errid)
Example #49
class TestCase(object):
    def setUp(self):
        self.err = None
        self.is_jetpack = False
        self.is_bootstrapped = False
        self.detected_type = None
        self.listed = True

    def reset(self):
        """
        Reset the test case so that it can be run a second time (ideally with
        different parameters).
        """
        self.err = None

    def setup_err(self, for_appversions=None):
        """
        Instantiate the error bundle object. Use the `instant` parameter to
        have it output errors as they're generated. `for_appversions` may be set
        to target the test cases at a specific Gecko version range.

        An existing error bundle will be overwritten with a fresh one that has
        the state that the test case was set up with.
        """
        self.err = ErrorBundle(instant=True,
                               for_appversions=for_appversions or {},
                               listed=self.listed)
        self.err.handler = OutputHandler(sys.stdout, True)

        if self.is_jetpack:
            self.err.metadata['is_jetpack'] = True
        if self.is_bootstrapped:
            self.err.save_resource('em:bootstrap', True)
        if self.detected_type is not None:
            self.err.detected_type = self.detected_type

    def assert_failed(self, with_errors=False, with_warnings=None):
        """
        Asserts that the error bundle has registered a failure. If
        `with_warnings` is any true value, or `None`, a warning is
        considered a failure.

        `with_warnings` or `with_errors` may be any of the following:

        * True: Messages of this type must be present.
        * False: Messages of this type must not be present.
        * None: Messages of this type may or may not be present.
        * Iterable of dicts: For each dict returned by the iterator, at
          least one message must have a matching item for every key/value
          pair in the dict.
        """
        assert self.err.failed(
            fail_on_warnings=with_warnings or with_warnings is None), \
            'Test did not fail; failure was expected.'

        def find_message(messages, props):
            # Returns true if any message in messages has all of the
            # key/value pairs in props.
            return any(
                set(props.iteritems()) <= set(message.iteritems())
                for message in messages)

        def test_messages(mtype, expected):
            messages = getattr(self.err, mtype)

            if isinstance(expected, collections.Iterable):
                assert all(find_message(messages, props) for props in expected)
            elif expected:
                assert messages, 'Expected %s.' % mtype
            elif expected is not None:
                assert not messages, (
                    'Tests found unexpected %s: %s' %
                    (mtype, self.err.print_summary(verbose=True)))

        test_messages('errors', with_errors)
        test_messages('warnings', with_warnings)

    def assert_notices(self):
        """Assert that notices have been generated during the validation
        process.

        """
        assert self.err.notices, 'Notices were expected.'

    def assert_passes(self, warnings_pass=False):
        """Assert that no errors have been raised. If `warnings_pass` is True,
        also assert that there are no warnings.

        """
        assert not self.err.failed(fail_on_warnings=not warnings_pass), \
                ('Test was intended to pass%s, but it did not.' %
                     (' with warnings' if warnings_pass else ''))

    def assert_silent(self):
        """
        Assert that no messages (errors, warnings, or notices) have been
        raised.
        """
        assert not self.err.errors, 'Got these: %s' % self.err.errors
        assert not self.err.warnings, 'Got these: %s' % self.err.warnings
        assert not self.err.notices, 'Got these: %s' % self.err.notices
        assert not any(self.err.compat_summary.values()), \
                'Found compatibility messages.'

    def assert_got_errid(self, errid):
        """
        Assert that a message with the given errid has been generated during
        the validation process.
        """
        assert any(msg['id'] == errid for msg in
                   (self.err.errors + self.err.warnings + self.err.notices)), \
                '%s was expected, but it was not found.' % repr(errid)
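
A concrete suite would build on TestCase roughly as sketched below; the injected warning is a placeholder for running a real validator check against self.err.

class TestSketch(TestCase):
    # Hedged sketch only: it exercises the helpers above without running
    # an actual validator test.
    def test_warning_counts_as_failure(self):
        self.setup_err()
        # Stand-in for a real check that would populate the bundle.
        self.err.warning((), 'example warning')
        self.assert_failed(with_warnings=True)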
def test_file_structure():
    """
    Test the means by which file names and line numbers are stored in errors,
    warnings, and messages.
    """

    # Use the StringIO as an output buffer.
    bundle = ErrorBundle(True)  # No color since no output

    # Populate the bundle with some test data.
    bundle.error((), 'error', description='', filename='file1', column=123)
    bundle.error((), 'error', description='', filename='file2')
    bundle.error((), 'error')

    # Push a state
    bundle.push_state('foo')

    bundle.warning((), 'warning', description='', filename='file4', column=123)
    bundle.warning((), 'warning', description='', filename='file5')
    bundle.warning((), 'warning')

    bundle.pop_state()

    # Load the JSON output as an object.
    output = json.loads(bundle.render_json())

    # Do the same for friendly output
    output2 = bundle.print_summary(verbose=False)

    # Do the same for verbose friendly output
    output3 = bundle.print_summary(verbose=True)

    # Run some basic tests
    assert len(output['messages']) == 6
    assert len(output2) < len(output3)

    print output
    print '*' * 50
    print output2
    print '*' * 50
    print output3
    print '*' * 50

    messages = [
        'file1', 'file2', '', ['foo', 'file4'], ['foo', 'file5'], ['foo', '']
    ]

    for message in output['messages']:
        print message

        assert message['file'] in messages
        messages.remove(message['file'])

        if isinstance(message['file'], list):
            pattern = message['file'][:]
            pattern.pop()
            pattern.append('')
            file_merge = ' > '.join(pattern)
            print file_merge
            assert output3.count(file_merge)
        else:
            assert output3.count(message['file'])

    assert not messages