Code example #1
def _get_tree(code, shell):
    "Returns an AST tree of the JS passed in `code`."

    if not code:
        return None

    cmd = [shell, "-e", BOOTSTRAP_SCRIPT]
    shell_obj = subprocess.Popen(
        cmd, shell=False, stdin=subprocess.PIPE, stderr=subprocess.PIPE,
        stdout=subprocess.PIPE)

    code = json.dumps(JS_ESCAPE.sub("u", unicodehelper.decode(code)))
    data, stderr = shell_obj.communicate(code)

    if stderr:
        raise JSReflectException(stderr)

    if not data:
        raise JSReflectException("Reflection failed")

    data = unicodehelper.decode(data)
    parsed = json.loads(data, strict=False)

    if parsed.get("error"):
        if parsed["error_message"].startswith("ReferenceError: Reflect"):
            raise RuntimeError("Spidermonkey version too old; "
                               "1.8pre+ required; error='%s'; "
                               "spidermonkey='%s'" % (parsed["error_message"],
                                                      shell))
        else:
            raise JSReflectException(parsed["error_message"]).line_num(
                    parsed["line_number"])

    return parsed
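
Every example on this page funnels raw bytes through unicodehelper.decode before doing anything else. The helper itself is not reproduced here; as a rough sketch of what such a function could look like (code example #27 below describes it as transparently handling BOM encoding), something along these lines would do. The BOM table and the latin-1 fallback are assumptions, not the real module:

import codecs

# Sketch only: check multi-byte BOMs first (UTF-32 before UTF-16, since the
# UTF-32-LE BOM starts with the same bytes as the UTF-16-LE BOM).
BOMS = [
    (codecs.BOM_UTF32_BE, 'utf-32-be'),
    (codecs.BOM_UTF32_LE, 'utf-32-le'),
    (codecs.BOM_UTF8, 'utf-8'),
    (codecs.BOM_UTF16_BE, 'utf-16-be'),
    (codecs.BOM_UTF16_LE, 'utf-16-le'),
]


def decode(data):
    """Return `data` as text, stripping a BOM and tolerating bad bytes."""
    if not isinstance(data, bytes):
        return data
    for bom, encoding in BOMS:
        if data.startswith(bom):
            return data[len(bom):].decode(encoding, 'replace')
    try:
        return data.decode('utf-8')
    except UnicodeDecodeError:
        return data.decode('latin-1')  # last-resort 8-bit fallback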
Code example #2
def _get_tree(code, shell=SPIDERMONKEY_INSTALLATION):
    "Returns an AST tree of the JS passed in `code`."

    if not code:
        return None

    code = unicodehelper.decode(code)

    temp = tempfile.NamedTemporaryFile(mode="w+b", delete=False)
    #temp.write(codecs.BOM_UTF8)
    temp.write(code.encode("utf_8"))
    temp.flush()

    data = """try{
        print(JSON.stringify(Reflect.parse(read(%s))));
    } catch(e) {
        print(JSON.stringify({
            "error":true,
            "error_message":e.toString(),
            "line_number":e.lineNumber
        }));
    }""" % json.dumps(temp.name)

    try:
        cmd = [shell, "-e", data, "-U"]
        shell_obj = subprocess.Popen(cmd,
                                     shell=False,
                                     stderr=subprocess.PIPE,
                                     stdout=subprocess.PIPE)

        data, stderr = shell_obj.communicate()
        if stderr:
            raise RuntimeError('Error calling %r: %s' % (cmd, stderr))

        # The temp file was created with delete=False, so remove it explicitly.
    finally:
        try:
            temp.close()
            os.unlink(temp.name)
        except:
            pass

    if not data:
        raise JSReflectException("Reflection failed")

    data = unicodehelper.decode(data)
    parsed = json.loads(data, strict=False)

    if "error" in parsed and parsed["error"]:
        if parsed["error_message"].startswith("ReferenceError: Reflect"):
            raise RuntimeError("Spidermonkey version too old; "
                               "1.8pre+ required; error='%s'; "
                               "spidermonkey='%s'" % (parsed["error_message"],
                                                      shell))
        else:
            raise JSReflectException(parsed["error_message"]).line_num(
                    parsed["line_number"])

    return parsed
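
On success, `parsed` is the Reflect.parse AST decoded from JSON. Purely for orientation, an abbreviated sketch of the shape such a tree could take for the input `var answer = 42;` is shown below; location data and other fields are omitted, so treat the keys as illustrative rather than exhaustive:

sample_tree = {
    'type': 'Program',
    'body': [{
        'type': 'VariableDeclaration',
        'kind': 'var',
        'declarations': [{
            'type': 'VariableDeclarator',
            'id': {'type': 'Identifier', 'name': 'answer'},
            'init': {'type': 'Literal', 'value': 42},
        }],
    }],
}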
Code example #3
    def _feed_parser(self, line):
        """Feed incoming data into the underlying HTMLParser."""

        line = unicodehelper.decode(line)

        try:
            try:
                self.feed(line + '\n')
            except UnicodeDecodeError:
                line = line.decode('ascii', 'ignore')
                self.feed(line + '\n')

        except HTMLParseError as err:
            if DEBUG:  # pragma: no cover
                print self.xml_state, err

            if 'markup' in self.reported:
                return

            if self.strict:
                self.err.warning(
                    err_id=('markup', '_feed', 'parse_error'),
                    warning='Markup parsing error',
                    description=('There was an error parsing a markup '
                                 'file.', str(err)),
                    filename=self.filename,
                    line=self.line,
                    context=self.context)
            self.reported.add('markup')
Code example #4
File: spidermonkey.py Project: dimonov/amo-validator
def prepare_code(code):
    """Prepare code for tree generation."""

    code = unicodehelper.decode(code)
    # Acceptable unicode characters still need to be stripped. Just remove the
    # slash: a character is necessary to prevent bad identifier errors.
    return JS_ESCAPE.sub("u", code)
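
JS_ESCAPE is defined elsewhere in spidermonkey.py and is not shown on this page. Going by the comment above, it matches backslash escape sequences so that a `\uXXXX`-style escape collapses to a plain identifier character. A hypothetical pattern with that behaviour (the real regex may differ) is:

import re

# Hypothetical stand-in: one or more backslashes followed by a "u" or "x"
# escape introducer; the substitution keeps only the letter.
JS_ESCAPE = re.compile(r'\\+[ux]', re.IGNORECASE)

print(JS_ESCAPE.sub('u', r'var a = "\u0041";'))  # var a = "u0041";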
Code example #5
File: utils.py Project: kleopatra999/addons-server
    def __init__(self, path, data=''):
        self.path = path

        if not data:
            with open(path) as fobj:
                data = fobj.read()

        lexer = JsLexer()

        json_string = ''

        # Run through the JSON and remove all comments before trying to read
        # the manifest file. Firefox and the WebExtension spec only allow
        # line comments (starting with `//`), not block comments (starting
        # with `/*`), but we strip out both in AMO: the linter flags
        # block-level comments explicitly as an error, so the developer can
        # change them to line-level comments.
        for name, token in lexer.lex(data):
            if name not in ('blockcomment', 'linecomment'):
                json_string += token

        self.data = json.loads(unicodehelper.decode(json_string))
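
The loop above relies on JsLexer from the surrounding project to tokenise the manifest. As a self-contained illustration of the same idea (this is not the project's lexer), a small state machine can drop `//` and `/* */` comments while leaving comment-like text inside JSON strings untouched:

import json


def strip_json_comments(text):
    """Remove // line comments and /* */ block comments outside of strings."""
    out, i, n = [], 0, len(text)
    in_string = False
    while i < n:
        ch = text[i]
        if in_string:
            out.append(ch)
            if ch == '\\' and i + 1 < n:   # keep escaped characters intact
                out.append(text[i + 1])
                i += 1
            elif ch == '"':
                in_string = False
        elif ch == '"':
            in_string = True
            out.append(ch)
        elif text.startswith('//', i):     # line comment: skip to end of line
            i = text.find('\n', i)
            if i == -1:
                break
            continue
        elif text.startswith('/*', i):     # block comment: skip past "*/"
            end = text.find('*/', i + 2)
            i = n if end == -1 else end + 2
            continue
        else:
            out.append(ch)
        i += 1
    return ''.join(out)


manifest = '{"name": "demo", /* block */ "version": "1.0" // trailing\n}'
print(json.loads(strip_json_comments(manifest)))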
Code example #6
File: utils.py Project: justinpotts/addons-server
    def __init__(self, path, data=''):
        self.path = path

        if not data:
            with open(self.path) as fobj:
                data = fobj.read()

        lexer = JsLexer()

        json_string = ''

        # Run through the JSON and remove all comments before trying to read
        # the manifest file. Firefox and the WebExtension spec only allow
        # line comments (starting with `//`), not block comments (starting
        # with `/*`), but we strip out both in AMO: the linter flags
        # block-level comments explicitly as an error, so the developer can
        # change them to line-level comments.
        for name, token in lexer.lex(data):
            if name not in ('blockcomment', 'linecomment'):
                json_string += token

        self.data = json.loads(unicodehelper.decode(json_string))
Code example #7
    def _feed_parser(self, line):
        """Feed incoming data into the underlying HTMLParser."""

        line = unicodehelper.decode(line)

        try:
            try:
                self.feed(line + '\n')
            except UnicodeDecodeError:
                line = line.decode('ascii', 'ignore')
                self.feed(line + '\n')

        except HTMLParseError as err:
            if DEBUG:  # pragma: no cover
                print self.xml_state, err

            if 'markup' in self.reported:
                return

            if self.strict:
                self.err.warning(
                    err_id=('markup', '_feed', 'parse_error'),
                    warning='Markup parsing error',
                    description=('There was an error parsing a markup '
                                 'file.', str(err)),
                    filename=self.filename,
                    line=self.line,
                    context=self.context)
            self.reported.add('markup')
Code example #8
def _do_test(path):
    'Performs a test on a JS file'

    text = open(path).read()
    utext = unicodehelper.decode(text)

    print utext.encode('ascii', 'backslashreplace')
    nose.tools.eq_(utext, COMPARISON)
Code example #9
def _do_test(path):
    "Performs a test on a JS file"

    text = open(path).read()
    utext = unicodehelper.decode(text)

    print utext.encode("ascii", "backslashreplace")
    nose.tools.eq_(utext, COMPARISON)
Code example #10
def _do_test(path):
    'Performs a test on a JS file'

    text = open(path).read()
    utext = unicodehelper.decode(text)

    print utext.encode('ascii', 'backslashreplace')
    assert utext == COMPARISON
Code example #11
def prepare_code(code, err, filename):
    "Prepares code for tree generation"
    # Acceptable unicode characters still need to be stripped. Just remove the
    # slash: a character is necessary to prevent bad identifier errors
    code = JS_ESCAPE.sub("u", code)

    code = unicodehelper.decode(code)
    return code
Code example #12
    def _save_to_buffer(self, data):
        """Save data to the XML buffer for the current tag."""

        # We're not interested in data that isn't in a tag.
        if not self.xml_buffer:
            return

        data = unicodehelper.decode(data)

        self.xml_buffer[-1] += data
Code example #13
    def _save_to_buffer(self, data):
        """Save data to the XML buffer for the current tag."""

        # We're not interested in data that isn't in a tag.
        if not self.xml_buffer:
            return

        data = unicodehelper.decode(data)

        self.xml_buffer[-1] += data
Code example #14
    def _save_to_buffer(self, data):
        """Save data to the XML buffer for the current tag."""

        # We're not interested in data that isn't in a tag.
        if not self.xml_buffer:
            return

        data = unicodehelper.decode(data)

        self.xml_buffer[-1] += data
        if self.xml_position_stack[-1] is None:
            self.xml_position_stack[-1] = self.getpos()
Code example #15
    def _save_to_buffer(self, data):
        """Save data to the XML buffer for the current tag."""

        # We're not interested in data that isn't in a tag.
        if not self.xml_buffer:
            return

        data = unicodehelper.decode(data)

        self.xml_buffer[-1] += data
        if self.xml_position_stack[-1] is None:
            self.xml_position_stack[-1] = self.getpos()
Code example #16
File: content.py Project: fox2mike/amo-validator
def test_packed_scripts(err, xpi_package):
    """
    Scripts must be tested separately from normal files to allow for markup
    files to mark scripts as being potentially polluting.
    """

    # This test doesn't apply to subpackages. We keep references to the
    # subpackage bundles so we can process everything at once in an unpushed
    # state.
    if err.is_nested_package():
        return

    scripts = err.get_resource("scripts")
    if not scripts:
        return

    # Get the chrome manifest in case there's information about pollution
    # exemptions.
    chrome = err.get_resource("chrome.manifest_nopush")
    marked_scripts = err.get_resource("marked_scripts")
    if not marked_scripts:
        marked_scripts = set()

    # Process all of the scripts that were found separately from the rest of
    # the package contents.
    for script_bundle in scripts:
        package = script_bundle["package"]

        # Set the error bundle's package state to what it was when we first
        # encountered the script file during the content tests.
        err.package_stack = script_bundle["state"]

        for script in script_bundle["scripts"]:
            file_data = unicodehelper.decode(package.read(script))

            if marked_scripts:
                reversed_script = chrome.reverse_lookup(script_bundle["state"],
                                                        script)
                # Run the standard script tests on the script, but mark the
                # script as pollutable if its chrome URL is marked as being so.
                testendpoint_js.test_js_file(
                        err, script, file_data,
                        pollutable=reversed_script in marked_scripts)
            else:
                # Run the standard script tests on the scripts.
                testendpoint_js.test_js_file(err, script, file_data)
            run_regex_tests(file_data, err, script, is_js=True)

    # This test only runs when the package stack is empty, so restore it to
    # its original (empty) state.
    err.package_stack = []
Code example #17
File: markuptester.py Project: Rob--W/amo-validator
    def _feed_parser(self, line):
        """Feed incoming data into the underlying HTMLParser."""

        line = unicodehelper.decode(line)

        try:
            self.feed(line + "\n")
        except UnicodeDecodeError, exc_instance:
            exc_class, val, traceback = sys.exc_info()
            try:
                line = line.decode("ascii", "ignore")
                self.feed(line + "\n")
            except Exception:
                raise exc_instance, None, traceback
Code example #18
    def _feed_parser(self, line):
        """Feed incoming data into the underlying HTMLParser."""

        line = unicodehelper.decode(line)

        try:
            self.feed(line + "\n")
        except UnicodeDecodeError, exc_instance:
            exc_class, val, traceback = sys.exc_info()
            try:
                line = line.decode("ascii", "ignore")
                self.feed(line + "\n")
            except Exception:
                raise exc_instance, None, traceback
Code example #19
    def _feed_parser(self, line):
        "Feeds data into the parser"

        line = unicodehelper.decode(line)

        try:
            self.feed(line + "\n")
        except UnicodeDecodeError, exc_instance:
            exc_class, val, traceback = sys.exc_info()
            try:
                line = line.decode("ascii", "ignore")
                self.feed(line + "\n")
            except:
                raise exc_instance, None, traceback
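
Code examples #17-#19 are written in Python 2 syntax: `except UnicodeDecodeError, exc_instance` and the three-argument `raise exc, None, traceback`. For comparison, here is a self-contained sketch of the same retry-then-re-raise pattern in Python 3 spelling; the UTF-8/ASCII fallback is only an illustration:

import sys


def decode_or_reraise(data):
    """Try UTF-8 first; on failure fall back, re-raising the original error."""
    try:
        return data.decode('utf-8')
    except UnicodeDecodeError as exc_instance:
        _, _, traceback = sys.exc_info()
        try:
            return data.decode('ascii', 'ignore')
        except Exception:
            # Python 3 equivalent of `raise exc_instance, None, traceback`.
            raise exc_instance.with_traceback(traceback)


print(decode_or_reraise(b'caf\xc3\xa9'))  # 'café'
print(decode_or_reraise(b'caf\xe9'))      # 'caf' (ASCII fallback drops the bad byte)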
Code example #20
File: jsshell.py Project: pombredanne/amo-validator
    def get_tree(self, code):
        if isinstance(code, str):
            code = unicodehelper.decode(code)

        try:
            self.stdin.write(json.dumps(code))
            self.stdin.write('\n')

            output = json.loads(self.stdout.readline(), strict=False)
        except Exception:
            # If this instance is the cached instance, clear it.
            if self == self.__class__.instance:
                self.__class__.instance = None
            raise

        if output.get('error'):
            raise JSReflectException(output['error_message'],
                                     output['line_number'])

        return output
Code example #21
File: jsshell.py Project: Sancus/amo-validator
    def get_tree(self, code):
        if isinstance(code, str):
            code = unicodehelper.decode(code)

        try:
            self.stdin.write(json.dumps(code))
            self.stdin.write('\n')

            output = json.loads(self.stdout.readline(), strict=False)
        except Exception:
            # If this instance is the cached instance, clear it.
            if self == self.__class__.instance:
                self.__class__.instance = None
            raise

        if output.get('error'):
            raise JSReflectException(output['error_message'],
                                     output['line_number'])

        return output
Code example #22
    def __init__(self, err, data, namespace=None):
        self.err = err
        self.data = json.loads(unicodehelper.decode(data))
Code example #23
File: content.py Project: Rob--W/amo-validator
def _process_file(err, xpi_package, name, file_data, name_lower,
                  pollutable=False):
    """Process a single file's content tests."""

    # If that item is a container file, unzip it and scan it.
    if name_lower.endswith(".jar"):
        # This is either a subpackage or a nested theme.
        is_subpackage = not err.get_resource("is_multipackage")
        # Unpack the package and load it up.
        package = StringIO(file_data)
        try:
            sub_xpi = XPIManager(package, mode="r", name=name,
                                 subpackage=is_subpackage)
        except Exception:
            err.error(("testcases_content",
                       "test_packed_packages",
                       "jar_subpackage_corrupt"),
                      "Subpackage corrupt.",
                      "The subpackage could not be opened due to issues "
                      "with corruption. Ensure that the file is valid.",
                      name)
            return None

        # Let the error bundle know we're in a sub-package.
        err.push_state(name)
        err.detected_type = (PACKAGE_SUBPACKAGE if is_subpackage else
                             PACKAGE_THEME)
        err.set_tier(1)
        supported_versions = (err.supported_versions.copy() if
                              err.supported_versions else
                              err.supported_versions)

        if is_subpackage:
            testendpoint_validator.test_inner_package(err, sub_xpi)
        else:
            testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)

        err.supported_versions = supported_versions

    elif name_lower.endswith(".xpi"):
        # It's not a subpackage, it's a nested extension. These are
        # found in multi-extension packages.

        # Unpack!
        package = StringIO(file_data)

        err.push_state(name_lower)
        err.set_tier(1)

        # There are no expected types for packages within a multi-
        # item package.
        testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)  # Reset to the current tier

    elif name_lower.endswith((".css", ".js", ".jsm")):

        if not file_data:
            return None

        # Convert the file data to unicode
        file_data = unicodehelper.decode(file_data)
        is_js = False

        if name_lower.endswith(".css"):
            testendpoint_css.test_css_file(err, name, file_data)

        elif name_lower.endswith((".js", ".jsm")):
            is_js = True
            testendpoint_js.test_js_file(err, name, file_data,
                                         pollutable=pollutable)

        run_regex_tests(file_data, err, name, is_js=is_js)

        return True

    return False
Code example #24
File: content.py Project: Rob--W/amo-validator
def test_packed_scripts(err, xpi_package):
    """
    Scripts must be tested separately from normal files to allow for markup
    files to mark scripts as being potentially polluting.
    """

    # This test doesn't apply to subpackages. We keep references to the
    # subpackage bundles so we can process everything at once in an unpushed
    # state.
    if err.is_nested_package:
        return

    scripts = err.get_resource("scripts")
    if not scripts:
        return

    total_scripts = sum(len(bundle["scripts"]) for bundle in scripts)
    exhaustive = True
    if total_scripts > MAX_JS_THRESHOLD:
        err.warning(
            err_id=("testcases_content", "packed_js", "too_much_js"),
            warning="TOO MUCH JS FOR EXHAUSTIVE VALIDATION",
            description="There are too many JS files for the validator to "
                        "process sequentially. An editor must manually "
                        "review the JS in this add-on.")
        exhaustive = False

    # Get the chrome manifest in case there's information about pollution
    # exemptions.
    chrome = err.get_resource("chrome.manifest_nopush")
    marked_scripts = err.get_resource("marked_scripts")
    if not marked_scripts:
        marked_scripts = set()

    # Process all of the scripts that were found separately from the rest of
    # the package contents.
    for script_bundle in scripts:
        package = script_bundle["package"]

        # Set the error bundle's package state to what it was when we first
        # encountered the script file during the content tests.
        for archive in script_bundle["state"]:
            err.push_state(archive)

        for script in script_bundle["scripts"]:
            file_data = unicodehelper.decode(package.read(script))

            run_regex_tests(file_data, err, script, is_js=True)
            # If we're not running an exhaustive set of tests, skip the full JS
            # parse and traversal.
            if not exhaustive:
                continue

            if marked_scripts:
                reversed_script = chrome.reverse_lookup(script_bundle["state"],
                                                        script)
                # Run the standard script tests on the script, but mark the
                # script as pollutable if its chrome URL is marked as being so.
                testendpoint_js.test_js_file(
                        err, script, file_data,
                        pollutable=reversed_script in marked_scripts)
            else:
                # Run the standard script tests on the scripts.
                testendpoint_js.test_js_file(err, script, file_data)

        for i in range(len(script_bundle["state"])):
            err.pop_state()
Code example #25
File: content.py Project: pombreda/amo-validator
def _process_file(err,
                  xpi_package,
                  name,
                  file_data,
                  name_lower,
                  pollutable=False):
    """Process a single file's content tests."""

    # If that item is a container file, unzip it and scan it.
    if name_lower.endswith(".jar"):
        # This is either a subpackage or a nested theme.
        is_subpackage = not err.get_resource("is_multipackage")
        # Unpack the package and load it up.
        package = StringIO(file_data)
        try:
            sub_xpi = XPIManager(package,
                                 mode="r",
                                 name=name,
                                 subpackage=is_subpackage)
        except Exception:
            err.error(("testcases_content", "test_packed_packages",
                       "jar_subpackage_corrupt"), "Subpackage corrupt.",
                      "The subpackage could not be opened due to issues "
                      "with corruption. Ensure that the file is valid.", name)
            return None

        # Let the error bundle know we're in a sub-package.
        err.push_state(name)
        err.detected_type = (PACKAGE_SUBPACKAGE
                             if is_subpackage else PACKAGE_THEME)
        err.set_tier(1)
        supported_versions = (err.supported_versions.copy()
                              if err.supported_versions else
                              err.supported_versions)

        if is_subpackage:
            testendpoint_validator.test_inner_package(err, sub_xpi)
        else:
            testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)

        err.supported_versions = supported_versions

    elif name_lower.endswith(".xpi"):
        # It's not a subpackage, it's a nested extension. These are
        # found in multi-extension packages.

        # Unpack!
        package = StringIO(file_data)

        err.push_state(name_lower)
        err.set_tier(1)

        # There are no expected types for packages within a multi-
        # item package.
        testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)  # Reset to the current tier

    elif name_lower.endswith((".css", ".js", ".jsm")):

        if not file_data:
            return None

        # Convert the file data to unicode
        file_data = unicodehelper.decode(file_data)
        is_js = False

        if name_lower.endswith(".css"):
            testendpoint_css.test_css_file(err, name, file_data)

        elif name_lower.endswith((".js", ".jsm")):
            is_js = True
            testendpoint_js.test_js_file(err,
                                         name,
                                         file_data,
                                         pollutable=pollutable)

        run_regex_tests(file_data, err, name, is_js=is_js)

        return True

    return False
Code example #26
File: content.py Project: kumar303/amo-validator
def _process_file(err, xpi_package, name, file_data, name_lower,
                  pollutable=False):
    """Process a single file's content tests."""

    # If that item is a container file, unzip it and scan it.
    if name_lower.endswith('.jar'):
        # This is either a subpackage or a nested theme.
        is_subpackage = not err.get_resource('is_multipackage')
        # Unpack the package and load it up.
        package = StringIO(file_data)
        try:
            sub_xpi = XPIManager(package, mode='r', name=name,
                                 subpackage=is_subpackage)
        except BadZipfile:
            err.error(('testcases_content',
                       'test_packed_packages',
                       'jar_subpackage_corrupt'),
                      'Subpackage corrupt.',
                      'The subpackage appears to be corrupt, and could not '
                      'be opened.',
                      name)
            return None

        # Let the error bundle know we're in a sub-package.
        err.push_state(name)
        err.detected_type = (PACKAGE_SUBPACKAGE if is_subpackage else
                             PACKAGE_THEME)
        err.set_tier(1)
        supported_versions = (err.supported_versions.copy() if
                              err.supported_versions else
                              err.supported_versions)

        if is_subpackage:
            testendpoint_validator.test_inner_package(err, sub_xpi)
        else:
            testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)

        err.supported_versions = supported_versions

    elif name_lower.endswith('.xpi'):
        # It's not a subpackage, it's a nested extension. These are
        # found in multi-extension packages.

        # Unpack!
        package = StringIO(file_data)

        err.push_state(name_lower)
        err.set_tier(1)

        # There are no expected types for packages within a multi-
        # item package.
        testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)  # Reset to the current tier

    elif name_lower.endswith(('.css', '.js', '.jsm')):

        if not file_data:
            return None

        # Convert the file data to unicode
        file_data = unicodehelper.decode(file_data)
        is_js = name_lower.endswith(('.js', '.jsm'))

        if name_lower.endswith('.css'):
            testendpoint_css.test_css_file(err, name, file_data)

        elif is_js:
            testendpoint_js.test_js_file(err, name, file_data,
                                         pollutable=pollutable)

        run_regex_tests(file_data, err, name, is_js=is_js)

        return True

    else:
        if file_data:
            file_data = unicodehelper.decode(file_data)
            run_regex_tests(file_data, err, name, explicit=True)

    return False
Code example #27
def decode_json(json_string):
    """Helper that transparently handles BOM encoding."""
    return json.loads(unicodehelper.decode(json_string))
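
A quick usage sketch of why a helper like this exists: if a byte-order mark survives into the decoded text, json.loads rejects it. The following self-contained equivalent uses only the standard library (the add-on ID is made up for the example):

import codecs
import json

raw = codecs.BOM_UTF8 + b'{"id": "my-addon@example.com"}'
# Decoding with plain utf-8 would leave a leading '\ufeff' that json.loads
# refuses; utf-8-sig strips the BOM, mirroring what the helper above is
# described as doing.
data = json.loads(raw.decode('utf-8-sig'))
print(data['id'])  # my-addon@example.com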
Code example #28
def test_packed_scripts(err, xpi_package):
    """
    Scripts must be tested separately from normal files to allow for markup
    files to mark scripts as being potentially polluting.
    """

    # This test doesn't apply to subpackages. We keep references to the
    # subpackage bundles so we can process everything at once in an unpushed
    # state.
    if err.is_nested_package:
        return

    scripts = err.get_resource('scripts')
    if not scripts:
        return

    total_scripts = sum(len(bundle['scripts']) for bundle in scripts)
    exhaustive = True
    if total_scripts > MAX_JS_THRESHOLD:
        err.warning(
            err_id=('testcases_content', 'packed_js', 'too_much_js'),
            warning='TOO MUCH JS FOR EXHAUSTIVE VALIDATION',
            description='There are too many JS files for the validator to '
                        'process sequentially. An editor must manually '
                        'review the JS in this add-on.')
        exhaustive = False

    # Get the chrome manifest in case there's information about pollution
    # exemptions.
    chrome = err.get_resource('chrome.manifest_nopush')
    marked_scripts = err.get_resource('marked_scripts')
    if not marked_scripts:
        marked_scripts = set()

    # Process all of the scripts that were found separately from the rest of
    # the package contents.
    for script_bundle in scripts:
        package = script_bundle['package']

        # Set the error bundle's package state to what it was when we first
        # encountered the script file during the content tests.
        for archive in script_bundle['state']:
            err.push_state(archive)

        for script in script_bundle['scripts']:
            file_data = unicodehelper.decode(package.read(script))

            run_regex_tests(file_data, err, script)
            # If we're not running an exhaustive set of tests, skip the full JS
            # parse and traversal.
            if not exhaustive:
                continue

            if marked_scripts:
                reversed_script = chrome.reverse_lookup(
                    script_bundle['state'], script)
                # Run the standard script tests on the script, but mark the
                # script as pollutable if its chrome URL is marked as being so.
                testendpoint_js.test_js_file(
                        err, script, file_data,
                        pollutable=reversed_script in marked_scripts)
            else:
                # Run the standard script tests on the scripts.
                testendpoint_js.test_js_file(err, script, file_data)

        for i in range(len(script_bundle['state'])):
            err.pop_state()
Code example #29
def _process_file(err,
                  xpi_package,
                  name,
                  file_data,
                  name_lower,
                  pollutable=False):
    """Process a single file's content tests."""

    extension = os.path.splitext(name_lower)[1]

    # If that item is a container file, unzip it and scan it.
    if extension == '.jar':
        # This is either a subpackage or a nested theme.
        is_subpackage = not err.get_resource('is_multipackage')
        # Unpack the package and load it up.
        package = StringIO(file_data)
        try:
            sub_xpi = XPIManager(package,
                                 mode='r',
                                 name=name,
                                 subpackage=is_subpackage)
        except BadZipfile:
            err.error(('testcases_content', 'test_packed_packages',
                       'jar_subpackage_corrupt'), 'Subpackage corrupt.',
                      'The subpackage appears to be corrupt, and could not '
                      'be opened.', name)
            return

        # Let the error bundle know we're in a sub-package.
        err.push_state(name)
        err.detected_type = (PACKAGE_SUBPACKAGE
                             if is_subpackage else PACKAGE_THEME)
        err.set_tier(1)
        supported_versions = (err.supported_versions.copy()
                              if err.supported_versions else
                              err.supported_versions)

        if is_subpackage:
            testendpoint_validator.test_inner_package(err, sub_xpi)
        else:
            testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)

        err.supported_versions = supported_versions

    elif extension == '.xpi':
        # It's not a subpackage, it's a nested extension. These are
        # found in multi-extension packages.

        # Unpack!
        package = StringIO(file_data)

        err.push_state(name_lower)
        err.set_tier(1)

        # There are no expected types for packages within a multi-
        # item package.
        testendpoint_validator.test_package(err, package, name)

        err.pop_state()
        err.set_tier(2)  # Reset to the current tier

    else:

        if not file_data:
            return

        # Convert the file data to unicode.
        file_data = unicodehelper.decode(file_data)

        if extension in ('.js', '.jsm'):
            testendpoint_js.test_js_file(err,
                                         name,
                                         file_data,
                                         pollutable=pollutable)
        elif extension == '.css':
            testendpoint_css.test_css_file(err, name, file_data)

        run_regex_tests(file_data, err, filename=name)
Code example #30
File: utils.py Project: tsl143/addons-server
def decode_json(json_string):
    """Helper that transparently handles BOM encoding."""
    return json.loads(unicodehelper.decode(json_string))