Example #1
  def buildTestPath(self, options):
    """ Build the url path to the specific test harness and test file or directory
        Build a manifest of tests to run and write out a json file for the harness to read
    """
    if options.manifestFile and os.path.isfile(options.manifestFile):
      manifest = TestManifest(strict=False)
      manifest.read(options.manifestFile)
      # Bug 883858 - return all tests including disabled tests 
      tests = manifest.active_tests(disabled=False, **mozinfo.info)
      paths = []
      for test in tests:
        tp = test['path'].split(self.getTestRoot(options), 1)[1].strip('/')

        # Filter out tests if we are using --test-path
        if options.testPath and not tp.startswith(options.testPath):
          continue

        paths.append({'path': tp})

      # Bug 883865 - add this functionality into manifestDestiny
      with open('tests.json', 'w') as manifestFile:
        manifestFile.write(json.dumps({'tests': paths}))
      options.manifestFile = 'tests.json'

    testHost = "http://mochi.test:8888"
    testURL = ("/").join([testHost, self.TEST_PATH, options.testPath])
    if os.path.isfile(os.path.join(self.oldcwd, os.path.dirname(__file__), self.TEST_PATH, options.testPath)) and options.repeat > 0:
      testURL = ("/").join([testHost, self.PLAIN_LOOP_PATH])
    if options.chrome or options.a11y:
      testURL = ("/").join([testHost, self.CHROME_PATH])
    elif options.browserChrome:
      testURL = "about:blank"
    elif options.ipcplugins:
      testURL = ("/").join([testHost, self.TEST_PATH, "dom/plugins/test"])
    return testURL
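
The pattern above (read a manifest, resolve the active tests against mozinfo, then dump the selected paths to a tests.json file for the harness) can be exercised on its own. A minimal standalone sketch, assuming manifestparser and mozinfo are installed; the tests.ini path is a hypothetical placeholder:

import json

import mozinfo
from manifestparser import TestManifest

# Read the manifest and keep only the tests that are active for this platform.
manifest = TestManifest(strict=False)
manifest.read('tests.ini')  # hypothetical manifest path
tests = manifest.active_tests(disabled=False, **mozinfo.info)

# Write the selected paths to a JSON file for a harness to read,
# mirroring the tests.json step in buildTestPath above.
with open('tests.json', 'w') as f:
    json.dump({'tests': [{'path': t['path']} for t in tests]}, f)
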
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.endswith('.ini')):
                        msg_tmpl = ("Ignoring manifest '{0}'; running all tests in '{1}'."
                                    " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename), filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return


        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated with the following: {}".format(json_path))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   app=self.appName,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                       manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({'filepath': filepath, 'expected': expected, 'test_container': test_container})
    def __init__(self, options, **kwargs):
        self.options = options
        self.server = None
        self.logger = mozlog.getLogger('PEP')

        # create the profile
        self.profile = self.profile_class(profile=self.options.profilePath,
                                          addons=[os.path.join(here, 'extension')])

        # fork a server to serve the test related files
        if self.options.serverPath:
            self.runServer()

        tests = []
        # TODO is there a better way of doing this?
        if self.options.testPath.endswith('.js'):
            # a single test file was passed in
            testObj = {}
            testObj['path'] = os.path.realpath(self.options.testPath)
            testObj['name'] = os.path.basename(self.options.testPath)
            tests.append(testObj)
        else:
            # a test manifest was passed in
            # open and convert the manifest to json
            manifest = TestManifest()
            manifest.read(self.options.testPath)
            tests = manifest.get()

        # create a manifest object to be read by the JS side
        manifestObj = {}
        manifestObj['tests'] = tests

        # write manifest to a JSON file
        jsonManifest = open(os.path.join(here, 'manifest.json'), 'w')
        jsonManifest.write(json.dumps(manifestObj))
        jsonManifest.close()

        # setup environment
        env = os.environ.copy()
        env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
        env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = str(options.tracerThreshold)
        env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = str(options.tracerInterval)
        env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'

        # construct the browser arguments
        cmdargs = []
        # TODO Make browserArgs a list
        cmdargs.extend(self.options.browserArgs)
        cmdargs.extend(['-pep-start', os.path.realpath(jsonManifest.name)])

        # run with managed process handler
        self.runner = self.runner_class(profile=self.profile,
                                        binary=self.options.binary,
                                        cmdargs=cmdargs,
                                        env=env,
                                        process_class=PepProcess)
Example #4
    def add_test(self, test, expected="pass", test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.startswith("test_") and (filename.endswith(".py") or filename.endswith(".js")):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == ".ini":
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated with the following: {}".format(json_path))
            manifest_tests = manifest.active_tests(
                exists=False,
                disabled=True,
                filters=filters,
                device=self.device,
                app=self.appName,
                e10s=self.e10s,
                **mozinfo.info
            )
            if len(manifest_tests) == 0:
                self.logger.error(
                    "no tests to run using specified " "combination of filters: {}".format(manifest.fmt_filters())
                )

            target_tests = []
            for test in manifest_tests:
                if test.get("disabled"):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i["path"])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({"filepath": filepath, "expected": expected, "test_container": test_container})
    def get_test_list(self, manifest):
        self.logger.info("Reading test manifest: %s" % manifest)
        mft = TestManifest()
        mft.read(manifest)

        # In the future if we want to add in more processing to the manifest
        # here is where you'd do that. Right now, we just return a list of
        # tests
        testlist = []
        for i in mft.active_tests(exists=False, disabled=False):
            testlist.append(i["path"])

        return testlist
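
The helper above reduces to a single manifestparser call; a standalone sketch with a hypothetical manifest path:

from manifestparser import TestManifest

mft = TestManifest()
mft.read('marionette.ini')  # hypothetical manifest path

# active_tests(exists=False, disabled=False) drops disabled entries and skips
# the on-disk existence check; each returned entry is a dict with a 'path' key.
testlist = [t['path'] for t in mft.active_tests(exists=False, disabled=False)]
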
Example #6
    def runTests(self):
        self.startup()
        if isinstance(self.options.manifestFile, TestManifest):
            mp = self.options.manifestFile
        else:
            mp = TestManifest(strict=False)
            mp.read("robocop.ini")
        filters = []
        if self.options.totalChunks:
            filters.append(
                chunk_by_slice(self.options.thisChunk,
                               self.options.totalChunks))
        robocop_tests = mp.active_tests(exists=False,
                                        filters=filters,
                                        **mozinfo.info)
        if not self.options.autorun:
            # Force a single loop iteration. The iteration will start Fennec and
            # the httpd server, but not actually run a test.
            self.options.test_paths = [robocop_tests[0]['name']]
        active_tests = []
        for test in robocop_tests:
            if self.options.test_paths and test[
                    'name'] not in self.options.test_paths:
                continue
            if 'disabled' in test:
                self.log.info('TEST-INFO | skipping %s | %s' %
                              (test['name'], test['disabled']))
                continue
            active_tests.append(test)

        tests_by_manifest = defaultdict(list)
        for test in active_tests:
            tests_by_manifest[test['manifest']].append(test['name'])
        self.log.suite_start(tests_by_manifest)

        worstTestResult = None
        for test in active_tests:
            result = self.runSingleTest(test)
            if worstTestResult is None or worstTestResult == 0:
                worstTestResult = result
        if worstTestResult is None:
            self.log.warning(
                "No tests run. Did you pass an invalid TEST_PATH?")
            worstTestResult = 1
        else:
            print "INFO | runtests.py | Test summary: start."
            logResult = self.logTestSummary()
            print "INFO | runtests.py | Test summary: end."
            if worstTestResult == 0:
                worstTestResult = logResult
        return worstTestResult
Example #7
    def add_test(self, test, expected='pass'):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith('test_')
                            and (filename.endswith('.py')
                                 or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: {} does not exist".format(
                        i["path"]))

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]

                self.add_test(i["path"], i["expected"])
            return

        self.tests.append({'filepath': filepath, 'expected': expected})
Example #8
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                       manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({'filepath': filepath, 'expected': expected, 'test_container': test_container})
Example #9
    def runTests(self):
        self.startup()
        if isinstance(self.options.manifestFile, TestManifest):
            mp = self.options.manifestFile
        else:
            mp = TestManifest(strict=False)
            mp.read(self.options.robocopIni)
        filters = []
        if self.options.totalChunks:
            filters.append(
                chunk_by_slice(self.options.thisChunk, self.options.totalChunks))
        robocop_tests = mp.active_tests(
            exists=False, filters=filters, **mozinfo.info)
        if not self.options.autorun:
            # Force a single loop iteration. The iteration will start Fennec and
            # the httpd server, but not actually run a test.
            self.options.test_paths = [robocop_tests[0]['name']]
        active_tests = []
        for test in robocop_tests:
            if self.options.test_paths and test['name'] not in self.options.test_paths:
                continue
            if 'disabled' in test:
                self.log.info('TEST-INFO | skipping %s | %s' %
                              (test['name'], test['disabled']))
                continue
            active_tests.append(test)

        tests_by_manifest = defaultdict(list)
        for test in active_tests:
            tests_by_manifest[test['manifest']].append(test['name'])
        self.log.suite_start(tests_by_manifest)

        worstTestResult = None
        for test in active_tests:
            result = self.runSingleTest(test)
            if worstTestResult is None or worstTestResult == 0:
                worstTestResult = result
        if worstTestResult is None:
            self.log.warning(
                "No tests run. Did you pass an invalid TEST_PATH?")
            worstTestResult = 1
        else:
            print "INFO | runtests.py | Test summary: start."
            logResult = self.logTestSummary()
            print "INFO | runtests.py | Test summary: end."
            if worstTestResult == 0:
                worstTestResult = logResult
        return worstTestResult
Example #10
def find_manifest_dirs(topsrcdir, manifests):
    """Routine to retrieve directories specified in a manifest, relative to topsrcdir.

    It does not recurse into manifests, as we currently have no need for that.
    """
    dirs = set()

    for p in manifests:
        p = os.path.join(topsrcdir, p)

        if p.endswith(".ini"):
            test_manifest = TestManifest()
            test_manifest.read(p)
            dirs |= set([os.path.dirname(m) for m in test_manifest.manifests()])

        elif p.endswith(".list"):
            m = ReftestManifest()
            m.load(p)
            dirs |= m.dirs

        else:
            raise Exception(
                '"{}" is not a supported manifest format.'.format(
                    os.path.splitext(p)[1]
                )
            )

    dirs = {mozpath.normpath(d[len(topsrcdir) :]).lstrip("/") for d in dirs}

    # Filter out children captured by parent directories because duplicates
    # will confuse things later on.
    def parents(p):
        while True:
            p = mozpath.dirname(p)
            if not p:
                break
            yield p

    seen = set()
    for d in sorted(dirs, key=len):
        if not any(p in seen for p in parents(d)):
            seen.add(d)

    return sorted(seen)
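
A hedged usage sketch of find_manifest_dirs; the source directory and manifest paths below are hypothetical placeholders, and the call assumes those manifests exist on disk:

# Hypothetical invocation: collect the de-duplicated test directories
# referenced by a mochitest-style .ini manifest and a reftest .list manifest.
topsrcdir = '/path/to/source'
manifests = [
    'dom/tests/mochitest.ini',       # read via TestManifest
    'layout/reftests/reftest.list',  # read via ReftestManifest
]
for directory in find_manifest_dirs(topsrcdir, manifests):
    print(directory)
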
Example #11
    def read_tests(self):
        self._tests = []
        manifest = TestManifest()
        manifest.read(self._test_path)
        tests_info = manifest.get()
        for t in tests_info:
            if not t['here'] in sys.path:
                sys.path.append(t['here'])
            if t['name'].endswith('.py'):
                t['name'] = t['name'][:-3]
            # add all classes in module that are derived from PhoneTest to
            # the test list
            tests = [(x[1], os.path.normpath(os.path.join(t['here'],
                                                          t.get('config', ''))))
                     for x in inspect.getmembers(__import__(t['name']),
                                                 inspect.isclass)
                     if x[0] != 'PhoneTest' and issubclass(x[1],
                                                           phonetest.PhoneTest)]
            self._tests.extend(tests)
def print_test_dirs(topsrcdir, manifest_file):
    """
    Simple routine which prints the paths of directories specified
    in a Marionette manifest, relative to topsrcdir.  This does not recurse 
    into manifests, as we currently have no need for that.
    """

    # output the directory of this (parent) manifest
    topsrcdir = os.path.abspath(topsrcdir)
    scriptdir = os.path.abspath(os.path.dirname(__file__))
    print scriptdir[len(topsrcdir) + 1:]

    # output the directories of all the other manifests
    dirs = set()
    manifest = TestManifest()
    manifest.read(manifest_file)
    for i in manifest.get():
        dirs.add(os.path.dirname(i['manifest'])[len(topsrcdir) + 1:])
    for path in dirs:
        print path
def find_manifest_dirs(topsrcdir, manifests):
    """Routine to retrieve directories specified in a manifest, relative to topsrcdir.

    It does not recurse into manifests, as we currently have no need for that.
    """
    dirs = set()

    for p in manifests:
        p = os.path.join(topsrcdir, p)

        if p.endswith('.ini'):
            test_manifest = TestManifest()
            test_manifest.read(p)
            dirs |= set([os.path.dirname(m) for m in test_manifest.manifests()])

        elif p.endswith('.list'):
            m = ReftestManifest()
            m.load(p)
            dirs |= m.dirs

        else:
            raise Exception('"{}" is not a supported manifest format.'.format(
                os.path.splitext(p)[1]))

    dirs = {mozpath.normpath(d[len(topsrcdir):]).lstrip('/') for d in dirs}

    # Filter out children captured by parent directories because duplicates
    # will confuse things later on.
    def parents(p):
        while True:
            p = mozpath.dirname(p)
            if not p:
                break
            yield p

    seen = set()
    for d in sorted(dirs, key=len):
        if not any(p in seen for p in parents(d)):
            seen.add(d)

    return sorted(seen)
Example #14
    def buildTestPath(self, options):
        """ Build the url path to the specific test harness and test file or directory
        Build a manifest of tests to run and write out a json file for the harness to read
    """
        if options.manifestFile and os.path.isfile(options.manifestFile):
            manifest = TestManifest(strict=False)
            manifest.read(options.manifestFile)
            # Bug 883858 - return all tests including disabled tests
            tests = manifest.active_tests(disabled=False, **mozinfo.info)
            paths = []
            for test in tests:
                tp = test['path'].split(self.getTestRoot(options),
                                        1)[1].strip('/')

                # Filter out tests if we are using --test-path
                if options.testPath and not tp.startswith(options.testPath):
                    continue

                paths.append({'path': tp})

            # Bug 883865 - add this functionality into manifestDestiny
            with open('tests.json', 'w') as manifestFile:
                manifestFile.write(json.dumps({'tests': paths}))
            options.manifestFile = 'tests.json'

        testHost = "http://mochi.test:8888"
        testURL = ("/").join([testHost, self.TEST_PATH, options.testPath])
        if os.path.isfile(
                os.path.join(self.oldcwd, os.path.dirname(__file__),
                             self.TEST_PATH,
                             options.testPath)) and options.repeat > 0:
            testURL = ("/").join([testHost, self.PLAIN_LOOP_PATH])
        if options.chrome or options.a11y:
            testURL = ("/").join([testHost, self.CHROME_PATH])
        elif options.browserChrome:
            testURL = "about:blank"
        elif options.ipcplugins:
            testURL = ("/").join(
                [testHost, self.TEST_PATH, "dom/plugins/test"])
        return testURL
def print_test_dirs(topsrcdir, manifest_file):
    """
    Simple routine which prints the paths of directories specified
    in a Marionette manifest, relative to topsrcdir.  This does not recurse 
    into manifests, as we currently have no need for that.
    """

    dirs = set()
    # output the directory of this (parent) manifest
    topsrcdir = os.path.abspath(topsrcdir)
    scriptdir = os.path.abspath(os.path.dirname(__file__))
    dirs.add(scriptdir[len(topsrcdir) + 1:])

    # output the directories of all the other manifests
    manifest = TestManifest()
    manifest.read(manifest_file)
    for i in manifest.get():
        d = os.path.dirname(i['manifest'])[len(topsrcdir) + 1:]
        dirs.add(d)
    for path in dirs:
        path = path.replace('\\', '/')
        print path
Example #16
    def __init__(self, options, **kwargs):
        self.options = options
        self.server = None
        self.logger = mozlog.getLogger('PEP')

        # create the profile
        enable_proxy = False
        locations = ServerLocations()
        if self.options.proxyLocations:
            if not self.options.serverPath:
                self.logger.warning('Can\'t set up proxy without server path')
            else:
                enable_proxy = True
                for proxyLocation in self.options.proxyLocations:
                    locations.read(proxyLocation, False)
                locations.add_host(host='127.0.0.1',
                                   port=self.options.serverPort,
                                   options='primary,privileged')

        self.profile = self.profile_class(profile=self.options.profilePath,
                                          addons=[os.path.join(here, 'extension')],
                                          locations=locations,
                                          proxy=enable_proxy)

        # fork a server to serve the test related files
        if self.options.serverPath:
            self.runServer()

        tests = []
        # TODO is there a better way of doing this?
        if self.options.testPath.endswith('.js'):
            # a single test file was passed in
            testObj = {}
            testObj['path'] = os.path.realpath(self.options.testPath)
            testObj['name'] = os.path.basename(self.options.testPath)
            testObj['here'] = os.path.dirname(testObj['path'])
            tests.append(testObj)
        else:
            # a test manifest was passed in
            # open and convert the manifest to json
            manifest = TestManifest()
            manifest.read(self.options.testPath)
            tests = manifest.get()

        # create a manifest object to be read by the JS side
        manifestObj = {}
        manifestObj['tests'] = tests
        manifestObj['options'] = options.__dict__

        # write manifest to a JSON file
        jsonManifest = open(os.path.join(here, 'manifest.json'), 'w')
        jsonManifest.write(json.dumps(manifestObj))
        jsonManifest.close()

        # setup environment
        env = os.environ.copy()
        env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
        env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = str(options.tracerThreshold)
        env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = str(options.tracerInterval)
        env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'

        # construct the browser arguments
        cmdargs = []
        # TODO Make browserArgs a list
        cmdargs.extend(self.options.browserArgs)
        cmdargs.extend(['-pep-start', os.path.realpath(jsonManifest.name)])

        # run with managed process handler
        self.runner = self.runner_class(profile=self.profile,
                                        binary=self.options.binary,
                                        cmdargs=cmdargs,
                                        env=env,
                                        process_class=PepProcess)
Example #17
    def run_test(self, test, testtype):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith("test_") or filename.startswith("browser_")) and (
                        filename.endswith(".py") or filename.endswith(".js")
                    ):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath, testtype)
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == ".ini":
            testargs = {"skip": "false"}
            if testtype is not None:
                testtypes = testtype.replace("+", " +").replace("-", " -").split()
                for atype in testtypes:
                    if atype.startswith("+"):
                        testargs.update({atype[1:]: "true"})
                    elif atype.startswith("-"):
                        testargs.update({atype[1:]: "false"})
                    else:
                        testargs.update({atype: "true"})

            manifest = TestManifest()
            manifest.read(filepath)
            if options.perf:
                if options.perfserv is None:
                    options.perfserv = manifest.get("perfserv")[0]
                machine_name = socket.gethostname()
                try:
                    manifest.has_key("machine_name")
                    machine_name = manifest.get("machine_name")[0]
                except:
                    self.logger.info("Using machine_name: %s" % machine_name)
                os_name = platform.system()
                os_version = platform.release()
                self.perfrequest = datazilla.DatazillaRequest(
                    server=options.perfserv,
                    machine_name=machine_name,
                    os=os_name,
                    os_version=os_version,
                    platform=manifest.get("platform")[0],
                    build_name=manifest.get("build_name")[0],
                    version=manifest.get("version")[0],
                    revision=self.revision,
                    branch=manifest.get("branch")[0],
                    id=os.getenv("BUILD_ID"),
                    test_date=int(time.time()),
                )

            manifest_tests = manifest.get(**testargs)

            for i in manifest_tests:
                self.run_test(i["path"], testtype)
            return

        self.logger.info("TEST-START %s" % os.path.basename(test))

        if file_ext == ".py":
            test_mod = imp.load_source(mod_name, filepath)

            for name in dir(test_mod):
                obj = getattr(test_mod, name)
                if isinstance(obj, (type, types.ClassType)) and issubclass(obj, unittest.TestCase):
                    testnames = testloader.getTestCaseNames(obj)
                    for testname in testnames:
                        suite.addTest(obj(self.marionette, methodName=testname))

        elif file_ext == ".js":
            suite.addTest(MarionetteJSTestCase(self.marionette, jsFile=filepath))

        if suite.countTestCases():
            results = MarionetteTextTestRunner(verbosity=3).run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if results.perfdata and options.perf:
                self.perfrequest.add_datazilla_result(results.perfdata)
            if hasattr(results, "skipped"):
                self.todo += len(results.skipped) + len(results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], "TEST-UNEXPECTED-FAIL"))
            if hasattr(results, "unexpectedSuccess"):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]), failure[1], "TEST-UNEXPECTED-PASS"))
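
The testtype string handled above is a small flag syntax: a bare word or a '+'-prefixed word sets that manifest key to "true", while a '-'-prefixed word sets it to "false". A quick illustration of the same parsing with a hypothetical type string:

testtype = "browser+b2g-qemu"  # hypothetical type string
testargs = {"skip": "false"}
for atype in testtype.replace("+", " +").replace("-", " -").split():
    if atype.startswith("+"):
        testargs[atype[1:]] = "true"
    elif atype.startswith("-"):
        testargs[atype[1:]] = "false"
    else:
        testargs[atype] = "true"
# testargs is now {"skip": "false", "browser": "true", "b2g": "true", "qemu": "false"}
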
Example #18
def main(args):
    parser = Options()
    options, args = parser.parse_args()
    kill_port = 20703

    if not options.html_manifest or not options.specialpowers or not options.host1 or not options.host2 or not options.signalling_server:
        parser.print_usage()
        return 2

    package_options = get_package_options(parser, options)
    if not package_options:
        parser.print_usage()
        return 2

    if not os.path.isdir(options.specialpowers):
        parser.error("SpecialPowers directory %s does not exist" %
                     options.specialpowers)
        return 2
    if options.prefs and not os.path.isfile(options.prefs):
        parser.error("Prefs file %s does not exist" % options.prefs)
        return 2
    if options.log_dest and not os.path.isdir(options.log_dest):
        parser.error("Log directory %s does not exist" % options.log_dest)
        return 2

    log = mozlog.unstructured.getLogger('steeplechase')
    log.setLevel(logging.DEBUG)
    if ':' in options.host1:
        host1, port = options.host1.split(':')
        dm1 = DeviceManagerSUT(host1, port)
    else:
        dm1 = DeviceManagerSUT(options.host1)
    if ':' in options.host2:
        host2, port = options.host2.split(':')
        dm2 = DeviceManagerSUT(host2, port)
    else:
        dm2 = DeviceManagerSUT(options.host2)

    if (options.killall is not None) and (options.killall == 1):
        kill_dm1 = DeviceManagerSUT(host1, kill_port)
        kill_dm2 = DeviceManagerSUT(host2, kill_port)
        os_type = GetOStypes(package_options)
        print("OS type of host1 is " + os_type[0] + " and host2 is " +
              os_type[1])
        KillFirefoxesCommand(kill_dm1, os_type[0])
        KillFirefoxesCommand(kill_dm2, os_type[1])

    remote_info = [{
        'dm': dm1,
        'binary': package_options.binary,
        'package': package_options.package,
        'is_initiator': True,
        'name': 'Client1'
    }, {
        'dm': dm2,
        'binary': package_options.binary2,
        'package': package_options.package2,
        'is_initiator': False,
        'name': 'Client2'
    }]
    # first, push app
    for info in remote_info:
        dm = info['dm']

        if info['binary']:
            asset = Binary(path=info['binary'],
                           log=log,
                           dm=info['dm'],
                           name=info['name'])
        else:
            asset = generate_package_asset(path=info['package'],
                                           log=log,
                                           dm=info['dm'],
                                           name=info['name'])

        if options.setup:
            asset.setup_test_root()
        info['test_root'] = asset.test_root()

        if options.setup:
            log.info("Pushing app to %s...", info["name"])
            asset.setup_client()
        info['remote_app_path'] = asset.path_to_launch()
        if not options.setup and not dm.fileExists(info['remote_app_path']):
            log.error("App does not exist on %s, don't use --noSetup",
                      info['name'])
            return 2

    pass_count, fail_count = 0, 0
    if options.html_manifest:
        manifest = TestManifest(strict=False)
        manifest.read(options.html_manifest)
        manifest_data = {
            "tests": [{
                "path": t["relpath"]
            } for t in manifest.active_tests(disabled=False, **mozinfo.info)]
        }

        remote_port = 0
        if options.remote_webserver:
            result = re.search(':(\d+)', options.remote_webserver)
            if result:
                remote_port = int(result.groups()[0])

        @json_response
        def get_manifest(req):
            return (200, manifest_data)

        handlers = [{
            'method': 'GET',
            'path': '/manifest.json',
            'function': get_manifest
        }]
        httpd = MozHttpd(
            host=moznetwork.get_ip(),
            port=remote_port,
            log_requests=True,
            docroot=os.path.join(os.path.dirname(__file__), "..",
                                 "webharness"),
            urlhandlers=handlers,
            path_mappings={"/tests": os.path.dirname(options.html_manifest)})
        httpd.start(block=False)
        test = HTMLTests(httpd, remote_info, log, options)
        html_pass_count, html_fail_count = test.run()
        pass_count += html_pass_count
        fail_count += html_fail_count
        httpd.stop()
    log.info("Result summary:")
    log.info("Passed: %d" % pass_count)
    log.info("Failed: %d" % fail_count)
    return pass_count > 0 and fail_count == 0
Example #19
    def add_test(self, test, expected="pass", test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.startswith("test_") and (filename.endswith(".py") or filename.endswith(".js")):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace("+", " +").replace("-", " -").split()
            for atype in testtypes:
                if atype.startswith("+"):
                    testargs.update({atype[1:]: "true"})
                elif atype.startswith("-"):
                    testargs.update({atype[1:]: "false"})
                else:
                    testargs.update({atype: "true"})

        testarg_b2g = bool(testargs.get("b2g"))

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == ".ini":
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            manifest_tests = manifest.active_tests(
                exists=False, disabled=True, filters=filters, device=self.device, app=self.appName, **mozinfo.info
            )
            if len(manifest_tests) == 0:
                self.logger.error(
                    "no tests to run using specified " "combination of filters: {}".format(manifest.fmt_filters())
                )

            unfiltered_tests = []
            for test in manifest_tests:
                if test.get("disabled"):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test["path"] not in [x["path"] for x in target_tests]:
                    test.setdefault("disabled", "filtered by type (%s)" % self.type)
                    self.manifest_skipped_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i["path"])[-1])[-1]
                test_container = None
                if i.get("test_container") and testarg_b2g:
                    if i.get("test_container") == "true":
                        test_container = True
                    elif i.get("test_container") == "false":
                        test_container = False

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({"filepath": filepath, "expected": expected, "test_container": test_container})
Example #20
    def run_test(self, test, expected='pass'):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()
            if self.emulator:
                self.marionette.emulator.wait_for_homescreen(self.marionette)

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({atype[1:]: 'true'})
                elif atype.startswith('-'):
                    testargs.update({atype[1:]: 'false'})
                else:
                    testargs.update({atype: 'true'})
        oop = testargs.get('oop', False)
        if isinstance(oop, basestring):
            oop = False if oop == 'false' else 'true'

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                if self.shuffle:
                    random.shuffle(files)
                for filename in files:
                    if ((filename.startswith('test_')
                         or filename.startswith('browser_'))
                            and (filename.endswith('.py')
                                 or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            all_tests = manifest.active_tests(exists=False, disabled=False)
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=False,
                                                   device=self.device,
                                                   app=self.appName)
            skip_tests = list(
                set([x['path'] for x in all_tests]) -
                set([x['path'] for x in manifest_tests]))
            for skipped in skip_tests:
                self.logger.info(
                    'TEST-SKIP | %s | device=%s, app=%s' %
                    (os.path.basename(skipped), self.device, self.appName))
                self.todo += 1

            target_tests = manifest.get(tests=manifest_tests, **testargs)
            if self.shuffle:
                random.shuffle(target_tests)
            for i in target_tests:
                self.run_test(i["path"], i["expected"])
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        self.test_kwargs['expected'] = expected
        self.test_kwargs['oop'] = oop
        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name, filepath, suite,
                                           testloader, self.marionette,
                                           self.testvars, **self.test_kwargs)
                break

        if suite.countTestCases():
            runner = self.textrunnerclass(verbosity=3,
                                          marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure), failure.output,
                                      'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccesses'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append(
                        (results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
            if hasattr(results, 'expectedFailures'):
                self.passed += len(results.expectedFailures)
Example #21
    def __init__(self, options, **kwargs):
        self.options = options
        self.server = None
        self.logger = mozlog.getLogger('PEP')

        # create the profile
        enable_proxy = False
        locations = ServerLocations()
        if self.options.proxyLocations:
            if not self.options.serverPath:
                self.logger.warning('Can\'t set up proxy without server path')
            else:
                enable_proxy = True
                for proxyLocation in self.options.proxyLocations:
                    locations.read(proxyLocation, False)
                locations.add_host(host='127.0.0.1',
                                   port=self.options.serverPort,
                                   options='primary,privileged')

        self.profile = self.profile_class(
            profile=self.options.profilePath,
            addons=[os.path.join(here, 'extension')],
            locations=locations,
            proxy=enable_proxy)

        # fork a server to serve the test related files
        if self.options.serverPath:
            self.runServer()

        tests = []
        # TODO is there a better way of doing this?
        if self.options.testPath.endswith('.js'):
            # a single test file was passed in
            testObj = {}
            testObj['path'] = os.path.realpath(self.options.testPath)
            testObj['name'] = os.path.basename(self.options.testPath)
            testObj['here'] = os.path.dirname(testObj['path'])
            tests.append(testObj)
        else:
            # a test manifest was passed in
            # open and convert the manifest to json
            manifest = TestManifest()
            manifest.read(self.options.testPath)
            tests = manifest.get()

        # create a manifest object to be read by the JS side
        manifestObj = {}
        manifestObj['tests'] = tests
        manifestObj['options'] = options.__dict__

        # write manifest to a JSON file
        jsonManifest = open(os.path.join(here, 'manifest.json'), 'w')
        jsonManifest.write(json.dumps(manifestObj))
        jsonManifest.close()

        # setup environment
        env = os.environ.copy()
        env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
        env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = str(
            options.tracerThreshold)
        env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = str(options.tracerInterval)
        env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'

        # construct the browser arguments
        cmdargs = []
        # TODO Make browserArgs a list
        cmdargs.extend(self.options.browserArgs)
        cmdargs.extend(['-pep-start', os.path.realpath(jsonManifest.name)])

        # run with managed process handler
        self.runner = self.runner_class(profile=self.profile,
                                        binary=self.options.binary,
                                        cmdargs=cmdargs,
                                        env=env,
                                        process_class=PepProcess)
Example #22
    def run_test(self, test):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            testargs = {}
            if self.type is not None:
                testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({ atype[1:]: 'true' })
                    elif atype.startswith('-'):
                        testargs.update({ atype[1:]: 'false' })
                    else:
                        testargs.update({ atype: 'true' })

            manifest = TestManifest()
            manifest.read(filepath)

            all_tests = manifest.active_tests(disabled=False)
            manifest_tests = manifest.active_tests(disabled=False,
                                                   device=self.device,
                                                   app=self.appName)
            skip_tests = list(set([x['path'] for x in all_tests]) -
                              set([x['path'] for x in manifest_tests]))
            for skipped in skip_tests:
                self.logger.info('TEST-SKIP | %s | device=%s, app=%s' %
                                 (os.path.basename(skipped),
                                  self.device,
                                  self.appName))
                self.todo += 1

            for i in manifest.get(tests=manifest_tests, **testargs):
                self.run_test(i["path"])
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name,
                                           filepath,
                                           suite,
                                           testloader,
                                           self.marionette,
                                           self.testvars,
                                           **self.test_kwargs)
                break

        if suite.countTestCases():
            runner = self.textrunnerclass(verbosity=3,
                                          marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccesses'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
            if hasattr(results, 'expectedFailures'):
                self.passed += len(results.expectedFailures)
Example #23
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })

        testarg_b2g = bool(testargs.get('b2g'))

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated with the following: {}".format(json_path))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   device=self.device,
                                                   app=self.appName,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                       manifest.fmt_filters()))

            unfiltered_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test['path'] not in [x['path'] for x in target_tests]:
                    test.setdefault('disabled', 'filtered by type (%s)' % self.type)
                    self.manifest_skipped_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None
                if i.get('test_container') and testarg_b2g:
                    if i.get('test_container') == "true":
                        test_container = True
                    elif i.get('test_container') == "false":
                        test_container = False

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({'filepath': filepath, 'expected': expected, 'test_container': test_container})
Example #24
def main(args):
    message_logger = MessageLogger(logger=None)
    process_args = {'messageLogger': message_logger}
    auto = RemoteAutomation(None, "fennec", processArgs=process_args)

    parser = RemoteOptions(auto)
    structured.commandline.add_logging_group(parser)
    options, args = parser.parse_args(args)

    if (options.dm_trans == "adb"):
        if (options.deviceIP):
            dm = droid.DroidADB(options.deviceIP,
                                options.devicePort,
                                deviceRoot=options.remoteTestRoot)
        elif (options.deviceSerial):
            dm = droid.DroidADB(None,
                                None,
                                deviceSerial=options.deviceSerial,
                                deviceRoot=options.remoteTestRoot)
        else:
            dm = droid.DroidADB(deviceRoot=options.remoteTestRoot)
    else:
        dm = droid.DroidSUT(options.deviceIP,
                            options.devicePort,
                            deviceRoot=options.remoteTestRoot)
    auto.setDeviceManager(dm)
    options = parser.verifyRemoteOptions(options, auto)

    mochitest = MochiRemote(auto, dm, options)

    log = mochitest.log
    message_logger.logger = log
    mochitest.message_logger = message_logger

    if (options is None):
        log.error(
            "Invalid options specified, use --help for a list of valid options"
        )
        return 1

    productPieces = options.remoteProductName.split('.')
    if (productPieces is not None):
        auto.setProduct(productPieces[0])
    else:
        auto.setProduct(options.remoteProductName)
    auto.setAppName(options.remoteappname)

    options = parser.verifyOptions(options, mochitest)
    if (options is None):
        return 1

    logParent = os.path.dirname(options.remoteLogFile)
    dm.mkDir(logParent)
    auto.setRemoteLog(options.remoteLogFile)
    auto.setServerInfo(options.webServer, options.httpPort, options.sslPort)

    mochitest.printDeviceInfo()

    # Add Android version (SDK level) to mozinfo so that manifest entries
    # can be conditional on android_version.
    androidVersion = dm.shellCheckOutput(['getprop', 'ro.build.version.sdk'])
    log.info("Android sdk version '%s'; will use this to filter manifests" %
             str(androidVersion))
    mozinfo.info['android_version'] = androidVersion

    deviceRoot = dm.deviceRoot
    if options.dmdPath:
        dmdLibrary = "libdmd.so"
        dmdPathOnDevice = os.path.join(deviceRoot, dmdLibrary)
        dm.removeFile(dmdPathOnDevice)
        dm.pushFile(os.path.join(options.dmdPath, dmdLibrary), dmdPathOnDevice)
        options.dmdPath = deviceRoot

    options.dumpOutputDirectory = deviceRoot

    procName = options.app.split('/')[-1]
    dm.killProcess(procName)

    if options.robocopIni != "":
        # turning buffering off as it's not used in robocop
        message_logger.buffering = False

        # sut may wait up to 300 s for a robocop am process before returning
        dm.default_timeout = 320
        mp = TestManifest(strict=False)
        # TODO: pull this in dynamically
        mp.read(options.robocopIni)

        filters = []
        if options.totalChunks:
            filters.append(
                chunk_by_slice(options.thisChunk, options.totalChunks))
        robocop_tests = mp.active_tests(exists=False,
                                        filters=filters,
                                        **mozinfo.info)

        options.extraPrefs.append('browser.search.suggest.enabled=true')
        options.extraPrefs.append('browser.search.suggest.prompted=true')
        options.extraPrefs.append('layout.css.devPixelsPerPx=1.0')
        options.extraPrefs.append('browser.chrome.dynamictoolbar=false')
        options.extraPrefs.append('browser.snippets.enabled=false')
        options.extraPrefs.append('browser.casting.enabled=true')

        if (options.dm_trans == 'adb' and options.robocopApk):
            dm._checkCmd(["install", "-r", options.robocopApk])

        retVal = None
        # Filtering tests
        active_tests = []
        for test in robocop_tests:
            if options.testPath and options.testPath != test['name']:
                continue

            if 'disabled' in test:
                log.info('TEST-INFO | skipping %s | %s' %
                         (test['name'], test['disabled']))
                continue

            active_tests.append(test)

        log.suite_start([t['name'] for t in active_tests])

        for test in active_tests:
            # When running in a loop, we need to create a fresh profile for
            # each cycle
            if mochitest.localProfile:
                options.profilePath = mochitest.localProfile
                os.system("rm -Rf %s" % options.profilePath)
                options.profilePath = None
                mochitest.localProfile = options.profilePath

            options.app = "am"
            options.browserArgs = [
                "instrument", "-w", "-e", "deviceroot", deviceRoot, "-e",
                "class"
            ]
            options.browserArgs.append("org.mozilla.gecko.tests.%s" %
                                       test['name'])
            options.browserArgs.append(
                "org.mozilla.roboexample.test/org.mozilla.gecko.FennecInstrumentationTestRunner"
            )
            mochitest.nsprLogName = "nspr-%s.log" % test['name']

            # If the test is for checking the import from bookmarks then make
            # sure there is data to import
            if test['name'] == "testImportFromAndroid":

                # Get the OS so we can run the insert in the appropriate
                # database and follow the correct table schema
                osInfo = dm.getInfo("os")
                devOS = " ".join(osInfo['os'])

                if ("pandaboard" in devOS):
                    delete = [
                        'execsu', 'sqlite3',
                        "/data/data/com.android.browser/databases/browser2.db \'delete from bookmarks where _id > 14;\'"
                    ]
                else:
                    delete = [
                        'execsu', 'sqlite3',
                        "/data/data/com.android.browser/databases/browser.db \'delete from bookmarks where _id > 14;\'"
                    ]
                if (options.dm_trans == "sut"):
                    dm._runCmds([{"cmd": " ".join(delete)}])

                # Insert the bookmarks
                log.info(
                    "Insert bookmarks in the default android browser database")
                for i in range(20):
                    if ("pandaboard" in devOS):
                        cmd = [
                            'execsu', 'sqlite3',
                            "/data/data/com.android.browser/databases/browser2.db 'insert or replace into bookmarks(_id,title,url,folder,parent,position) values ("
                            + str(30 + i) + ",\"Bookmark" + str(i) +
                            "\",\"http://www.bookmark" + str(i) +
                            ".com\",0,1," + str(100 + i) + ");'"
                        ]
                    else:
                        cmd = [
                            'execsu', 'sqlite3',
                            "/data/data/com.android.browser/databases/browser.db 'insert into bookmarks(title,url,bookmark) values (\"Bookmark"
                            + str(i) + "\",\"http://www.bookmark" + str(i) +
                            ".com\",1);'"
                        ]
                    if (options.dm_trans == "sut"):
                        dm._runCmds([{"cmd": " ".join(cmd)}])
            try:
                screenShotDir = "/mnt/sdcard/Robotium-Screenshots"
                dm.removeDir(screenShotDir)
                dm.recordLogcat()
                result = mochitest.runTests(options)
                if result != 0:
                    log.error("runTests() exited with code %s" % result)
                log_result = mochitest.addLogData()
                if result != 0 or log_result != 0:
                    mochitest.printDeviceInfo(printLogcat=True)
                    mochitest.printScreenshots(screenShotDir)
                # Ensure earlier failures aren't overwritten by success on this
                # run
                if retVal is None or retVal == 0:
                    retVal = result
            except:
                log.error(
                    "Automation Error: Exception caught while running tests")
                traceback.print_exc()
                mochitest.stopServers()
                try:
                    mochitest.cleanup(options)
                except devicemanager.DMError:
                    # device error cleaning up... oh well!
                    pass
                retVal = 1
                break
            finally:
                # Clean-up added bookmarks
                if test['name'] == "testImportFromAndroid":
                    if ("pandaboard" in devOS):
                        cmd_del = [
                            'execsu', 'sqlite3',
                            "/data/data/com.android.browser/databases/browser2.db \'delete from bookmarks where _id > 14;\'"
                        ]
                    else:
                        cmd_del = [
                            'execsu', 'sqlite3',
                            "/data/data/com.android.browser/databases/browser.db \'delete from bookmarks where _id > 14;\'"
                        ]
                    if (options.dm_trans == "sut"):
                        dm._runCmds([{"cmd": " ".join(cmd_del)}])
        if retVal is None:
            log.warning("No tests run. Did you pass an invalid TEST_PATH?")
            retVal = 1
        else:
            # if we didn't have some kind of error running the tests, make
            # sure the tests actually passed
            print "INFO | runtests.py | Test summary: start."
            overallResult = mochitest.printLog()
            print "INFO | runtests.py | Test summary: end."
            if retVal == 0:
                retVal = overallResult
    else:
        mochitest.nsprLogName = "nspr.log"
        try:
            dm.recordLogcat()
            retVal = mochitest.runTests(options)
        except:
            log.error("Automation Error: Exception caught while running tests")
            traceback.print_exc()
            mochitest.stopServers()
            try:
                mochitest.cleanup(options)
            except devicemanager.DMError:
                # device error cleaning up... oh well!
                pass
            retVal = 1

        mochitest.printDeviceInfo(printLogcat=True)

    message_logger.finish()

    return retVal
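The chunking used above is worth isolating: chunk_by_slice(thisChunk, totalChunks) is a manifestparser filter that keeps only this worker's slice of the active tests. A small sketch against a throwaway manifest (the file names below are invented):

import os
import tempfile

from manifestparser import TestManifest
from manifestparser.filters import chunk_by_slice

# Four fake robocop entries so the slices are easy to see.
fd, path = tempfile.mkstemp(suffix='.ini')
with os.fdopen(fd, 'w') as f:
    f.write("".join("[testRobocop%d.java]\n" % i for i in range(4)))

mp = TestManifest(strict=False)
mp.read(path)

# Chunk 1 of 2 keeps the first half of the active tests.
first_half = mp.active_tests(exists=False, filters=[chunk_by_slice(1, 2)])
assert len(first_half) == 2
os.remove(path)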
Example #25
    def add_test(self, test, expected='pass'):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.endswith('.ini'):
                        msg_tmpl = ("Ignoring manifest '{0}'; running all tests in '{1}'."
                                    " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename), filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif self._is_filename_valid(filename):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated from: {}".format(json_path))
            self.logger.info("mozinfo is: {}".format(mozinfo.info))

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))

            values = {
                "appname": self.appName,
                "e10s": self.e10s,
                "manage_instance": self.marionette.instance is not None,
            }
            values.update(mozinfo.info)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   **values)
            if len(manifest_tests) == 0:
                self.logger.error("No tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: {} does not exist".format(i["path"]))

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]

                self.add_test(i["path"], i["expected"])
            return

        self.tests.append({'filepath': filepath, 'expected': expected})
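The values dict above (appname, e10s, manage_instance merged into mozinfo.info) is what skip-if expressions in the manifest are evaluated against. A hypothetical illustration of that evaluation, assuming the usual skip-if semantics shown throughout these examples:

import os
import tempfile

from manifestparser import TestManifest

# Hypothetical manifest: one entry skipped when e10s is truthy.
fd, path = tempfile.mkstemp(suffix='.ini')
with os.fdopen(fd, 'w') as f:
    f.write("[test_window.py]\nskip-if = e10s\n\n[test_tabs.py]\n")

manifest = TestManifest(strict=False)
manifest.read(path)

values = {'appname': 'firefox', 'e10s': True, 'manage_instance': True}
# disabled=True keeps skipped entries so they can be reported, exactly as
# manifest_tests above is split into skipped and target tests.
tests = manifest.active_tests(exists=False, disabled=True, **values)
skipped = [t for t in tests if t.get('disabled')]
assert os.path.basename(skipped[0]['path']) == 'test_window.py'
os.remove(path)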
Example #26
    def add_test(self, test, expected='pass', oop=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })

        # testarg_oop = either None, 'true' or 'false'.
        testarg_oop = testargs.get('oop')

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   device=self.device,
                                                   app=self.appName,
                                                   **mozinfo.info)
            unfiltered_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            # Don't filter tests with "oop" flag because manifest parser can't
            # handle it well.
            if testarg_oop is not None:
                del testargs['oop']

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test['path'] not in [x['path'] for x in target_tests]:
                    test.setdefault('disabled', 'filtered by type (%s)' % self.type)
                    self.manifest_skipped_tests.append(test)

            for test in self.manifest_skipped_tests:
                self.logger.info('TEST-SKIP | %s | %s' % (
                    os.path.basename(test['path']),
                    test['disabled']))
                self.todo += 1

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                # manifest_oop is either 'false', 'true' or 'both'.  Anything
                # else implies 'false'.
                manifest_oop = i.get('oop', 'false')

                # We only add an oop test when following conditions are met:
                # 1) It's written by javascript because we have only
                #    MarionetteJSTestCase that supports oop mode.
                # 2) we're running with "--type=+oop" or no "--type=-oop", which
                #    follows testarg_oop is either None or 'true' and must not
                #    be 'false'.
                # 3) When no "--type=[+-]oop" is applied, all active tests are
                #    included in target_tests, so we must filter out those
                #    really capable of running in oop mode. Besides, oop tests
                #    must be explicitly specified for backward compatibility. So
                #    test manifest_oop equals to either 'both' or 'true'.
                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                if (file_ext == '.js' and
                    testarg_oop != 'false' and
                    (manifest_oop == 'both' or manifest_oop == 'true')):
                    self.add_test(i["path"], i["expected"], True)

                # We only add an in-process test when following conditions are
                # met:
                # 1) we're running with "--type=-oop" or no "--type=+oop", which
                #    follows testarg_oop is either None or 'false' and must not
                #    be 'true'.
                # 2) When no "--type=[+-]oop" is applied, all active tests are
                #    included in target_tests, so we must filter out those
                #    really capable of running in in-process mode.
                if (testarg_oop != 'true' and
                    (manifest_oop == 'both' or manifest_oop != 'true')):
                    self.add_test(i["path"], i["expected"], False)
            return

        if oop is None:
            # This test is added by directory enumeration or directly specified
            # in argument list.  We have no manifest information here so we just
            # respect the "--type=[+-]oop" argument here.
            oop = file_ext == '.js' and testarg_oop == 'true'

        self.tests.append({'filepath': filepath, 'expected': expected, 'oop': oop})
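The oop branching above can be restated as a small pure helper, which is easier to follow than the inline comments; this is a sketch of the same decision table, not part of the runner:

def modes_for_test(file_ext, testarg_oop, manifest_oop='false'):
    """Return which modes ('oop', 'in-process') a test is scheduled in,
    given its file extension, the --type derived oop flag (None, 'true'
    or 'false') and the manifest's oop value ('true', 'false', 'both')."""
    modes = []
    # oop: JS-only, not excluded by --type=-oop, and opted in by the manifest.
    if (file_ext == '.js' and testarg_oop != 'false' and
            manifest_oop in ('both', 'true')):
        modes.append('oop')
    # in-process: not forced by --type=+oop, and not restricted to oop only.
    if (testarg_oop != 'true' and
            (manifest_oop == 'both' or manifest_oop != 'true')):
        modes.append('in-process')
    return modes

assert modes_for_test('.js', None, 'both') == ['oop', 'in-process']
assert modes_for_test('.py', None, 'true') == []
assert modes_for_test('.js', 'true', 'true') == ['oop']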
def run_test_harness(options):
    message_logger = MessageLogger(logger=None)
    process_args = {'messageLogger': message_logger}
    auto = RemoteAutomation(None, "fennec", processArgs=process_args)

    if options is None:
        raise ValueError("Invalid options specified, use --help for a list of valid options")

    dm = options.dm
    auto.setDeviceManager(dm)
    mochitest = MochiRemote(auto, dm, options)

    log = mochitest.log
    message_logger.logger = log
    mochitest.message_logger = message_logger

    productPieces = options.remoteProductName.split('.')
    if (productPieces is not None):
        auto.setProduct(productPieces[0])
    else:
        auto.setProduct(options.remoteProductName)
    auto.setAppName(options.remoteappname)

    logParent = os.path.dirname(options.remoteLogFile)
    dm.mkDir(logParent)
    auto.setRemoteLog(options.remoteLogFile)
    auto.setServerInfo(options.webServer, options.httpPort, options.sslPort)

    mochitest.printDeviceInfo()

    # Add Android version (SDK level) to mozinfo so that manifest entries
    # can be conditional on android_version.
    androidVersion = dm.shellCheckOutput(['getprop', 'ro.build.version.sdk'])
    log.info(
        "Android sdk version '%s'; will use this to filter manifests" %
        str(androidVersion))
    mozinfo.info['android_version'] = androidVersion

    deviceRoot = dm.deviceRoot
    if options.dmdPath:
        dmdLibrary = "libdmd.so"
        dmdPathOnDevice = os.path.join(deviceRoot, dmdLibrary)
        dm.removeFile(dmdPathOnDevice)
        dm.pushFile(os.path.join(options.dmdPath, dmdLibrary), dmdPathOnDevice)
        options.dmdPath = deviceRoot

    options.dumpOutputDirectory = deviceRoot

    procName = options.app.split('/')[-1]
    dm.killProcess(procName)

    if options.robocopIni != "":
        # turning buffering off as it's not used in robocop
        message_logger.buffering = False

        # sut may wait up to 300 s for a robocop am process before returning
        dm.default_timeout = 320
        if isinstance(options.manifestFile, TestManifest):
            mp = options.manifestFile
        else:
            mp = TestManifest(strict=False)
            mp.read(options.robocopIni)

        filters = []
        if options.totalChunks:
            filters.append(
                chunk_by_slice(options.thisChunk, options.totalChunks))
        robocop_tests = mp.active_tests(exists=False, filters=filters, **mozinfo.info)

        options.extraPrefs.append('browser.search.suggest.enabled=true')
        options.extraPrefs.append('browser.search.suggest.prompted=true')
        options.extraPrefs.append('layout.css.devPixelsPerPx=1.0')
        options.extraPrefs.append('browser.chrome.dynamictoolbar=false')
        options.extraPrefs.append('browser.snippets.enabled=false')
        options.extraPrefs.append('browser.casting.enabled=true')
        options.extraPrefs.append('extensions.autoupdate.enabled=false')

        if (options.dm_trans == 'adb' and options.robocopApk):
            dm._checkCmd(["install", "-r", options.robocopApk])

        if not options.autorun:
            # Force a single loop iteration. The iteration will start Fennec and
            # the httpd server, but not actually run a test.
            options.test_paths = [robocop_tests[0]['name']]

        retVal = None
        # Filtering tests
        active_tests = []
        for test in robocop_tests:
            if options.test_paths and test['name'] not in options.test_paths:
                continue

            if 'disabled' in test:
                log.info(
                    'TEST-INFO | skipping %s | %s' %
                    (test['name'], test['disabled']))
                continue

            active_tests.append(test)

        log.suite_start([t['name'] for t in active_tests])

        for test in active_tests:
            # When running in a loop, we need to create a fresh profile for
            # each cycle
            if mochitest.localProfile:
                options.profilePath = mochitest.localProfile
                os.system("rm -Rf %s" % options.profilePath)
                options.profilePath = None
                mochitest.localProfile = options.profilePath

            options.app = "am"
            mochitest.nsprLogName = "nspr-%s.log" % test['name']
            if options.autorun:
                # This launches a test (using "am instrument") and instructs
                # Fennec to /quit/ the browser (using Robocop:Quit) and to
                # /finish/ all opened activities.
                options.browserArgs = [
                    "instrument",
                    "-w",
                    "-e", "quit_and_finish", "1",
                    "-e", "deviceroot", deviceRoot,
                    "-e",
                    "class"]
                options.browserArgs.append(
                    "org.mozilla.gecko.tests.%s" %
                    test['name'].split('.java')[0])
                options.browserArgs.append(
                    "org.mozilla.roboexample.test/org.mozilla.gecko.FennecInstrumentationTestRunner")
            else:
                # This does not launch a test at all. It launches an activity
                # that starts Fennec and then waits indefinitely, since cat
                # never returns.
                options.browserArgs = ["start",
                                       "-n", "org.mozilla.roboexample.test/org.mozilla.gecko.LaunchFennecWithConfigurationActivity",
                                       "&&", "cat"]
                dm.default_timeout = sys.maxint # Forever.

                mochitest.log.info("")
                mochitest.log.info("Serving mochi.test Robocop root at http://%s:%s/tests/robocop/" %
                    (options.remoteWebServer, options.httpPort))
                mochitest.log.info("")

            # If the test is for checking the import from bookmarks then make
            # sure there is data to import
            if test['name'] == "testImportFromAndroid":

                # Get the OS so we can run the insert in the appropriate
                # database and follow the correct table schema
                osInfo = dm.getInfo("os")
                devOS = " ".join(osInfo['os'])

                if ("pandaboard" in devOS):
                    delete = [
                        'execsu',
                        'sqlite3',
                        "/data/data/com.android.browser/databases/browser2.db \'delete from bookmarks where _id > 14;\'"]
                else:
                    delete = [
                        'execsu',
                        'sqlite3',
                        "/data/data/com.android.browser/databases/browser.db \'delete from bookmarks where _id > 14;\'"]
                if (options.dm_trans == "sut"):
                    dm._runCmds([{"cmd": " ".join(delete)}])

                # Insert the bookmarks
                log.info(
                    "Insert bookmarks in the default android browser database")
                for i in range(20):
                    if ("pandaboard" in devOS):
                        cmd = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser2.db 'insert or replace into bookmarks(_id,title,url,folder,parent,position) values (" +
                            str(
                                30 +
                                i) +
                            ",\"Bookmark" +
                            str(i) +
                            "\",\"http://www.bookmark" +
                            str(i) +
                            ".com\",0,1," +
                            str(
                                100 +
                                i) +
                            ");'"]
                    else:
                        cmd = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser.db 'insert into bookmarks(title,url,bookmark) values (\"Bookmark" +
                            str(i) +
                            "\",\"http://www.bookmark" +
                            str(i) +
                            ".com\",1);'"]
                    if (options.dm_trans == "sut"):
                        dm._runCmds([{"cmd": " ".join(cmd)}])
            try:
                screenShotDir = "/mnt/sdcard/Robotium-Screenshots"
                dm.removeDir(screenShotDir)
                dm.recordLogcat()
                result = mochitest.runTests(options)
                if result != 0:
                    log.error("runTests() exited with code %s" % result)
                log_result = mochitest.addLogData()
                if result != 0 or log_result != 0:
                    mochitest.printDeviceInfo(printLogcat=True)
                    mochitest.printScreenshots(screenShotDir)
                # Ensure earlier failures aren't overwritten by success on this
                # run
                if retVal is None or retVal == 0:
                    retVal = result
            except:
                log.error(
                    "Automation Error: Exception caught while running tests")
                traceback.print_exc()
                mochitest.stopServers()
                try:
                    mochitest.cleanup(options)
                except devicemanager.DMError:
                    # device error cleaning up... oh well!
                    pass
                retVal = 1
                break
            finally:
                # Clean-up added bookmarks
                if test['name'] == "testImportFromAndroid":
                    if ("pandaboard" in devOS):
                        cmd_del = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser2.db \'delete from bookmarks where _id > 14;\'"]
                    else:
                        cmd_del = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser.db \'delete from bookmarks where _id > 14;\'"]
                    if (options.dm_trans == "sut"):
                        dm._runCmds([{"cmd": " ".join(cmd_del)}])
        if retVal is None:
            log.warning("No tests run. Did you pass an invalid TEST_PATH?")
            retVal = 1
        else:
            # if we didn't have some kind of error running the tests, make
            # sure the tests actually passed
            print "INFO | runtests.py | Test summary: start."
            overallResult = mochitest.printLog()
            print "INFO | runtests.py | Test summary: end."
            if retVal == 0:
                retVal = overallResult
    else:
        mochitest.nsprLogName = "nspr.log"
        try:
            dm.recordLogcat()
            retVal = mochitest.runTests(options)
        except:
            log.error("Automation Error: Exception caught while running tests")
            traceback.print_exc()
            mochitest.stopServers()
            try:
                mochitest.cleanup(options)
            except devicemanager.DMError:
                # device error cleaning up... oh well!
                pass
            retVal = 1

        mochitest.printDeviceInfo(printLogcat=True)

    message_logger.finish()

    return retVal
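For reference, the "am instrument" argument list assembled above for one robocop test, pulled out into a standalone helper (the device root and test name below are made-up values):

def robocop_am_args(device_root, test_name):
    # Same shape as options.browserArgs built above when options.autorun
    # is set: launch the instrumentation runner for a single test class.
    return [
        "instrument",
        "-w",
        "-e", "quit_and_finish", "1",
        "-e", "deviceroot", device_root,
        "-e", "class",
        "org.mozilla.gecko.tests.%s" % test_name.split('.java')[0],
        "org.mozilla.roboexample.test/org.mozilla.gecko.FennecInstrumentationTestRunner",
    ]

args = robocop_am_args("/mnt/sdcard/tests", "testBookmark.java")
assert args[0] == "instrument" and args[-2].endswith("testBookmark")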
Example #28
    def __init__(self, options, **kwargs):
        self.options = options
        self.server = None
        self.logger = mozlog.getLogger("PEP")

        # create the profile
        enable_proxy = False
        locations = ServerLocations()
        if self.options.proxyLocations:
            if not self.options.serverPath:
                self.logger.warning("Can't set up proxy without server path")
            else:
                enable_proxy = True
                locations.read(self.options.proxyLocations, False)
                locations.add_host(host="127.0.0.1", port=self.options.serverPort, options="primary,privileged")

        self.profile = self.profile_class(
            profile=self.options.profilePath,
            addons=[os.path.join(here, "extension")],
            locations=locations,
            proxy=enable_proxy,
        )

        # fork a server to serve the test related files
        if self.options.serverPath:
            self.runServer()

        tests = []
        # TODO is there a better way of doing this?
        if self.options.testPath.endswith(".js"):
            # a single test file was passed in
            testObj = {}
            testObj["path"] = os.path.realpath(self.options.testPath)
            testObj["name"] = os.path.basename(self.options.testPath)
            testObj["here"] = os.path.dirname(testObj["path"])
            tests.append(testObj)
        else:
            # a test manifest was passed in
            # open and convert the manifest to json
            manifest = TestManifest()
            manifest.read(self.options.testPath)
            tests = manifest.get()

        # create a manifest object to be read by the JS side
        manifestObj = {}
        manifestObj["tests"] = tests
        manifestObj["options"] = options.__dict__

        # write manifest to a JSON file
        jsonManifest = open(os.path.join(here, "manifest.json"), "w")
        jsonManifest.write(json.dumps(manifestObj))
        jsonManifest.close()

        # setup environment
        env = os.environ.copy()
        env["MOZ_INSTRUMENT_EVENT_LOOP"] = "1"
        env["MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD"] = str(options.tracerThreshold)
        env["MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL"] = str(options.tracerInterval)
        env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"

        # construct the browser arguments
        cmdargs = []
        # TODO Make browserArgs a list
        cmdargs.extend(self.options.browserArgs)
        cmdargs.extend(["-pep-start", os.path.realpath(jsonManifest.name)])

        # run with managed process handler
        self.runner = self.runner_class(
            profile=self.profile, binary=self.options.binary, cmdargs=cmdargs, env=env, process_class=PepProcess
        )
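The manifest.json handed to the JS side above has a simple shape: the test dicts plus the option namespace. A rough sketch of that layout with placeholder paths and values (not the harness's actual options):

import json

manifest_obj = {
    "tests": [
        # Same keys the runner fills in for a single .js test above.
        {"path": "/abs/path/test_scroll.js",
         "name": "test_scroll.js",
         "here": "/abs/path"},
    ],
    # options.__dict__ in the runner; placeholder values here.
    "options": {"tracerThreshold": 50, "tracerInterval": 10},
}

with open("manifest.json", "w") as f:
    json.dump(manifest_obj, f)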
Example #29
    def add_test(self, test, expected='pass', group='default'):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.endswith('.ini'):
                        msg_tmpl = (
                            "Ignoring manifest '{0}'; running all tests in '{1}'."
                            " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename),
                                                  filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif self._is_filename_valid(filename):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            group = filepath

            manifest = TestManifest()
            manifest.read(filepath)

            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated from: {}".format(json_path))
            self.logger.info("mozinfo is: {}".format(mozinfo.info))

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))

            values = {
                "appname": self.appName,
                "e10s": self.e10s,
                "manage_instance": self.marionette.instance is not None,
                "headless": self.headless
            }
            values.update(mozinfo.info)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   **values)
            if len(manifest_tests) == 0:
                self.logger.error("No tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: {} does not exist".format(
                        i["path"]))

                self.add_test(i["path"], i["expected"], group=group)
            return

        self.tests.append({
            'filepath': filepath,
            'expected': expected,
            'group': group
        })
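Because group defaults to the manifest path above, results can later be bucketed per manifest. A tiny sketch of that bucketing (the paths are placeholders):

from collections import defaultdict

tests = [
    {'filepath': '/t/unit/test_a.py', 'expected': 'pass', 'group': '/t/unit/manifest.ini'},
    {'filepath': '/t/unit/test_b.py', 'expected': 'pass', 'group': '/t/unit/manifest.ini'},
    {'filepath': '/t/one_off.py', 'expected': 'pass', 'group': 'default'},
]

by_group = defaultdict(list)
for t in tests:
    by_group[t['group']].append(t['filepath'])

assert len(by_group['/t/unit/manifest.ini']) == 2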
Example #30
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.endswith('.ini')):
                        msg_tmpl = (
                            "Ignoring manifest '{0}'; running all tests in '{1}'."
                            " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename),
                                                  filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif (filename.startswith('test_')
                          and (filename.endswith('.py')
                               or filename.endswith('.js'))):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info(
                "mozinfo updated with the following: {}".format(json_path))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   device=self.device,
                                                   app=self.appName,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({
            'filepath': filepath,
            'expected': expected,
            'test_container': test_container
        })
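The tags(self.test_tags) filter used above only keeps manifest entries that declare one of the requested tags. A minimal sketch with an invented manifest:

import os
import tempfile

from manifestparser import TestManifest
from manifestparser.filters import tags

fd, path = tempfile.mkstemp(suffix='.ini')
with os.fdopen(fd, 'w') as f:
    f.write("[test_login.py]\ntags = auth\n\n[test_render.py]\n")

manifest = TestManifest(strict=False)
manifest.read(path)

# Only the entry tagged 'auth' survives the filter.
auth_tests = manifest.active_tests(exists=False, filters=[tags(['auth'])])
assert [os.path.basename(t['path']) for t in auth_tests] == ['test_login.py']
os.remove(path)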
Example #31
    def run_test(self, test, testtype):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and 
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath, testtype)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            testargs = {}
            if testtype is not None:
                testtypes = testtype.replace('+', ' +').replace('-', ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({ atype[1:]: 'true' })
                    elif atype.startswith('-'):
                        testargs.update({ atype[1:]: 'false' })
                    else:
                        testargs.update({ atype: 'true' })

            manifest = TestManifest()
            manifest.read(filepath)

            if self.perf:
                if self.perfserv is None:
                    self.perfserv = manifest.get("perfserv")[0]
                machine_name = socket.gethostname()
                try:
                    manifest.has_key("machine_name")
                    machine_name = manifest.get("machine_name")[0]
                except:
                    self.logger.info("Using machine_name: %s" % machine_name)
                os_name = platform.system()
                os_version = platform.release()
                self.perfrequest = datazilla.DatazillaRequest(
                             server=self.perfserv,
                             machine_name=machine_name,
                             os=os_name,
                             os_version=os_version,
                             platform=manifest.get("platform")[0],
                             build_name=manifest.get("build_name")[0],
                             version=manifest.get("version")[0],
                             revision=self.revision,
                             branch=manifest.get("branch")[0],
                             id=os.getenv('BUILD_ID'),
                             test_date=int(time.time()))

            manifest_tests = manifest.active_tests(disabled=False)

            for i in manifest.get(tests=manifest_tests, **testargs):
                self.run_test(i["path"], testtype)
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name, filepath, suite, testloader, self.marionette, self.testvars)
                break

        if suite.countTestCases():
            runner = MarionetteTextTestRunner(verbosity=3,
                                              perf=self.perf,
                                              marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if results.perfdata and self.perf:
                self.perfrequest.add_datazilla_result(results.perfdata)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped) + len(results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccess'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-PASS'))
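The --type parsing repeated across these run_test/add_test variants can be captured as one small helper: '+' and '-' prefixes become 'true'/'false' flags, and bare names default to 'true'. A sketch:

def parse_testtype(testtype):
    testargs = {}
    # 'b2g+oop-wifi' -> ['b2g', '+oop', '-wifi']
    for atype in testtype.replace('+', ' +').replace('-', ' -').split():
        if atype.startswith('+'):
            testargs[atype[1:]] = 'true'
        elif atype.startswith('-'):
            testargs[atype[1:]] = 'false'
        else:
            testargs[atype] = 'true'
    return testargs

assert parse_testtype('b2g+oop-wifi') == {'b2g': 'true', 'oop': 'true', 'wifi': 'false'}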
    def run_test(self, test, testtype):
        if not self.httpd:
            self.start_httpd()
        if not self.marionette:
            self.start_marionette()

        if not os.path.isabs(test):
            filepath = os.path.join(os.path.dirname(__file__), test)
        else:
            filepath = test

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_')
                         or filename.startswith('browser_'))
                            and (filename.endswith('.py')
                                 or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath, testtype)
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            if testtype is not None:
                testargs = {}
                testtypes = testtype.replace('+', ' +').replace('-',
                                                                ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({atype[1:]: 'true'})
                    elif atype.startswith('-'):
                        testargs.update({atype[1:]: 'false'})
                    else:
                        testargs.update({atype: 'true'})
            manifest = TestManifest()
            manifest.read(filepath)

            if testtype is None:
                manifest_tests = manifest.get()
            else:
                manifest_tests = manifest.get(**testargs)

            for i in manifest_tests:
                self.run_test(i["path"], testtype)
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        if file_ext == '.py':
            test_mod = imp.load_source(mod_name, filepath)

            for name in dir(test_mod):
                obj = getattr(test_mod, name)
                if (isinstance(obj, (type, types.ClassType))
                        and issubclass(obj, unittest.TestCase)):
                    testnames = testloader.getTestCaseNames(obj)
                    for testname in testnames:
                        suite.addTest(obj(self.marionette,
                                          methodName=testname))

        elif file_ext == '.js':
            suite.addTest(
                MarionetteJSTestCase(self.marionette, jsFile=filepath))

        if suite.countTestCases():
            results = MarionetteTextTestRunner(verbosity=3).run(suite)
            self.failed += len(results.failures) + len(results.errors)
            self.todo = 0
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped) + len(
                    results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1],
                                      'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccess'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]),
                                          failure[1], 'TEST-UNEXPECTED-PASS'))
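The collection step above boils down to unittest.TestLoader listing the test_* methods of each TestCase subclass found in the module. A self-contained sketch without loading anything from disk:

import unittest

class FakeMarionetteTest(unittest.TestCase):
    def test_first(self):
        pass

    def test_second(self):
        pass

loader = unittest.TestLoader()
names = loader.getTestCaseNames(FakeMarionetteTest)
suite = unittest.TestSuite(FakeMarionetteTest(methodName=n) for n in names)
assert suite.countTestCases() == 2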
Example #33
    def run_test(self, test, testtype):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()
        
        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and 
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath, testtype)
            return

        mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            testargs = { 'skip': 'false' }
            if testtype is not None:
                testtypes = testtype.replace('+', ' +').replace('-', ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({ atype[1:]: 'true' })
                    elif atype.startswith('-'):
                        testargs.update({ atype[1:]: 'false' })
                    else:
                        testargs.update({ atype: 'true' })

            manifest = TestManifest()
            manifest.read(filepath)

            if self.perf:
                if self.perfserv is None:
                    self.perfserv = manifest.get("perfserv")[0]
                machine_name = socket.gethostname()
                try:
                    manifest.has_key("machine_name")
                    machine_name = manifest.get("machine_name")[0]
                except:
                    self.logger.info("Using machine_name: %s" % machine_name)
                os_name = platform.system()
                os_version = platform.release()
                self.perfrequest = datazilla.DatazillaRequest(
                             server=self.perfserv,
                             machine_name=machine_name,
                             os=os_name,
                             os_version=os_version,
                             platform=manifest.get("platform")[0],
                             build_name=manifest.get("build_name")[0],
                             version=manifest.get("version")[0],
                             revision=self.revision,
                             branch=manifest.get("branch")[0],
                             id=os.getenv('BUILD_ID'),
                             test_date=int(time.time()))

            manifest_tests = manifest.get(**testargs)

            for i in manifest_tests:
                self.run_test(i["path"], testtype)
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name, filepath, suite, testloader, self.marionette)
                break

        if suite.countTestCases():
            results = MarionetteTextTestRunner(verbosity=3, perf=self.perf).run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if results.perfdata and self.perf:
                self.perfrequest.add_datazilla_result(results.perfdata)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped) + len(results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccess'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-PASS'))
Example #34
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })

        testarg_b2g = bool(testargs.get('b2g'))

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   device=self.device,
                                                   app=self.appName,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                       manifest.fmt_filters()))

            unfiltered_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test['path'] not in [x['path'] for x in target_tests]:
                    test.setdefault('disabled', 'filtered by type (%s)' % self.type)
                    self.manifest_skipped_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None
                if i.get('test_container') and testarg_b2g:
                    if i.get('test_container') == "true":
                        test_container = True
                    elif i.get('test_container') == "false":
                        test_container = False

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({'filepath': filepath, 'expected': expected, 'test_container': test_container})
Example #35
    def run_test(self, test, testtype):
        if not self.httpd:
            self.start_httpd()
        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and 
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath, testtype)
            return

        mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            if testtype is not None:
                testargs = {}
                testtypes = testtype.replace('+', ' +').replace('-', ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({ atype[1:]: 'true' })
                    elif atype.startswith('-'):
                        testargs.update({ atype[1:]: 'false' })
                    else:
                        testargs.update({ atype: 'true' })
            manifest = TestManifest()
            manifest.read(filepath)

            if testtype is None:
                manifest_tests = manifest.get()
            else:
                manifest_tests = manifest.get(**testargs)

            for i in manifest_tests:
                self.run_test(i["path"], testtype)
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        if file_ext == '.py':
            test_mod = imp.load_source(mod_name, filepath)

            for name in dir(test_mod):
                obj = getattr(test_mod, name)
                if (isinstance(obj, (type, types.ClassType)) and
                    issubclass(obj, unittest.TestCase)):
                    testnames = testloader.getTestCaseNames(obj)
                    for testname in testnames:
                        suite.addTest(obj(self.marionette, methodName=testname))

        elif file_ext == '.js':
            suite.addTest(MarionetteJSTestCase(self.marionette, jsFile=filepath))

        if suite.countTestCases():
            results = MarionetteTextTestRunner(verbosity=3).run(suite)
            self.failed += len(results.failures) + len(results.errors)
            self.todo = 0
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped) + len(results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccess'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-PASS'))
Example #36
    def run_test(self, test, expected='pass'):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()
            if self.emulator:
                self.marionette.emulator.wait_for_homescreen(self.marionette)
            # Retrieve capabilities for later use
            if not self._capabilities:
                self.capabilities

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })
        oop = testargs.get('oop', False)
        if isinstance(oop, basestring):
            oop = False if oop == 'false' else 'true'

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                if self.shuffle:
                    random.shuffle(files)
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   device=self.device,
                                                   app=self.appName,
                                                   **mozinfo.info)
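            # Keep the manifest's own 'disabled' entries aside; everything else is
            # still subject to the type-based filtering below.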
            unfiltered_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test['path'] not in [x['path'] for x in target_tests]:
                    test.setdefault('disabled', 'filtered by type (%s)' % self.type)
                    self.manifest_skipped_tests.append(test)

            for test in self.manifest_skipped_tests:
                self.logger.info('TEST-SKIP | %s | %s' % (
                    os.path.basename(test['path']),
                    test['disabled']))
                self.todo += 1

            if self.shuffle:
                random.shuffle(target_tests)
            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])
                self.run_test(i["path"], i["expected"])
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        self.test_kwargs['expected'] = expected
        self.test_kwargs['oop'] = oop
        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name,
                                           filepath,
                                           suite,
                                           testloader,
                                           self.marionette,
                                           self.testvars,
                                           **self.test_kwargs)
                break

        if suite.countTestCases():
            runner = self.textrunnerclass(verbosity=3,
                                          marionette=self.marionette,
                                          capabilities=self.capabilities)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccesses'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-PASS'))
            if hasattr(results, 'expectedFailures'):
                self.passed += len(results.expectedFailures)
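The accounting above relies on attributes that the standard unittest result object already provides (skipped, expectedFailures, unexpectedSuccesses); only results.passed and getInfo() are Marionette extensions. A minimal, Marionette-free illustration of where those lists come from:

import unittest

class Sample(unittest.TestCase):
    def test_pass(self):
        self.assertTrue(True)

    @unittest.skip("demonstrating the skipped list")
    def test_skipped(self):
        pass

    @unittest.expectedFailure
    def test_known_bug(self):
        self.fail("tracked elsewhere")

suite = unittest.TestLoader().loadTestsFromTestCase(Sample)
results = unittest.TextTestRunner(verbosity=0).run(suite)
print(len(results.skipped))              # 1
print(len(results.expectedFailures))     # 1
print(len(results.unexpectedSuccesses))  # 0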
Exemple #37
0
    def run_test(self, test):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_')
                         or filename.startswith('browser_'))
                            and (filename.endswith('.py')
                                 or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            testargs = {}
            if self.type is not None:
                testtypes = self.type.replace('+', ' +').replace('-',
                                                                 ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({atype[1:]: 'true'})
                    elif atype.startswith('-'):
                        testargs.update({atype[1:]: 'false'})
                    else:
                        testargs.update({atype: 'true'})

            manifest = TestManifest()
            manifest.read(filepath)
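            # disabled=False: tests marked disabled in the manifest are dropped here.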

            manifest_tests = manifest.active_tests(disabled=False)

            for i in manifest.get(tests=manifest_tests, **testargs):
                self.run_test(i["path"])
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name, filepath, suite,
                                           testloader, self.marionette,
                                           self.testvars, **self.test_kwargs)
                break

        if suite.countTestCases():
            runner = MarionetteTextTestRunner(verbosity=3,
                                              marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped) + len(
                    results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1],
                                      'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccesses'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]),
                                          failure[1], 'TEST-UNEXPECTED-PASS'))
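Both runners above recurse into directories and pick up files whose names start with test_ or browser_ and end in .py or .js. That selection rule is easy to isolate; the helper below (collect_test_files is a made-up name, not harness API) shows it on its own:

import os

def collect_test_files(path):
    """Yield the files the runners above would treat as tests."""
    for root, dirs, files in os.walk(path):
        for filename in sorted(files):
            if ((filename.startswith('test_') or filename.startswith('browser_')) and
                    (filename.endswith('.py') or filename.endswith('.js'))):
                yield os.path.join(root, filename)

# for path in collect_test_files('tests'):
#     print(path)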
def main(args):
    parser = Options()
    options, args = parser.parse_args(args)
    if (not options.html_manifest or not options.specialpowers or
            not options.host1 or not options.host2 or
            not options.signalling_server):
        parser.print_usage()
        return 2

    package_options = get_package_options(parser, options)
    if not package_options:
        parser.print_usage()
        return 2

    if not os.path.isdir(options.specialpowers):
        parser.error("SpecialPowers directory %s does not exist" % options.specialpowers)
        return 2
    if options.prefs and not os.path.isfile(options.prefs):
        parser.error("Prefs file %s does not exist" % options.prefs)
        return 2
    if options.log_dest and not os.path.isdir(options.log_dest):
        parser.error("Log directory %s does not exist" % options.log_dest)
        return 2

    log = mozlog.getLogger('steeplechase')
    log.setLevel(mozlog.DEBUG)
    if ':' in options.host1:
        host, port = options.host1.split(':')
        dm1 = DeviceManagerSUT(host, port)
    else:
        dm1 = DeviceManagerSUT(options.host1)
    if ':' in options.host2:
        host, port = options.host2.split(':')
        dm2 = DeviceManagerSUT(host, port)
    else:
        dm2 = DeviceManagerSUT(options.host2)
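    # One device manager per remote client; Client1 is the side marked as initiator.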
    remote_info = [{'dm': dm1,
                    'binary': package_options.binary,
                    'package': package_options.package,
                    'is_initiator': True,
                    'name': 'Client1'},
                   {'dm': dm2,
                    'binary': package_options.binary2,
                    'package': package_options.package2,
                    'is_initiator': False,
                    'name': 'Client2'}]
    # first, push app
    for info in remote_info:
        dm = info['dm']

        if info['binary']:
            asset = Binary(path=info['binary'], log=log, dm=info['dm'], name=info['name'])
        else:
            asset = generate_package_asset(path=info['package'], log=log, dm=info['dm'], name=info['name'])

        if options.setup:
            asset.setup_test_root()
        info['test_root'] = asset.test_root()

        if options.setup:
            log.info("Pushing app to %s...", info["name"])
            asset.setup_client()
        info['remote_app_path'] = asset.path_to_launch()
        if not options.setup and not dm.fileExists(info['remote_app_path']):
            log.error("App does not exist on %s, don't use --noSetup", info['name'])
            return 2

    pass_count, fail_count = 0, 0
    if options.html_manifest:
        manifest = TestManifest(strict=False)
        manifest.read(options.html_manifest)
        manifest_data = {"tests": [{"path": t["relpath"]} for t in manifest.active_tests(disabled=False, **mozinfo.info)]}

        remote_port = 0
        if options.remote_webserver:
            result = re.search(r':(\d+)', options.remote_webserver)
            if result:
                remote_port = int(result.groups()[0])


        @json_response
        def get_manifest(req):
            return (200, manifest_data)
        handlers = [{
            'method': 'GET',
            'path': '/manifest.json',
            'function': get_manifest
            }]
        httpd = MozHttpd(host=moznetwork.get_ip(), port=remote_port, log_requests=True,
                         docroot=os.path.join(os.path.dirname(__file__), "..", "webharness"),
                         urlhandlers=handlers,
                         path_mappings={"/tests": os.path.dirname(options.html_manifest)})
        httpd.start(block=False)
        test = HTMLTests(httpd, remote_info, log, options)
        html_pass_count, html_fail_count = test.run()
        pass_count += html_pass_count
        fail_count += html_fail_count
        httpd.stop()
    log.info("Result summary:")
    log.info("Passed: %d" % pass_count)
    log.info("Failed: %d" % fail_count)
    return pass_count > 0 and fail_count == 0
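main() returns a boolean: True only when at least one test passed and none failed. A caller still has to map that onto a process exit status; one possible wrapper, assuming the script is meant to be invoked with sys.argv[1:], could look like this:

if __name__ == '__main__':
    import sys
    # True (overall success) -> exit code 0, anything else -> 1.
    sys.exit(0 if main(sys.argv[1:]) else 1)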