Code Example #1
    def post_to_autolog(self, elapsedtime):
        self.logger.info('posting results to autolog')

        # This is all autolog stuff.
        # See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
        from mozautolog import RESTfulAutologTestGroup
        testgroup = RESTfulAutologTestGroup(testgroup=self.testgroup,
                                            os='android',
                                            platform='emulator',
                                            harness='marionette',
                                            server=self.es_server,
                                            restserver=self.rest_server,
                                            machine=socket.gethostname())

        testgroup.set_primary_product(tree='b2g',
                                      buildtype='opt',
                                      revision=self.revision)

        testgroup.add_test_suite(testsuite='b2g emulator testsuite',
                                 elapsedtime=elapsedtime.seconds,
                                 cmdline='',
                                 passed=self.passed,
                                 failed=self.failed,
                                 todo=self.todo)

        # Add in the test failures.
        for f in self.failures:
            testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])

        testgroup.submit()
Code Example #2
def submit_to_autolog(commit, logfile, error):
    print 'submitting to autolog'

    testgroup = RESTfulAutologTestGroup(
        testgroup = "Build",
        os = 'android',
        platform = 'emulator',
        harness = 'marionette',
        machine = socket.gethostname(),
        logfile=logfile)

    testgroup.set_primary_product(
        tree = 'b2g',
        buildtype = 'opt',
        revision = commit)

    testgroup.add_test_suite(
        testsuite = 'b2g emulator build',
        cmdline = '',
        passed = 1 if logfile is None else 0,
        failed = 0 if logfile is None else 1,
        todo = 0)

    if error:
        testgroup.add_test_failure(test='build', text=error, status='FAILURE')

    testgroup.submit()
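
A hypothetical call site for submit_to_autolog above, reporting a failed build; the commit hash, log path, and error text are placeholders rather than values from the original project:

# Hypothetical invocation; all argument values are placeholders.
submit_to_autolog(commit='abcdef012345',
                  logfile='/tmp/b2g-build.log',
                  error='make exited with status 2')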
Code Example #3
def post_to_autolog(data, testgroup, revision=None, logfile=None, harness='mochitest'):
    testgroup = AutologTestGroup(machine=socket.gethostname(),
                                 id=data['id'],
                                 platform='emulator',
                                 os='android',
                                 harness='autolog',
                                 testgroup=testgroup,
                                 logfile=logfile,
                                )

    testgroup.set_primary_product(tree='b2g',
                                  revision=revision,
                                  buildtype='opt',
                                 )

    testgroup.add_test_suite(testsuite=harness,
                             passed=data.get('passed', 0),
                             failed=data.get('failed', 0),
                             todo=data.get('todo', 0),
                             id="%s-testsuite1" % data['id'],
                            )

    for tf_index, failure in enumerate(data.get('failures', [])):
        for f in failure.get('failures', []):
            testgroup.add_test_failure(test=failure.get('test', None),
                                       id="%s-testfailure1.%d" % (data['id'], (tf_index+1)),
                                       duration=failure.get('duration', None),
                                       **f
                                      )

    testgroup.submit()
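
Judging from the keys read in the function above, a minimal data payload might look like the sketch below; the keys of the inner failure dicts (splatted into add_test_failure via **f) are an assumption based on the text=/status= arguments used in other examples on this page:

# Hypothetical payload; only the keys actually consumed above are shown.
data = {
    'id': 'run-20130101-1',
    'passed': 41,
    'failed': 1,
    'todo': 0,
    'failures': [
        {
            'test': 'test_example.js',
            'duration': 12,
            'failures': [
                {'text': 'assertion failed', 'status': 'FAIL'},  # assumed keys
            ],
        },
    ],
}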
Code Example #4
File: mochilog.py  Project: ahal/b2gautomation
def post_to_autolog(data, testgroup, revision=None, logfile=None):
    testgroup = AutologTestGroup(machine=socket.gethostname(),
                                 id=data['id'],
                                 platform='emulator',
                                 os='android',
                                 harness='buildbot',
                                 testgroup=testgroup,
                                 logfile=logfile,
                                )

    testgroup.set_primary_product(tree='b2g',
                                  revision=revision,
                                  buildtype='opt',
                                 )

    testgroup.add_test_suite(testsuite='mochitest',
                             passed=data.get('passed', 0),
                             failed=data.get('failed', 0),
                             todo=data.get('todo', 0),
                             id="%s-testsuite1" % data['id'],
                            )

    for tf_index, failure in enumerate(data.get('failures', [])):
        for f in failure.get('failures', []):
            testgroup.add_test_failure(test=failure.get('test', None),
                                       id="%s-testfailure1.%d" % (data['id'], (tf_index+1)),
                                       duration=failure.get('duration', None),
                                       **f
                                      )

    testgroup.submit()
Code Example #5
File: autolog.py  Project: ahal/scripts
def post_to_autolog(data, testgroup, revision=None, logfile=None, harness="mochitest"):
    testgroup = AutologTestGroup(
        machine=socket.gethostname(),
        id=data["id"],
        platform="emulator",
        os="android",
        harness="autolog",
        testgroup=testgroup,
        logfile=logfile,
    )

    testgroup.set_primary_product(tree="b2g", revision=revision, buildtype="opt")

    testgroup.add_test_suite(
        testsuite=harness,
        passed=data.get("passed", 0),
        failed=data.get("failed", 0),
        todo=data.get("todo", 0),
        id="%s-testsuite1" % data["id"],
    )

    for tf_index, failure in enumerate(data.get("failures", [])):
        for f in failure.get("failures", []):
            testgroup.add_test_failure(
                test=failure.get("test", None),
                id="%s-testfailure1.%d" % (data["id"], (tf_index + 1)),
                duration=failure.get("duration", None),
                **f
            )

    testgroup.submit()
Code Example #6
File: runtests.py  Project: marshall/mozilla-central
    def post_to_autolog(self, elapsedtime):
        self.logger.info('posting results to autolog')

        # This is all autolog stuff.
        # See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
        from mozautolog import RESTfulAutologTestGroup
        testgroup = RESTfulAutologTestGroup(
            testgroup = self.testgroup,
            os = 'android',
            platform = 'emulator',
            harness = 'marionette',
            server = self.es_server,
            restserver = self.rest_server,
            machine = socket.gethostname())

        testgroup.set_primary_product(
            tree = 'b2g',
            buildtype = 'opt',
            revision = self.revision)

        testgroup.add_test_suite(
            testsuite = 'b2g emulator testsuite',
            elapsedtime = elapsedtime.seconds,
            cmdline = '',
            passed = self.passed,
            failed = self.failed,
            todo = self.todo)

        # Add in the test failures.
        for f in self.failures:
            testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])

        testgroup.submit()
Code Example #7
File: results.py  Project: sinemetu1/coversheet
  def postToAutolog(self):
    from mozautolog import RESTfulAutologTestGroup as AutologTestGroup

    group = AutologTestGroup(
              harness='crossweave',
              testgroup='crossweave-%s' % self.synctype,
              server=self.config.get('es'),
              restserver=self.config.get('restserver'),
              machine=socket.gethostname(),
              platform=self.config.get('platform', None),
              os=self.config.get('os', None),
            )
    tree = self.postdata['productversion']['repository']
    group.set_primary_product(
              tree=tree[tree.rfind("/")+1:],
              version=self.postdata['productversion']['version'],
              buildid=self.postdata['productversion']['buildid'],
              buildtype='opt',
              revision=self.postdata['productversion']['changeset'],
            )
    group.add_test_suite(
              passed=self.numpassed,
              failed=self.numfailed,
              todo=0,
            )
    for test in self.results:
      if test['state'] != "TEST-PASS":
        errorlog = self.errorlogs.get(test['name'])
        errorlog_filename = errorlog.filename if errorlog else None
        group.add_test_failure(
              test = test['name'],
              status = test['state'],
              text = test['message'],
              logfile = errorlog_filename
            )
    try:
        group.submit()
    except:
        self.sendEmail('<pre>%s</pre>' % traceback.format_exc(),
                       sendTo='*****@*****.**')
        return

    # Iterate through all testfailure objects, and update the postdata
    # dict with the testfailure logurl's, if any.
    for tf in group.testsuites[-1].testfailures:
      result = [x for x in self.results if x.get('name') == tf.test]
      if not result:
        continue
      result[0]['logurl'] = tf.logurl
Code Example #8
    def post_to_autolog(self, elapsedtime):
        self.logger.info('posting results to autolog')

        logfile = None
        if self.emulator:
            filename = os.path.join(os.path.abspath(self.logcat_dir),
                                    "emulator-%d.log" % self.marionette.emulator.port)
            if os.access(filename, os.F_OK):
                logfile = filename

        for es_server in self.es_servers:

            # This is all autolog stuff.
            # See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
            from mozautolog import RESTfulAutologTestGroup
            testgroup = RESTfulAutologTestGroup(
                testgroup=self.testgroup,
                os='android',
                platform='emulator',
                harness='marionette',
                server=es_server,
                restserver=None,
                machine=socket.gethostname(),
                logfile=logfile)

            testgroup.set_primary_product(
                tree=self.tree,
                buildtype='opt',
                revision=self.revision)

            testgroup.add_test_suite(
                testsuite='b2g emulator testsuite',
                elapsedtime=elapsedtime.seconds,
                cmdline='',
                passed=self.passed,
                failed=self.failed,
                todo=self.todo)

            # Add in the test failures.
            for f in self.failures:
                testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])

            testgroup.submit()
Code Example #9
    def postToAutolog(self):
        from mozautolog import RESTfulAutologTestGroup as AutologTestGroup

        group = AutologTestGroup(
            harness='crossweave',
            testgroup='crossweave-%s' % self.synctype,
            server=self.config.get('es'),
            restserver=self.config.get('restserver'),
            machine=socket.gethostname(),
            platform=self.config.get('platform', None),
            os=self.config.get('os', None),
        )
        tree = self.postdata['productversion']['repository']
        group.set_primary_product(
            tree=tree[tree.rfind("/") + 1:],
            version=self.postdata['productversion']['version'],
            buildid=self.postdata['productversion']['buildid'],
            buildtype='opt',
            revision=self.postdata['productversion']['changeset'],
        )
        group.add_test_suite(
            passed=self.numpassed,
            failed=self.numfailed,
            todo=0,
        )
        for test in self.results:
            if test['state'] != "TEST-PASS":
                errorlog = self.errorlogs.get(test['name'])
                errorlog_filename = errorlog.filename if errorlog else None
                group.add_test_failure(test=test['name'],
                                       status=test['state'],
                                       text=test['message'],
                                       logfile=errorlog_filename)
        try:
            group.submit()
        except:
            self.sendEmail('<pre>%s</pre>' % traceback.format_exc(),
                           sendTo='*****@*****.**')
            return

        # Iterate through all testfailure objects, and update the postdata
        # dict with the testfailure logurl's, if any.
        for tf in group.testsuites[-1].testfailures:
            result = [x for x in self.results if x.get('name') == tf.test]
            if not result:
                continue
            result[0]['logurl'] = tf.logurl
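
For reference, the fields read from self.postdata['productversion'] in this example and in Code Example #7 suggest a payload roughly like the sketch below; every value is a placeholder:

# Hypothetical postdata; only the keys read by postToAutolog are shown.
postdata = {
    'productversion': {
        'repository': 'http://hg.mozilla.org/mozilla-central',  # tree becomes the last path segment
        'version': '20.0a1',
        'buildid': '20130101030931',
        'changeset': 'abcdef012345',
    },
}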
Code Example #10
File: runtests.py  Project: sergecodd/FireFox-OS
    def post_to_autolog(self, elapsedtime):
        self.logger.info('posting results to autolog')

        logfile = None
        if self.emulator:
            filename = os.path.join(os.path.abspath(self.logcat_dir),
                                    "emulator-%d.log" % self.marionette.emulator.port)
            if os.access(filename, os.F_OK):
                logfile = filename

        for es_server in self.es_servers:

            # This is all autolog stuff.
            # See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
            from mozautolog import RESTfulAutologTestGroup
            testgroup = RESTfulAutologTestGroup(
                testgroup=self.testgroup,
                os='android',
                platform='emulator',
                harness='marionette',
                server=es_server,
                restserver=None,
                machine=socket.gethostname(),
                logfile=logfile)

            testgroup.set_primary_product(
                tree=self.tree,
                buildtype='opt',
                revision=self.revision)

            testgroup.add_test_suite(
                testsuite='b2g emulator testsuite',
                elapsedtime=elapsedtime.seconds,
                cmdline='',
                passed=self.passed,
                failed=self.failed,
                todo=self.todo)

            # Add in the test failures.
            for f in self.failures:
                testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])

            testgroup.submit()
Code Example #11
File: etlparser.py  Project: wlach/talos
class XPerfAutoLog(object):

  def __init__(self, filename = None):
    self.testGroup = None
    if filename != None:
      config_file = open(filename, 'r')
      self.yaml_config = yaml.load(config_file)
      config_file.close()
      self.autolog_init()

  def autolog_init(self):
    testos = 'win7' #currently we only run xperf on windows 7
    testname = self.yaml_config.get('testname', '')
    testplatform = 'win32' #currently we only run xperf on win32
    if testname == '':
      return
  
    self.testGroup = RESTfulAutologTestGroup(
      testgroup = testname,
      os = testos,
      platform = testplatform,
      machine = self.yaml_config['title'],
      starttime = int(time.time()),
      builder = '%s_%s-opt_test-%s' % (self.yaml_config['title'], os, testname),
      restserver = 'http://10.2.76.100/autologserver'
    )
  
    self.testGroup.set_primary_product(
      tree = self.yaml_config['repository'].split('/')[-1], 
      buildtype = 'opt', #we only run talos on opt builds
      buildid = self.yaml_config['buildid'],
      revision = self.yaml_config['sourcestamp'],
    )

  def addData(self, filename, readcount, readbytes, writecount, writebytes):
    if (self.testGroup == None):
      self.autolog_init()

    if (self.testGroup == None):
      return
      
    self.testGroup.add_perf_data(
      test = self.yaml_config['testname'],
      type = 'diskIO',
      name = filename[filename.rfind('\\') + 1:],
      reads = readcount,
      read_bytes = readbytes,
      writes = writecount,
      write_bytes = writebytes
    )
  
  def post(self):
    if (self.testGroup != None):
      self.testGroup.submit() 
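
A hypothetical driver for the XPerfAutoLog class above; the YAML path and the I/O counters are placeholders, and the YAML file is assumed to carry the keys the class reads ('testname', 'title', 'repository', 'buildid', 'sourcestamp'):

# Hypothetical usage; all values are placeholders.
xperf_log = XPerfAutoLog('xperf_config.yaml')
xperf_log.addData(r'C:\work\profile\places.sqlite',
                  readcount=120, readbytes=491520,
                  writecount=35, writebytes=143360)
xperf_log.post()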
Code Example #12
    def make_testgroups(self, results_collection):
        testgroups = []
        for context in results_collection.contexts:
            coll = results_collection.subset(lambda t: t.context == context)
            passed = coll.tests_with_result('PASS')
            failed = coll.tests_with_result('UNEXPECTED-FAIL')
            unexpected_passes = coll.tests_with_result('UNEXPECTED-PASS')
            errors = coll.tests_with_result('ERROR')
            skipped = coll.tests_with_result('SKIPPED')
            known_fails = coll.tests_with_result('KNOWN-FAIL')

            testgroup = RESTfulAutologTestGroup(
                testgroup=context.testgroup,
                os=context.os,
                platform=context.arch,
                harness=context.harness,
                server=self.es_server,
                restserver=self.rest_server,
                machine=context.hostname,
                logfile=context.logfile,
            )
            testgroup.add_test_suite(
                testsuite=results_collection.suite_name,
                elapsedtime=coll.time_taken,
                passed=count(passed),
                failed=count(failed) + count(errors) +
                count(unexpected_passes),
                todo=count(skipped) + count(known_fails),
            )
            testgroup.set_primary_product(
                tree=context.tree,
                revision=context.revision,
                productname=context.product,
                buildtype=context.buildtype,
            )
            # need to call this again since we already used the generator
            for f in coll.tests_with_result('UNEXPECTED-FAIL'):
                testgroup.add_test_failure(
                    test=long_name(f),
                    text='\n'.join(f.output),
                    status=f.result,
                )
            testgroups.append(testgroup)
        return testgroups
Code Example #13
File: autolog.py  Project: luke-chang/gecko-1
    def make_testgroups(self, results_collection):
        testgroups = []
        for context in results_collection.contexts:
            coll = results_collection.subset(lambda t: t.context == context)
            passed = coll.tests_with_result('PASS')
            failed = coll.tests_with_result('UNEXPECTED-FAIL')
            unexpected_passes = coll.tests_with_result('UNEXPECTED-PASS')
            errors = coll.tests_with_result('ERROR')
            skipped = coll.tests_with_result('SKIPPED')
            known_fails = coll.tests_with_result('KNOWN-FAIL')

            testgroup = RESTfulAutologTestGroup(
                testgroup=context.testgroup,
                os=context.os,
                platform=context.arch,
                harness=context.harness,
                server=self.es_server,
                restserver=self.rest_server,
                machine=context.hostname,
                logfile=context.logfile,
            )
            testgroup.add_test_suite(
                testsuite=results_collection.suite_name,
                elapsedtime=coll.time_taken,
                passed=count(passed),
                failed=count(failed) + count(errors) + count(unexpected_passes),
                todo=count(skipped) + count(known_fails),
            )
            testgroup.set_primary_product(
                tree=context.tree,
                revision=context.revision,
                productname=context.product,
                buildtype=context.buildtype,
            )
            # need to call this again since we already used the generator
            for f in coll.tests_with_result('UNEXPECTED-FAIL'):
                testgroup.add_test_failure(
                    test=long_name(f),
                    text='\n'.join(f.output),
                    status=f.result,
                )
            testgroups.append(testgroup)
        return testgroups
Code Example #14
File: etlparser.py  Project: wlach/talos
  def autolog_init(self):
    testos = 'win7' #currently we only run xperf on windows 7
    testname = self.yaml_config.get('testname', '')
    testplatform = 'win32' #currently we only run xperf on win32
    if testname == '':
      return

    self.testGroup = RESTfulAutologTestGroup(
      testgroup = testname,
      os = testos,
      platform = testplatform,
      machine = self.yaml_config['title'],
      starttime = int(time.time()),
      builder = '%s_%s-opt_test-%s' % (self.yaml_config['title'], os, testname),
      restserver = 'http://10.2.76.100/autologserver'
    )

    self.testGroup.set_primary_product(
      tree = self.yaml_config['repository'].split('/')[-1],
      buildtype = 'opt', #we only run talos on opt builds
      buildid = self.yaml_config['buildid'],
      revision = self.yaml_config['sourcestamp'],
    )
Code Example #15
File: profiler.py  Project: malini/mochi-profiler
    def parse_and_submit(self):
        """
        Parse the logs generated by runtests.py and submit results to autolog
        """
        results = {}
        chrome_results = {}
        prof = re.compile("Profile::((\w+):\s*(\d+))")
        failure = re.compile("Failed:\s+([0-9]*)")
        logs = open(self.plain_log_file, 'r')
        #parse out which test is being run.
        #also, parse out if the test failed
        for line in logs.readlines():
            matches = prof.findall(line)
            for match in matches:
                if results.has_key(match[1]):
                    results[match[1]] += int(match[2])
                else:
                    results[match[1]] = int(match[2])
            fail = failure.search(line)
            if fail:
                if fail.group(1) != "0":
                    self.log.info("Plain tests failed, not submitting data to autolog")
                    return
        logs.close()
        for k, v in results.iteritems():
            results[k] = results[k] / PLAIN_REPEATS
        logs = open(self.chrome_log_file, 'r')
        for line in logs.readlines():
            matches = prof.findall(line)
            for match in matches:
                if chrome_results.has_key(match[1]):
                    chrome_results[match[1]] += int(match[2])
                else:
                    chrome_results[match[1]] = int(match[2])
            fail = failure.search(line)
            if fail:
                if fail.group(1) != "0":
                    self.log.info("Chrome tests failed, not submitting data to autolog")
                    return
        logs.close()
        for k, v in chrome_results.iteritems():
            chrome_results[k] = chrome_results[k] / CHROME_REPEATS
        results.update(chrome_results)
        self.log.info("results: %s" % results)

        #submit
        if len(results) != 0:
            testgroup = RESTfulAutologTestGroup(
              testgroup = 'mochitest-perf',
              os = self.platform,
              platform = self.platform,
              builder = self.builddata['buildid'],
              starttime = int(time.time())
            )
            testgroup.set_primary_product(
              tree = self.builddata['tree'],
              buildtype = self.builddata['buildtype'],
              buildid = self.builddata['buildid'],
              revision = self.revision,
            )
            for test, value in results.iteritems():
                test_type = 'LoadTime'
                if 'RunTime' in test:
                    test_type = 'RunTime'
                testgroup.add_perf_data(
                  test = 'mochitest-perf',
                  name = test,
                  type = test_type,
                  time = value
                )
            self.log.info("Submitting to autolog")
            testgroup.submit()
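
To make the parsing above concrete, here is a minimal sketch of the log-line format the Profile:: regular expression expects; the example line is hypothetical:

import re

# Same pattern as in parse_and_submit; a line of this (hypothetical) form yields
# one tuple per match: ('LoadTime: 123', 'LoadTime', '123'), so match[1] is the
# counter name and match[2] its value.
prof = re.compile(r"Profile::((\w+):\s*(\d+))")
print(prof.findall("Profile::LoadTime: 123"))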
Code Example #16
File: runtestsremote.py  Project: imclab/autophone
    def process_test_log(self, test_parameters, logfilehandle):

        test_log = None
        test_runs = []

        if test_parameters['use_newparser']:
            logfilehandle.close()
            logfilehandle = open(logfilehandle.name)
            try:
                # Turn off verbose logging for the log parser
                logger = logging.getLogger('logparser')
                logger_effectiveLevel = logger.getEffectiveLevel()
                logger.setLevel(logging.WARN)
                test_log = newlogparser.parse_log(logfilehandle)
                test_runs = test_log.convert(test_parameters['include_pass'])
            finally:
                logger.setLevel(logger_effectiveLevel)
                logfilehandle.close()
        else:
            lp = LogParser([logfilehandle.name],
                           es=False,
                           es_server=None,
                           includePass=True,
                           output_dir=None,
                           logger=self.logger,
                           harnessType=test_parameters['harness_type'])

            # Use logparser's parsers, but do not allow it to
            # submit data directly to elasticsearch.
            test_runs.append(lp.parseFiles())

        if test_parameters['es_server'] is None or test_parameters['rest_server'] is None:
            return

        # testgroup must match entry in autolog/js/Config.js:testNames
        # os        must match entry in autolog/js/Config.js:OSNames
        # platform  must match entry in autolog/js/Config.js:OSNames

        logfilename = None
        if test_parameters['submit_log']:
            logfilename = logfilehandle.name

        chunk_descriptor = ''
        if test_parameters['total_chunks'] > 1:
            chunk_descriptor = 's-%d' % test_parameters['this_chunk']

        testgroup_name = '%s%s' % (test_parameters['test_name'],
                                   chunk_descriptor)

        platform_name = self.phone_cfg['machinetype']

        self.loggerdeco.debug('testgroup_name = %s' % testgroup_name)

        testgroup = RESTfulAutologTestGroup(
            index=test_parameters['index'],
            testgroup=testgroup_name,
            os='android',
            platform=platform_name,
            harness=test_parameters['harness_type'],
            server=test_parameters['es_server'],
            restserver=test_parameters['rest_server'],
            machine=self.phone_cfg['phoneid'],
            logfile=logfilename)

        testgroup.set_primary_product(
            tree=test_parameters['tree'],
            buildtype='opt',
            buildid=test_parameters['buildid'],
            revision=test_parameters['revision'])

        for testdata in test_runs:

            if self.logger.getEffectiveLevel() == logging.DEBUG:
                self.loggerdeco.debug('Begin testdata')
                self.loggerdeco.debug(json.dumps(testdata, indent=4))
                self.loggerdeco.debug('End testdata')

            testgroup.add_test_suite(
                testsuite=testgroup_name,
                cmdline=test_parameters['cmdline'],
                passed=testdata.get('passed', None),
                failed=testdata.get('failed', None),
                todo=testdata.get('todo', None))

            for t in testdata.get('failures', {}):
                test = t["test"]
                for f in t["failures"]:
                    text = f["text"]
                    status = f["status"]
                    testgroup.add_test_failure(test=test,
                                               text=text,
                                               status=status)

            # Submitting passing tests not supported via REST API
            if test_parameters['include_pass']:
                for t in testdata.get('passes', {}):
                    test = t["test"]
                    duration = None
                    if "duration" in t:
                        duration = t["duration"]
                    testgroup.add_test_pass(test=test,
                                            duration=duration)

        testgroup.submit()
Code Example #17
    def process_test_log(self, test_parameters, logfilehandle):

        test_log = None
        test_runs = []

        if test_parameters['use_newparser']:
            logfilehandle.close()
            logfilehandle = open(logfilehandle.name)
            try:
                # Turn off verbose logging for the log parser
                logger = logging.getLogger('logparser')
                logger_effectiveLevel = logger.getEffectiveLevel()
                logger.setLevel(logging.WARN)
                test_log = newlogparser.parse_log(logfilehandle)
                test_runs = test_log.convert(test_parameters['include_pass'])
            finally:
                logger.setLevel(logger_effectiveLevel)
                logfilehandle.close()
        else:
            lp = LogParser([logfilehandle.name],
                           es=False,
                           es_server=None,
                           includePass=True,
                           output_dir=None,
                           logger=self.logger,
                           harnessType=test_parameters['harness_type'])

            # Use logparser's parsers, but do not allow it to
            # submit data directly to elasticsearch.
            test_runs.append(lp.parseFiles())

        if test_parameters['es_server'] is None or test_parameters[
                'rest_server'] is None:
            return

        # testgroup must match entry in autolog/js/Config.js:testNames
        # os        must match entry in autolog/js/Config.js:OSNames
        # platform  must match entry in autolog/js/Config.js:OSNames

        logfilename = None
        if test_parameters['submit_log']:
            logfilename = logfilehandle.name

        chunk_descriptor = ''
        if test_parameters['total_chunks'] > 1:
            chunk_descriptor = 's-%d' % test_parameters['this_chunk']

        testgroup_name = '%s%s' % (test_parameters['test_name'],
                                   chunk_descriptor)

        platform_name = self.phone_cfg['machinetype']

        self.loggerdeco.debug('testgroup_name = %s' % testgroup_name)

        testgroup = RESTfulAutologTestGroup(
            index=test_parameters['index'],
            testgroup=testgroup_name,
            os='android',
            platform=platform_name,
            harness=test_parameters['harness_type'],
            server=test_parameters['es_server'],
            restserver=test_parameters['rest_server'],
            machine=self.phone_cfg['phoneid'],
            logfile=logfilename)

        testgroup.set_primary_product(tree=test_parameters['tree'],
                                      buildtype='opt',
                                      buildid=test_parameters['buildid'],
                                      revision=test_parameters['revision'])

        for testdata in test_runs:

            if self.logger.getEffectiveLevel() == logging.DEBUG:
                self.loggerdeco.debug('Begin testdata')
                self.loggerdeco.debug(json.dumps(testdata, indent=4))
                self.loggerdeco.debug('End testdata')

            testgroup.add_test_suite(testsuite=testgroup_name,
                                     cmdline=test_parameters['cmdline'],
                                     passed=testdata.get('passed', None),
                                     failed=testdata.get('failed', None),
                                     todo=testdata.get('todo', None))

            for t in testdata.get('failures', {}):
                test = t["test"]
                for f in t["failures"]:
                    text = f["text"]
                    status = f["status"]
                    testgroup.add_test_failure(test=test,
                                               text=text,
                                               status=status)

            # Submitting passing tests not supported via REST API
            if test_parameters['include_pass']:
                for t in testdata.get('passes', {}):
                    test = t["test"]
                    duration = None
                    if "duration" in t:
                        duration = t["duration"]
                    testgroup.add_test_pass(test=test, duration=duration)

        testgroup.submit()
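
Taken together, every example on this page follows the same submission flow: construct a test group, call set_primary_product, add a test suite with passed/failed/todo counts, attach any failures, and submit. Below is a condensed sketch of that flow using only keyword arguments that appear in the snippets above; the server address, tree, revision, and counts are placeholders:

import socket
from mozautolog import RESTfulAutologTestGroup

# All values below are placeholders; real callers pull them from their harness or config.
testgroup = RESTfulAutologTestGroup(
    testgroup='example testgroup',   # must match an entry in autolog/js/Config.js:testNames
    os='android',
    platform='emulator',
    harness='marionette',
    server='elasticsearch.example.org:9200',
    machine=socket.gethostname())

testgroup.set_primary_product(tree='b2g', buildtype='opt', revision='abcdef012345')

testgroup.add_test_suite(testsuite='example testsuite',
                         passed=10, failed=1, todo=0)

testgroup.add_test_failure(test='test_example.py',
                           text='assertion failed',
                           status='TEST-UNEXPECTED-FAIL')

testgroup.submit()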