Example #1
  def parse(self):
    while True:
      try:
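        # signal 0 does not kill anything; it only raises OSError if the
        # parent process has gone away, in which case this worker exits too.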
        os.kill(self.parent_pid, 0)
        logfile, harnessType = self.job_queue.get_nowait()
      except Empty:
        time.sleep(5)
        continue
      except OSError:
        sys.exit(0)

      self.logger.info('parsing %s' % logfile)
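      # 'exit' is the sentinel value that tells this loop to shut down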
      if logfile == 'exit':
        break
      try:
        lp = LogParser([logfile],
                       es=self.es,
                       es_servers=self.es_servers,
                       includePass=self.include_pass,
                       output_dir=self.output_dir,
                       logger=self.logger,
                       harnessType=harnessType,
                      )
        lp.parseFiles()
      except Exception as inst:
        self.logger.exception(inst)
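
The loop above polls a job queue for (logfile, harnessType) tuples as long as
its parent process is alive and stops when it dequeues the 'exit' sentinel. A
minimal sketch of the producer side, assuming a multiprocessing.Queue; the
helper name and default harness type are illustrative, not from the source:

import multiprocessing

def queue_logs_for_parsing(job_queue, logfiles, harness_type='mochitest'):
    """Hypothetical producer feeding the parser worker shown above."""
    for logfile in logfiles:
        job_queue.put((logfile, harness_type))
    # The worker breaks out of its loop when it dequeues the 'exit' sentinel.
    job_queue.put(('exit', None))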
Example #2
    def process_test_log(self, logfilehandle):

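        # Close the caller's handle and rewrite the log in place before
        # handing it to LogParser.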
        logfilehandle.close()

        # convert embedded \n into real newlines
        logfilehandle = open(self.unittest_logpath)
        logcontents = logfilehandle.read()
        logfilehandle.close()
        logcontents = re.sub(r'\\n', '\n', logcontents)
        logfilehandle = open(self.unittest_logpath, 'wb')
        logfilehandle.write(logcontents)
        logfilehandle.close()

        lp = LogParser([self.unittest_logpath],
                       includePass=True,
                       output_dir=None,
                       logger=self.loggerdeco,
                       harnessType=self.parms['harness_type'])
        parsed_log = lp.parseFiles()
        if self.options.verbose:
            self.loggerdeco.debug('process_test_log: LogParser parsed log : %s' %
                                  json.dumps(parsed_log, indent=2))

        self.test_result.todo = parsed_log.get('todo', 0)
        self.test_result.passes = parsed_log.get('passes', [])
        failures = parsed_log.get('failures', [])
        if failures:
            for failure in failures:
                for test_failure in failure['failures']:
                    self.test_failure(failure['test'],
                                      test_failure['status'],
                                      test_failure['text'],
                                      PhoneTestResult.TESTFAILED)
        self.loggerdeco.debug('process_test_log: test_result: %s' %
                              json.dumps(self.test_result.__dict__, indent=2))
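
Examples #2 and #3 read the same keys out of the dictionary returned by
LogParser.parseFiles(). An illustrative shape, inferred only from the fields
these callers access; all values here are made up:

parsed_log = {
    'passed': 1201,          # suite totals (read in Example #3)
    'failed': 2,
    'todo': 4,
    'passes': [              # per-test entries, populated when includePass is set
        {'test': 'test_foo.html', 'duration': 153},
    ],
    'failures': [
        {'test': 'test_bar.html',
         'failures': [{'status': 'TEST-UNEXPECTED-FAIL',
                       'text': 'assertion failed'}]},
    ],
}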
Example #3
    def process_test_log(self, test_parameters, logfilehandle):

        test_log = None
        test_runs = []

        if test_parameters['use_newparser']:
            logfilehandle.close()
            logfilehandle = open(logfilehandle.name)
            # Turn off verbose logging for the log parser and restore the
            # previous level once parsing is done.
            logger = logging.getLogger('logparser')
            logger_effectiveLevel = logger.getEffectiveLevel()
            logger.setLevel(logging.WARN)
            try:
                test_log = newlogparser.parse_log(logfilehandle)
                test_runs = test_log.convert(test_parameters['include_pass'])
            finally:
                logger.setLevel(logger_effectiveLevel)
                logfilehandle.close()
        else:
            lp = LogParser([logfilehandle.name],
                           es=False,
                           es_server=None,
                           includePass=True,
                           output_dir=None,
                           logger=self.logger,
                           harnessType=test_parameters['harness_type'])

            # Use logparser's parsers, but do not allow it to
            # submit data directly to elasticsearch.
            test_runs.append(lp.parseFiles())

        if (test_parameters['es_server'] is None or
                test_parameters['rest_server'] is None):
            return

        # testgroup must match entry in autolog/js/Config.js:testNames
        # os        must match entry in autolog/js/Config.js:OSNames
        # platform  must match entry in autolog/js/Config.js:OSNames

        logfilename = None
        if test_parameters['submit_log']:
            logfilename = logfilehandle.name

        chunk_descriptor = ''
        if test_parameters['total_chunks'] > 1:
            chunk_descriptor = 's-%d' % test_parameters['this_chunk']

        testgroup_name = '%s%s' % (test_parameters['test_name'],
                                   chunk_descriptor)

        platform_name = self.phone_cfg['machinetype']

        self.loggerdeco.debug('testgroup_name = %s' % testgroup_name)

        testgroup = RESTfulAutologTestGroup(
            index=test_parameters['index'],
            testgroup=testgroup_name,
            os='android',
            platform=platform_name,
            harness=test_parameters['harness_type'],
            server=test_parameters['es_server'],
            restserver=test_parameters['rest_server'],
            machine=self.phone_cfg['phoneid'],
            logfile=logfilename)

        testgroup.set_primary_product(tree=test_parameters['tree'],
                                      buildtype='opt',
                                      buildid=test_parameters['buildid'],
                                      revision=test_parameters['revision'])

        for testdata in test_runs:

            if self.logger.getEffectiveLevel() == logging.DEBUG:
                self.loggerdeco.debug('Begin testdata')
                self.loggerdeco.debug(json.dumps(testdata, indent=4))
                self.loggerdeco.debug('End testdata')

            testgroup.add_test_suite(testsuite=testgroup_name,
                                     cmdline=test_parameters['cmdline'],
                                     passed=testdata.get('passed', None),
                                     failed=testdata.get('failed', None),
                                     todo=testdata.get('todo', None))

            for t in testdata.get('failures', []):
                test = t["test"]
                for f in t["failures"]:
                    text = f["text"]
                    status = f["status"]
                    testgroup.add_test_failure(test=test,
                                               text=text,
                                               status=status)

            # Submitting passing tests not supported via REST API
            if test_parameters['include_pass']:
                for t in testdata.get('passes', []):
                    test = t["test"]
                    duration = t.get("duration")
                    testgroup.add_test_pass(test=test, duration=duration)

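        # Post the accumulated suites, failures and passes to the autolog
        # REST server.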
        testgroup.submit()
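
Example #3 also pulls a number of settings out of test_parameters. A
hypothetical dictionary covering just the keys this method reads; every value
here is made up:

test_parameters = {
    'use_newparser': False,
    'include_pass': True,
    'harness_type': 'mochitest',
    'es_server': 'elasticsearch.example.com:9200',
    'rest_server': 'http://autolog.example.com',
    'submit_log': True,
    'total_chunks': 4,
    'this_chunk': 2,
    'test_name': 'mochitest',
    'index': 'autolog',
    'tree': 'mozilla-central',
    'buildid': '20130620040204',
    'revision': '0123456789ab',
    'cmdline': 'runtestsremote.py --autorun',
}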
Example #4
def run(manifests, output_dir, args, post_to_autolog=False):
    args = args[:]
    log = mozlog.getLogger('REFTEST')

    # set up chunks in args list
    try:
        this_index = args.index("--this-chunk")
        this_chunk = int(args[this_index + 1])
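        # when a specific chunk was requested, run only that chunk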
        total_chunks = this_chunk
    except (ValueError, IndexError):
        # "--this-chunk" missing or malformed; default to running chunk 1
        this_index = len(args)
        this_chunk = 1
        args.append("--this-chunk")
        args.append("1")
        try:
            total_index = args.index("--total-chunks")
        except ValueError:
            total_index = len(args)
            args.append("--total-chunks")
            args.append(str(this_chunk))
        total_chunks = int(args[total_index + 1])

    b2g_path = args[args.index("--b2gpath") + 1]
    # symlink reftests so reftest server can serve them
    if not os.path.exists('tests'):
        gecko_path = os.path.join(b2g_path, 'gecko')
        os.symlink(gecko_path, 'tests')

    # get revision
    with open(os.path.join(b2g_path, 'default.xml'), 'r') as default:
        soup = BeautifulSoup(default.read())
    mc = soup.find_all('project', attrs={'name': 'mozilla-central'})[0]
    revision = mc['revision']

    with open(manifests, "r") as manifest_file:
        manifests = manifest_file.readlines()

    args.append('')
    for manifest in manifests:
        manifest = manifest.strip()
        # skip blank lines and comment lines
        if not manifest or manifest.startswith('#'):
            continue
        manifest_path = os.path.join('tests', 'layout', 'reftests', manifest)
        args[-1] = manifest_path

        for chunk in range(this_chunk, total_chunks + 1):
            args[this_index + 1] = str(chunk)
            log.info("Running with manifest '%s' and chunk '%s' of '%s'" %
                     (manifest_path, chunk, total_chunks))
            ret = runreftestb2g.main(args)
            log.info("Run finished with return value '%s'" % ret)
            sleep(5)

            if os.path.exists('reftest.log'):
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                output_file = manifest.replace('/', '_').replace(
                    '.list', '%s_of_%s.log' % (chunk, total_chunks))
                log_file = os.path.join(output_dir, output_file)
                shutil.move('reftest.log', log_file)

                # send log file to autolog
                if post_to_autolog:
                    parser = LogParser([log_file], harnessType='reftest')
                    results = parser.parseFiles()
                    results['id'] = str(uuid.uuid1())
                    try:
                        autolog.post_to_autolog(results, 'reftests-%s' % chunk,
                                                revision, log_file, 'reftest')
                    except urllib2.HTTPError:
                        # autolog times out sometimes, try again
                        autolog.post_to_autolog(results, 'reftests-%s' % chunk,
                                                revision, log_file, 'reftest')

            else:
                log.error("No reftest.log! :(")

    log.info("Test Runs Completed")