Example #1
0
    def print_aggregated(self, module_name, elem, failures_to_print):
        """Record aggregated results for one module and log its failures.

        Writes <module>_executed, _passed, _failed and _done result lines
        to self.result_output_file, then logs at most *failures_to_print*
        failing tests through self.logger.

        Returns a Result namedtuple
        (num_printed_failures, failures_skipped).
        """
        executed = len(elem.findall('.//Test'))
        passed = len(elem.findall('.//Test[@result="pass"]'))
        failed = len(elem.findall('.//Test[@result="fail"]'))

        py_test_lib.add_result(
            self.result_output_file,
            '%s_executed pass %s' % (module_name, str(executed)))
        py_test_lib.add_result(
            self.result_output_file,
            '%s_passed pass %s' % (module_name, str(passed)))

        failed_result = 'fail' if failed > 0 else 'pass'
        py_test_lib.add_result(
            self.result_output_file,
            '%s_failed %s %s' % (module_name, failed_result, str(failed)))

        # The 'done' attribute reports whether the module finished its run.
        if elem.get('done', 'false') == 'false':
            done_line = '%s_done fail' % module_name
        else:
            done_line = '%s_done pass' % module_name
        py_test_lib.add_result(self.result_output_file, done_line)

        Result = collections.namedtuple(
            'Result', ['num_printed_failures', 'failures_skipped'])

        if failures_to_print == 0:
            return Result(0, False)

        # Log failing tests (for debugging) up to the requested limit.
        printed = 0
        for case in elem.findall('.//TestCase'):
            for bad_test in case.findall('.//Test[@result="fail"]'):
                if printed == failures_to_print:
                    return Result(printed, True)
                full_name = '%s/%s.%s' % (
                    module_name, case.get("name"), bad_test.get("name"))
                message = ''
                for failure in bad_test.findall('.//Failure'):
                    message = '%s \n %s' % (message, failure.get('message'))
                self.logger.info('%s %s' % (full_name, message.strip()))
                printed += 1

        return Result(printed, False)
 def print_atomic(self, module_name, elem):
     """Emit one pass/fail result line per individual test in *elem*.

     Each result is named "<module>/<TestCase name>.<Test name>" and is
     written to self.result_output_file together with the test's own
     "result" attribute.
     """
     for case in elem.findall('.//TestCase'):
         case_name = case.get("name")
         for test in case.findall('.//Test'):
             full_name = "%s/%s.%s" % (module_name, case_name,
                                       test.get("name"))
             py_test_lib.add_result(
                 self.result_output_file,
                 "%s %s" % (full_name, test.get("result")),
             )
Example #3
0
 def print_atomic(self, module_name, elem):
     test_cases = elem.findall('.//TestCase')
     for test_case in test_cases:
         tests = test_case.findall('.//Test')
         for atomic_test in tests:
             atomic_test_result = atomic_test.get("result")
             atomic_test_name = "%s/%s.%s" % (
                 module_name,
                 test_case.get("name"),
                 atomic_test.get("name"),
             )
             py_test_lib.add_result(
                 self.result_output_file,
                 "%s %s" % (atomic_test_name, atomic_test_result),
             )
Example #4
0
    # Poll the server until the reported sha matches the expected one.
    # NOTE(review): the `loop < 20` bound is dead code -- the `loop == 10`
    # check below always returns first; presumably one of the two constants
    # is stale.  TODO confirm the intended retry count.
    while loop < 20:
        r = requests.get(url, headers=headers)
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; confirm the server response is trusted.
        resp = yaml.load(r.text)
        # Walk the nested response: deviceImage -> image -> hash -> sha256.
        currentsha_on_server = resp.get("deviceImage").get("image").get(
            "hash").get("sha256")
        if currentsha_on_server == sha:
            return 0
        loop = loop + 1
        # Wait 30 seconds between polls.
        time.sleep(30)
        if loop == 10:
            print "FAIL: Installed sha on device did not match"
            return -1


# First verify the server agrees on the currently installed image sha;
# only then trigger the OTA update and wait for the new sha to appear.
if match_sha_on_server(args.installed_sha) == 0:
    py_test_lib.add_result(RESULT_FILE,
                           "installed-device-sha-match-server pass")
    # Request the update (payload prepared earlier in `data`).
    r = requests.put(url, data=data, headers=headers)
    # Poll until the server reports the updated sha.
    if match_sha_on_server(args.update_sha) == 0:
        py_test_lib.add_result(RESULT_FILE,
                               "ota-update-to-%s pass" % args.update_sha)
        print "PASS: %s updated to %s successfully" % (args.devicename,
                                                       args.update_sha)
    else:
        py_test_lib.add_result(RESULT_FILE,
                               "ota-update-to-%s fail" % args.update_sha)
        print "FAIL: %s update to %s failed" % (args.devicename,
                                                args.update_sha)
else:
    # Pre-update sha mismatch: do not attempt the update at all.
    # NOTE(review): "Insalled" typo in the user-facing message below;
    # fix in a behavior-changing follow-up, not here.
    py_test_lib.add_result(RESULT_FILE,
                           "installed-device-sha-match-server fail")
    print "FAIL: Insalled device sha to %s mismatched on the server" % args.devicename
                    'adb device %s is not available and reconnection attempts failed. Aborting.'
                    % device.serial_or_address)

# Abort the job if some devices were never detected by TradeFed.
if devices_to_detect:
    cleanup_and_exit(
        1,
        'TradeFed did not detect all available devices after %s retries. Aborting.'
        % tradefed_start_retry_count)

logger.info('Starting TradeFed shell test.')
# Wait for the interactive TradeFed shell prompt, then submit the test
# command.  On timeout, record a failure for launching the shell.
try:
    child.expect(prompt, timeout=60)
    child.sendline(args.TEST_PARAMS)
except pexpect.TIMEOUT:
    result = 'lunch-tf-shell fail'
    py_test_lib.add_result(RESULT_FILE, result)

# Retry policy: stop when the failure count stabilizes across
# RUNS_IF_UNCHANGED runs or MAX_NUM_RUNS is reached.
retry_check = RetryCheck(args.MAX_NUM_RUNS, args.RUNS_IF_UNCHANGED)

# Loop while TradeFed is running.
# This loop will rerun TradeFed if requested, until the number of failures stabilizes or a maximum
# number of retries is reached.
# Meanwhile, try to keep all devices accessible. For remote devices, use handshakes to inform remote
# workers that their locally connected device needs to be reset.
# The worker host side of the LAVA MultiNode messages is implemented in
# wait-and-keep-local-device-accessible.yaml
fail_to_complete = False
# Assuming TradeFed is started from a clean environment, the first run will have the id 0
# Each retry gets a new session id.
tradefed_session_id = 0
result_summary = None
Example #6
0
def result_parser(xml_file, result_format):
    """Parse a TradeFed result XML file and publish results via py_test_lib.

    Strips character references that are illegal in XML, parses the file,
    then walks every <Module> element and emits either aggregated
    pass/fail counters (AGGREGATED) or one result line per test (ATOMIC).

    :param xml_file: path to the test_result.xml produced by TradeFed.
    :param result_format: AGGREGATED or ATOMIC.
    """
    # Fix: the file handle was previously opened and never closed; use a
    # context manager so it is closed deterministically.
    with open(xml_file, 'rb') as etree_file:
        etree_content = etree_file.read()
    rx = re.compile("&#([0-9]+);|&#x([0-9a-fA-F]+);")
    endpos = len(etree_content)
    pos = 0
    while pos < endpos:
        # remove characters that don't conform to XML spec
        m = rx.search(etree_content, pos)
        if not m:
            break
        mstart, mend = m.span()
        target = m.group(1)
        if target:
            num = int(target)          # decimal reference &#NNN;
        else:
            num = int(m.group(2), 16)  # hexadecimal reference &#xHHH;
        # Legal XML characters:
        # #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
        if not (num in (0x9, 0xA, 0xD) or 0x20 <= num <= 0xD7FF
                or 0xE000 <= num <= 0xFFFD or 0x10000 <= num <= 0x10FFFF):
            etree_content = etree_content[:mstart] + etree_content[mend:]
            endpos = len(etree_content)
            # next time search again from the same position as this time
            # as the detected pattern was removed here
            pos = mstart
        else:
            # continue from the end of this match
            pos = mend

    try:
        root = ET.fromstring(etree_content)
    except ET.ParseError as e:
        logger.error('xml.etree.ElementTree.ParseError: %s' % e)
        logger.info('Please Check %s manually' % xml_file)
        sys.exit(1)
    logger.info('Test modules in %s: %s' %
                (xml_file, str(len(root.findall('Module')))))
    failures_count = 0
    for elem in root.findall('Module'):
        # Naming: Module Name + Test Case Name + Test Name
        if 'abi' in elem.attrib.keys():
            module_name = '.'.join([elem.attrib['abi'], elem.attrib['name']])
        else:
            module_name = elem.attrib['name']

        if result_format == AGGREGATED:
            tests_executed = len(elem.findall('.//Test'))
            tests_passed = len(elem.findall('.//Test[@result="pass"]'))
            tests_failed = len(elem.findall('.//Test[@result="fail"]'))

            result = '%s_executed pass %s' % (module_name, str(tests_executed))
            py_test_lib.add_result(RESULT_FILE, result)

            result = '%s_passed pass %s' % (module_name, str(tests_passed))
            py_test_lib.add_result(RESULT_FILE, result)

            failed_result = 'pass'
            if tests_failed > 0:
                failed_result = 'fail'
            result = '%s_failed %s %s' % (module_name, failed_result,
                                          str(tests_failed))
            py_test_lib.add_result(RESULT_FILE, result)

            # output result to show if the module is done or not
            tests_done = elem.get('done', 'false')
            if tests_done == 'false':
                result = '%s_done fail' % module_name
            else:
                result = '%s_done pass' % module_name
            py_test_lib.add_result(RESULT_FILE, result)

            if args.FAILURES_PRINTED > 0 and failures_count < args.FAILURES_PRINTED:
                # print failed test cases for debug
                # NOTE(review): the `>` checks below mean one extra failure
                # (FAILURES_PRINTED + 1) is logged before the skip message;
                # the log text relies on that, so behavior is kept as-is.
                test_cases = elem.findall('.//TestCase')
                for test_case in test_cases:
                    failed_tests = test_case.findall('.//Test[@result="fail"]')
                    for failed_test in failed_tests:
                        test_name = '%s/%s.%s' % (module_name,
                                                  test_case.get("name"),
                                                  failed_test.get("name"))
                        failures = failed_test.findall('.//Failure')
                        failure_msg = ''
                        for failure in failures:
                            failure_msg = '%s \n %s' % (failure_msg,
                                                        failure.get('message'))

                        logger.info('%s %s' % (test_name, failure_msg.strip()))
                        failures_count = failures_count + 1
                        if failures_count > args.FAILURES_PRINTED:
                            logger.info('There are more than %d test cases '
                                        'failed, the output for the rest '
                                        'failed test cases will be '
                                        'skipped.' % (args.FAILURES_PRINTED))
                            # break the for loop of failed_tests
                            break
                    if failures_count > args.FAILURES_PRINTED:
                        # break the for loop of test_cases
                        break

        if result_format == ATOMIC:
            test_cases = elem.findall('.//TestCase')
            for test_case in test_cases:
                tests = test_case.findall('.//Test')
                for atomic_test in tests:
                    atomic_test_result = atomic_test.get("result")
                    atomic_test_name = "%s/%s.%s" % (module_name,
                                                     test_case.get("name"),
                                                     atomic_test.get("name"))
                    py_test_lib.add_result(
                        RESULT_FILE,
                        "%s %s" % (atomic_test_name, atomic_test_result))
def result_parser(xml_file, result_format):
    """Parse a TradeFed result XML file and publish results via py_test_lib.

    Strips character references that are illegal in XML, parses the file,
    then emits aggregated pass/fail counters (AGGREGATED) or one line per
    test (ATOMIC) for every <Module> element.

    :param xml_file: path to the test_result.xml produced by TradeFed.
    :param result_format: AGGREGATED or ATOMIC.
    """
    # Fix: close the file deterministically (the handle was leaked before).
    with open(xml_file, 'rb') as etree_file:
        etree_content = etree_file.read()
    rx = re.compile("&#([0-9]+);|&#x([0-9a-fA-F]+);")
    endpos = len(etree_content)
    pos = 0
    while pos < endpos:
        # remove characters that don't conform to XML spec
        m = rx.search(etree_content, pos)
        if not m:
            break
        mstart, mend = m.span()
        target = m.group(1)
        if target:
            num = int(target)          # decimal reference &#NNN;
        else:
            num = int(m.group(2), 16)  # hexadecimal reference &#xHHH;
        # Legal XML characters:
        # #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
        if not (num in (0x9, 0xA, 0xD) or 0x20 <= num <= 0xD7FF
                or 0xE000 <= num <= 0xFFFD or 0x10000 <= num <= 0x10FFFF):
            etree_content = etree_content[:mstart] + etree_content[mend:]
            endpos = len(etree_content)
            # Fix: resume from mstart, not mend -- removing the reference
            # shifts the remaining text left, so continuing at mend skipped
            # bytes and could miss an invalid reference that immediately
            # followed the removed one.
            pos = mstart
        else:
            # valid reference: continue from the end of this match
            pos = mend

    try:
        root = ET.fromstring(etree_content)
    except ET.ParseError as e:
        logger.error('xml.etree.ElementTree.ParseError: %s' % e)
        logger.info('Please Check %s manually' % xml_file)
        sys.exit(1)
    logger.info('Test modules in %s: %s' %
                (xml_file, str(len(root.findall('Module')))))
    for elem in root.findall('Module'):
        # Naming: Module Name + Test Case Name + Test Name
        if 'abi' in elem.attrib.keys():
            module_name = '.'.join([elem.attrib['abi'], elem.attrib['name']])
        else:
            module_name = elem.attrib['name']

        if result_format == AGGREGATED:
            tests_executed = len(elem.findall('.//Test'))
            tests_passed = len(elem.findall('.//Test[@result="pass"]'))
            tests_failed = len(elem.findall('.//Test[@result="fail"]'))

            result = '%s_executed pass %s' % (module_name, str(tests_executed))
            py_test_lib.add_result(RESULT_FILE, result)

            result = '%s_passed pass %s' % (module_name, str(tests_passed))
            py_test_lib.add_result(RESULT_FILE, result)

            failed_result = 'pass'
            if tests_failed > 0:
                failed_result = 'fail'
            result = '%s_failed %s %s' % (module_name, failed_result,
                                          str(tests_failed))
            py_test_lib.add_result(RESULT_FILE, result)

        if result_format == ATOMIC:
            test_cases = elem.findall('.//TestCase')
            for test_case in test_cases:
                tests = test_case.findall('.//Test')
                for atomic_test in tests:
                    atomic_test_result = atomic_test.get("result")
                    atomic_test_name = "%s/%s.%s" % (module_name,
                                                     test_case.get("name"),
                                                     atomic_test.get("name"))
                    py_test_lib.add_result(
                        RESULT_FILE,
                        "%s %s" % (atomic_test_name, atomic_test_result))
    command = "android-cts/tools/cts-tradefed run commandAndExit " + args.TEST_PARAMS
# Select the tradefed launcher matching the requested suite path.
if args.TEST_PATH == "android-vts":
    os.environ["VTS_ROOT"] = os.getcwd()
    command = "android-vts/tools/vts-tradefed run commandAndExit " + args.TEST_PARAMS

# `command` is only assigned for recognized suite paths; bail out otherwise.
if command is None:
    logger.error("Not supported path: %s" % args.TEST_PATH)
    sys.exit(1)

# Launch tradefed, folding stderr into the stdout log file.
child = subprocess.Popen(shlex.split(command),
                         stderr=subprocess.STDOUT,
                         stdout=tradefed_stdout)
# wait() returns the exit status; non-zero means the run did not complete.
fail_to_complete = child.wait()

if fail_to_complete:
    py_test_lib.add_result(RESULT_FILE, "tradefed-test-run fail")
else:
    py_test_lib.add_result(RESULT_FILE, "tradefed-test-run pass")

logger.info("Tradefed test finished")
tradefed_stdout.close()
# Stop the background logcat capture and close its output file.
tradefed_logcat.kill()
tradefed_logcat_out.close()

# Locate and parse test result.
result_dir = "%s/results" % args.TEST_PATH
test_result = "test_result.xml"
if os.path.exists(result_dir) and os.path.isdir(result_dir):
    for root, dirs, files in os.walk(result_dir):
        for name in files:
            if name == test_result:
    def print_aggregated(self, module_name, elem, failures_to_print):
        """Publish aggregated counters for *module_name* and log failures.

        Emits four result lines (<module>_executed/_passed/_failed/_done)
        through py_test_lib.add_result, then logs up to
        *failures_to_print* failing tests via self.logger.

        Returns Result(num_printed_failures, failures_skipped).
        """
        Result = collections.namedtuple(
            'Result', ['num_printed_failures', 'failures_skipped'])

        out = self.result_output_file
        counts = {
            'executed': len(elem.findall('.//Test')),
            'passed': len(elem.findall('.//Test[@result="pass"]')),
            'failed': len(elem.findall('.//Test[@result="fail"]')),
        }

        py_test_lib.add_result(
            out,
            '%s_executed pass %s' % (module_name, str(counts['executed'])))
        py_test_lib.add_result(
            out,
            '%s_passed pass %s' % (module_name, str(counts['passed'])))

        if counts['failed'] > 0:
            failed_result = 'fail'
        else:
            failed_result = 'pass'
        py_test_lib.add_result(
            out,
            '%s_failed %s %s' % (module_name, failed_result,
                                 str(counts['failed'])))

        # The 'done' attribute marks whether the module completed its run.
        if elem.get('done', 'false') == 'false':
            py_test_lib.add_result(out, '%s_done fail' % module_name)
        else:
            py_test_lib.add_result(out, '%s_done pass' % module_name)

        if failures_to_print == 0:
            return Result(0, False)

        # Log failing tests for debugging, stopping at the caller's limit.
        printed = 0
        for case in elem.findall('.//TestCase'):
            for failing in case.findall('.//Test[@result="fail"]'):
                if printed == failures_to_print:
                    return Result(printed, True)
                name = '%s/%s.%s' % (
                    module_name, case.get("name"), failing.get("name"))
                msg = ''
                for failure in failing.findall('.//Failure'):
                    msg = '%s \n %s' % (msg, failure.get('message'))
                self.logger.info('%s %s' % (name, msg.strip()))
                printed += 1

        return Result(printed, False)
def result_parser(xml_file, result_format):
    """Parse a TradeFed result XML file and publish results via py_test_lib.

    Strips character references that are illegal in XML, parses the file,
    then walks every <Module> element and emits aggregated pass/fail
    counters (AGGREGATED) or one result line per test (ATOMIC).

    :param xml_file: path to the test_result.xml produced by TradeFed.
    :param result_format: AGGREGATED or ATOMIC.
    """
    # Fix: close the file deterministically (the handle was leaked before).
    with open(xml_file, 'rb') as etree_file:
        etree_content = etree_file.read()
    rx = re.compile("&#([0-9]+);|&#x([0-9a-fA-F]+);")
    endpos = len(etree_content)
    pos = 0
    while pos < endpos:
        # remove characters that don't conform to XML spec
        m = rx.search(etree_content, pos)
        if not m:
            break
        mstart, mend = m.span()
        target = m.group(1)
        if target:
            num = int(target)          # decimal reference &#NNN;
        else:
            num = int(m.group(2), 16)  # hexadecimal reference &#xHHH;
        # Legal XML characters:
        # #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
        if not (num in (0x9, 0xA, 0xD) or 0x20 <= num <= 0xD7FF
                or 0xE000 <= num <= 0xFFFD or 0x10000 <= num <= 0x10FFFF):
            etree_content = etree_content[:mstart] + etree_content[mend:]
            endpos = len(etree_content)
            # Fix: resume from mstart, not mend -- removing the reference
            # shifts the remaining text left, so continuing at mend skipped
            # bytes and could miss an invalid reference that immediately
            # followed the removed one.
            pos = mstart
        else:
            # valid reference: continue from the end of this match
            pos = mend

    try:
        root = ET.fromstring(etree_content)
    except ET.ParseError as e:
        logger.error('xml.etree.ElementTree.ParseError: %s' % e)
        logger.info('Please Check %s manually' % xml_file)
        sys.exit(1)
    logger.info('Test modules in %s: %s'
                % (xml_file, str(len(root.findall('Module')))))
    failures_count = 0
    for elem in root.findall('Module'):
        # Naming: Module Name + Test Case Name + Test Name
        if 'abi' in elem.attrib.keys():
            module_name = '.'.join([elem.attrib['abi'], elem.attrib['name']])
        else:
            module_name = elem.attrib['name']

        if result_format == AGGREGATED:
            tests_executed = len(elem.findall('.//Test'))
            tests_passed = len(elem.findall('.//Test[@result="pass"]'))
            tests_failed = len(elem.findall('.//Test[@result="fail"]'))

            result = '%s_executed pass %s' % (module_name, str(tests_executed))
            py_test_lib.add_result(RESULT_FILE, result)

            result = '%s_passed pass %s' % (module_name, str(tests_passed))
            py_test_lib.add_result(RESULT_FILE, result)

            failed_result = 'pass'
            if tests_failed > 0:
                failed_result = 'fail'
            result = '%s_failed %s %s' % (module_name, failed_result,
                                          str(tests_failed))
            py_test_lib.add_result(RESULT_FILE, result)

            # output result to show if the module is done or not
            tests_done = elem.get('done', 'false')
            if tests_done == 'false':
                result = '%s_done fail' % module_name
            else:
                result = '%s_done pass' % module_name
            py_test_lib.add_result(RESULT_FILE, result)

            if args.FAILURES_PRINTED > 0 and failures_count < args.FAILURES_PRINTED:
                # print failed test cases for debug
                # NOTE(review): the `>` checks below mean one extra failure
                # (FAILURES_PRINTED + 1) is logged before the skip message;
                # the log text relies on that, so behavior is kept as-is.
                test_cases = elem.findall('.//TestCase')
                for test_case in test_cases:
                    failed_tests = test_case.findall('.//Test[@result="fail"]')
                    for failed_test in failed_tests:
                        test_name = '%s/%s.%s' % (module_name,
                                                  test_case.get("name"),
                                                  failed_test.get("name"))
                        failures = failed_test.findall('.//Failure')
                        failure_msg = ''
                        for failure in failures:
                            failure_msg = '%s \n %s' % (failure_msg,
                                                        failure.get('message'))

                        logger.info('%s %s' % (test_name, failure_msg.strip()))
                        failures_count = failures_count + 1
                        if failures_count > args.FAILURES_PRINTED:
                            logger.info('There are more than %d test cases '
                                        'failed, the output for the rest '
                                        'failed test cases will be '
                                        'skipped.' % (args.FAILURES_PRINTED))
                            # break the for loop of failed_tests
                            break
                    if failures_count > args.FAILURES_PRINTED:
                        # break the for loop of test_cases
                        break

        if result_format == ATOMIC:
            test_cases = elem.findall('.//TestCase')
            for test_case in test_cases:
                tests = test_case.findall('.//Test')
                for atomic_test in tests:
                    atomic_test_result = atomic_test.get("result")
                    atomic_test_name = "%s/%s.%s" % (module_name,
                                                     test_case.get("name"),
                                                     atomic_test.get("name"))
                    py_test_lib.add_result(
                        RESULT_FILE, "%s %s" % (atomic_test_name,
                                                atomic_test_result))
Example #11
0
}
# Request payload sent to the server to move the device image to update_sha.
data = json.dumps({"image": {"hash": args.update_sha}})


def match_sha_on_server(sha):
    """Poll the server until its reported image sha256 equals *sha*.

    Performs up to 10 GET requests, sleeping 30 seconds after each
    unsuccessful attempt.

    :param sha: expected sha256 hex string.
    :return: 0 on match, -1 if the server never reported the sha.
    """
    # Fix: the original `while loop < 20` bound was dead code (the
    # `loop == 10` check always returned first) and the function could
    # fall through returning None if either constant was edited.  Use a
    # single explicit bound and a guaranteed failure return.
    max_attempts = 10
    for _ in range(max_attempts):
        r = requests.get(url, headers=headers)
        # NOTE(review): yaml.load() without an explicit Loader can run
        # arbitrary constructors on untrusted input; prefer yaml.safe_load
        # if the server response is not fully trusted.
        resp = yaml.load(r.text)
        # Walk the nested response: deviceImage -> image -> hash -> sha256.
        currentsha_on_server = resp.get("deviceImage").get("image").get(
            "hash").get("sha256")
        if currentsha_on_server == sha:
            return 0
        time.sleep(30)
    print("FAIL: Installed sha on device did not match")
    return -1

# First verify the server agrees on the currently installed image sha;
# only then trigger the OTA update and wait for the new sha to appear.
if match_sha_on_server(args.installed_sha) == 0:
    py_test_lib.add_result(RESULT_FILE, "installed-device-sha-match-server pass")
    # Request the update (payload prepared earlier in `data`).
    r = requests.put(url, data=data, headers=headers)
    # Poll until the server reports the updated sha.
    if match_sha_on_server(args.update_sha) == 0:
        py_test_lib.add_result(RESULT_FILE, "ota-update-to-%s pass" % args.update_sha)
        print "PASS: %s updated to %s successfully" % (args.devicename, args.update_sha)
    else:
        py_test_lib.add_result(RESULT_FILE, "ota-update-to-%s fail" % args.update_sha)
        print "FAIL: %s update to %s failed" % (args.devicename, args.update_sha)
else:
    # Pre-update sha mismatch: do not attempt the update at all.
    # NOTE(review): "Insalled" typo in the user-facing message below;
    # fix in a behavior-changing follow-up, not here.
    py_test_lib.add_result(RESULT_FILE, "installed-device-sha-match-server fail")
    print "FAIL: Insalled device sha to %s mismatched on the server" % args.devicename