Example #1
def main():
    log_file = "automated_test_result.txt"
    summary_file = "automated_test_summary.txt"

    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d',
                        '--debug',
                        action="store_true",
                        help='Enable debug logging')
    args = parser.parse_args()

    # Setup logging
    if os.path.exists(log_file):
        os.remove(log_file)
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level)
    logger = Logger(log_file)
    sys.stdout = logger
    sys.stderr = logger

    test_list = []
    board_list = []
    result_list = []

    # Put together list of tests
    test = Test("Basic Test", lambda board: basic_test(board, None))
    test_list.append(test)
    test_list.append(GdbServerJsonTest())
    test_list.append(ConnectTest())
    test_list.append(SpeedTest())
    test_list.append(CortexTest())
    test_list.append(FlashTest())
    test_list.append(GdbTest())

    # Put together list of boards to test
    board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False)

    start = time()
    for board in board_list:
        print("--------------------------")
        print("TESTING BOARD %s" % board.getUniqueID())
        print("--------------------------")
        for test in test_list:
            test_start = time()
            result = test.run(board)
            test_stop = time()
            result.time = test_stop - test_start
            result_list.append(result)
    stop = time()
    test_time = (stop - start)

    print_summary(test_list, result_list, test_time)
    with open(summary_file, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)

    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
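
The Test class, the result objects, and the individual test classes (GdbServerJsonTest, ConnectTest, SpeedTest, CortexTest, FlashTest, GdbTest) come from pyOCD's own test suite and are not shown in these excerpts. Below is a minimal sketch, not pyOCD's actual implementation, of the Test/TestResult interface that the code above relies on: Test(name, function), test.run(board), result.time, Test.print_results(), Test.all_tests_pass(), and test.print_perf_info().

# Minimal sketch of the interface assumed by the examples; NOT pyOCD's real classes.
from __future__ import print_function


class TestResult(object):
    def __init__(self, test, passed):
        self.test = test          # the Test that produced this result
        self.passed = passed      # True if the test succeeded
        self.time = 0             # elapsed seconds, filled in by the caller


class Test(object):
    def __init__(self, name, function):
        self.name = name
        self.function = function  # callable taking a board, returning truthy on success

    def run(self, board):
        try:
            passed = bool(self.function(board))
        except Exception:
            passed = False
        return TestResult(self, passed)

    def print_perf_info(self, result_list, output_file=None):
        # Subclasses with performance data would override this; the base
        # version prints nothing, which is why the examples call it unconditionally.
        pass

    @staticmethod
    def print_results(result_list, output_file=None):
        # file=None makes print() write to stdout, matching print_summary()'s
        # dual console/file behavior.
        for result in result_list:
            status = "PASS" if result.passed else "FAIL"
            print("%-30s %s (%.3f s)" % (result.test.name, status, result.time),
                  file=output_file)

    @staticmethod
    def all_tests_pass(result_list):
        return bool(result_list) and all(r.passed for r in result_list)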
Example #2
def print_summary(test_list, result_list, test_time, output_file=None):
    for test in test_list:
        test.print_perf_info(result_list, output_file=output_file)

    Test.print_results(result_list, output_file=output_file)
    print("", file=output_file)
    print("Test Time: %.3f" % test_time, file=output_file)
    if Test.all_tests_pass(result_list):
        print("All tests passed", file=output_file)
    else:
        print("One or more tests has failed!", file=output_file)
Example #3
def main():
    log_file = "automated_test_result.txt"
    summary_file = "automated_test_summary.txt"

    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug',
                        action="store_true", help='Enable debug logging')
    args = parser.parse_args()

    # Setup logging
    if os.path.exists(log_file):
        os.remove(log_file)
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level)
    logger = Logger(log_file)
    sys.stdout = logger
    sys.stderr = logger

    test_list = []
    board_list = []
    result_list = []

    # Put together list of tests
    test = Test("Basic Test", lambda board: basic_test(board, None))
    test_list.append(test)
    test_list.append(GdbServerJsonTest())
    test_list.append(SpeedTest())
    test_list.append(CortexTest())
    test_list.append(FlashTest())
    test_list.append(GdbTest())

    # Put together list of boards to test
    board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False)

    start = time()
    for board in board_list:
        print("--------------------------")
        print("TESTING BOARD %s" % board.getUniqueID())
        print("--------------------------")
        for test in test_list:
            test_start = time()
            result = test.run(board)
            test_stop = time()
            result.time = test_stop - test_start
            result_list.append(result)
    stop = time()
    test_time = (stop - start)

    print_summary(test_list, result_list, test_time)
    with open(summary_file, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)

    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
Example #4
def print_summary(test_list, result_list, test_time, output_file=None):
    for test in test_list:
        test.print_perf_info(result_list, output_file=output_file)

    Test.print_results(result_list, output_file=output_file)
    print("", file=output_file)
    print("Test Time: %s" % test_time, file=output_file)
    if Test.all_tests_pass(result_list):
        print("All tests passed", file=output_file)
    else:
        print("One or more tests has failed!", file=output_file)
Example #5
def main():
    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
        help='Set number of concurrent board tests (default is 1)')
    parser.add_argument('-b', '--board', action="append", metavar="ID", help="Limit testing to boards with specified unique IDs. Multiple boards can be listed.")
    args = parser.parse_args()
    
    # Allow CI to override the number of concurrent jobs.
    if 'CI_JOBS' in os.environ:
        args.jobs = int(os.environ['CI_JOBS'])
    
    # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
    # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
    # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
    # version 3.4+ is the multiprocessing.set_start_method() API available that lets us
    # switch to the 'spawn' method, i.e. exec().
    if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
        print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
        args.jobs = 1

    # Setup logging based on concurrency and quiet option.
    level = logging.DEBUG if args.debug else logging.INFO
    if args.jobs == 1 and not args.quiet:
        log_file = LOG_FILE_TEMPLATE.format(get_env_file_name())
        # Create common log file.
        if os.path.exists(log_file):
            os.remove(log_file)
        logToConsole = True
        commonLogFile = open(log_file, "a")
    else:
        logToConsole = False
        commonLogFile = None

    board_list = []
    result_list = []

    # Put together list of boards to test
    board_list = ConnectHelper.get_all_connected_probes(blocking=False)
    board_id_list = sorted(b.unique_id for b in board_list)
    
    # Filter boards.
    if args.board:
        board_id_list = [b for b in board_id_list if any(c for c in args.board if c.lower() in b.lower())]

    # If only 1 job was requested, don't bother spawning processes.
    start = time()
    if args.jobs == 1:
        for n, board_id in enumerate(board_id_list):
            result_list += test_board(board_id, n, level, logToConsole, commonLogFile)
    else:
        # Create a pool of processes to run tests.
        try:
            pool = mp.Pool(args.jobs)
            
            # Issue board test job to process pool.
            async_results = [pool.apply_async(test_board, (board_id, n, level, logToConsole, commonLogFile))
                             for n, board_id in enumerate(board_id_list)]
            
            # Gather results.
            for r in async_results:
                result_list += r.get(timeout=JOB_TIMEOUT)
        finally:
            pool.close()
            pool.join()
    stop = time()
    test_time = (stop - start)

    print_summary(test_list, result_list, test_time)
    summary_file = SUMMARY_FILE_TEMPLATE.format(get_env_file_name())
    with open(summary_file, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)
    
    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
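
This variant (like Examples #7 and #8 below) delegates the per-board work to test_board(), which is defined elsewhere in the same script and is not included in these excerpts. The hypothetical stub below reproduces only the contract visible at the call sites: the arguments passed in, and the fact that a list of per-test result objects is returned (so the caller can do result_list += ...).

# Hypothetical stub showing the call contract of test_board(); the real function
# in pyOCD's script also manages per-board log files and exception reporting.
def test_board(board_id, n, level, logToConsole, commonLogFile):
    """Run every test in the module-level test_list against one board.

    board_id      -- unique ID of the probe/board to open
    n             -- index of this job (used for log naming in the real script)
    level         -- logging level (logging.DEBUG or logging.INFO)
    logToConsole  -- True when a single, non-quiet job should echo to the console
    commonLogFile -- shared open file object, or None when each job logs separately
    """
    result_list = []
    # ... open the board, run each test, append timed result objects ...
    return result_list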
Example #6
    # Put together list of boards to test
    board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False)

    start = time()
    for board in board_list:
        print("--------------------------")
        print("TESTING BOARD %s" % board.getUniqueID())
        print("--------------------------")
        for test in test_list:
            test_start = time()
            result = test.run(board)
            test_stop = time()
            result.time = test_stop - test_start
            result_list.append(result)
    stop = time()

    for test in test_list:
        test.print_perf_info(result_list)

    Test.print_results(result_list)
    print("")
    print("Test Time: %s" % (stop - start))
    if Test.all_tests_pass(result_list):
        print("All tests passed")
        exit(0)
    else:
        print("One or more tests has failed!")
        exit(-1)

    #TODO - check if any threads are still running?
Example #7
def main():
    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
        help='Set number of concurrent board tests (default is 1)')
    args = parser.parse_args()
    
    # Force jobs to 1 when running under CI until concurrency issues with enumerating boards are
    # solved. Specifically, the connect test has intermittently failed to open boards on Linux and
    # Win7. This is only done under CI, and in this script, to make testing concurrent runs easy.
    if 'CI_TEST' in os.environ:
        args.jobs = 1
    
    # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
    # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
    # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
    # version 3.4+ is the multiprocessing.set_start_method() API available that lets us
    # switch to the 'spawn' method, i.e. exec().
    if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
        print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
        args.jobs = 1

    # Setup logging based on concurrency and quiet option.
    level = logging.DEBUG if args.debug else logging.INFO
    if args.jobs == 1 and not args.quiet:
        # Create common log file.
        if os.path.exists(LOG_FILE):
            os.remove(LOG_FILE)
        logToConsole = True
        commonLogFile = open(LOG_FILE, "a")
    else:
        logToConsole = False
        commonLogFile = None

    board_list = []
    result_list = []

    # Put together list of boards to test
    board_list = MbedBoard.getAllConnectedBoards(close=True, blocking=False)
    board_id_list = sorted(b.getUniqueID() for b in board_list)

    # If only 1 job was requested, don't bother spawning processes.
    start = time()
    if args.jobs == 1:
        for n, board_id in enumerate(board_id_list):
            result_list += test_board(board_id, n, level, logToConsole, commonLogFile)
    else:
        # Create a pool of processes to run tests.
        try:
            pool = mp.Pool(args.jobs)
            
            # Issue board test job to process pool.
            async_results = [pool.apply_async(test_board, (board_id, n, level, logToConsole, commonLogFile))
                             for n, board_id in enumerate(board_id_list)]
            
            # Gather results.
            for r in async_results:
                result_list += r.get(timeout=JOB_TIMEOUT)
        finally:
            pool.close()
            pool.join()
    stop = time()
    test_time = (stop - start)

    print_summary(test_list, result_list, test_time)
    with open(SUMMARY_FILE, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)
    
    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
Example #8
def main():
    parser = argparse.ArgumentParser(description='pyOCD automated testing')
    parser.add_argument('-d', '--debug', action="store_true", help='Enable debug logging')
    parser.add_argument('-q', '--quiet', action="store_true", help='Hide test progress for 1 job')
    parser.add_argument('-j', '--jobs', action="store", default=1, type=int, metavar="JOBS",
        help='Set number of concurrent board tests (default is 1)')
    parser.add_argument('-b', '--board', action="append", metavar="ID", help="Limit testing to boards with specified unique IDs. Multiple boards can be listed.")
    args = parser.parse_args()
    
    # Force jobs to 1 when running under CI until concurrency issues with enumerating boards are
    # solved. Specifically, the connect test has intermittently failed to open boards on Linux and
    # Win7. This is only done under CI, and in this script, to make testing concurrent runs easy.
    if 'CI_TEST' in os.environ:
        args.jobs = 1
    
    # Disable multiple jobs on macOS prior to Python 3.4. By default, multiprocessing uses
    # fork() on Unix, which doesn't work on the Mac because CoreFoundation requires exec()
    # to be used in order to init correctly (CoreFoundation is used in hidapi). Only on Python
    # version 3.4+ is the multiprocessing.set_start_method() API available that lets us
    # switch to the 'spawn' method, i.e. exec().
    if args.jobs > 1 and sys.platform.startswith('darwin') and sys.version_info[0:2] < (3, 4):
        print("WARNING: Cannot support multiple jobs on macOS prior to Python 3.4. Forcing 1 job.")
        args.jobs = 1

    # Setup logging based on concurrency and quiet option.
    level = logging.DEBUG if args.debug else logging.INFO
    if args.jobs == 1 and not args.quiet:
        # Create common log file.
        if os.path.exists(LOG_FILE):
            os.remove(LOG_FILE)
        logToConsole = True
        commonLogFile = open(LOG_FILE, "a")
    else:
        logToConsole = False
        commonLogFile = None

    board_list = []
    result_list = []

    # Put together list of boards to test
    board_list = ConnectHelper.get_all_connected_probes(blocking=False)
    board_id_list = sorted(b.unique_id for b in board_list)
    
    # Filter boards.
    if args.board:
        board_id_list = [b for b in board_id_list if any(c for c in args.board if c.lower() in b.lower())]

    # If only 1 job was requested, don't bother spawning processes.
    start = time()
    if args.jobs == 1:
        for n, board_id in enumerate(board_id_list):
            result_list += test_board(board_id, n, level, logToConsole, commonLogFile)
    else:
        # Create a pool of processes to run tests.
        try:
            pool = mp.Pool(args.jobs)
            
            # Issue board test job to process pool.
            async_results = [pool.apply_async(test_board, (board_id, n, level, logToConsole, commonLogFile))
                             for n, board_id in enumerate(board_id_list)]
            
            # Gather results.
            for r in async_results:
                result_list += r.get(timeout=JOB_TIMEOUT)
        finally:
            pool.close()
            pool.join()
    stop = time()
    test_time = (stop - start)

    print_summary(test_list, result_list, test_time)
    with open(SUMMARY_FILE, "w") as output_file:
        print_summary(test_list, result_list, test_time, output_file)
    generate_xml_results(result_list)
    
    exit_val = 0 if Test.all_tests_pass(result_list) else -1
    exit(exit_val)
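
The macOS comment repeated in Examples #5, #7, and #8 points at multiprocessing.set_start_method(), available since Python 3.4, as the way to switch worker creation from fork() to the exec()-based 'spawn' method. The examples themselves never make that call; the sketch below shows how a script could opt in on macOS when the interpreter is new enough.

# Sketch (not part of the script above): selecting the 'spawn' start method on
# macOS under Python 3.4+, avoiding the fork()/CoreFoundation problem described
# in the comments of Examples #5, #7, and #8. set_start_method() must be called
# once, from the main module, before any Pool is created.
import multiprocessing as mp
import sys

if __name__ == '__main__':
    if sys.platform.startswith('darwin') and sys.version_info[:2] >= (3, 4):
        mp.set_start_method('spawn')   # use exec() instead of fork() for workers
    # ... parse arguments and create mp.Pool(args.jobs) as in the examples above ...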