def test_find_tests_no_app_config(self, mock_prepare_toolchain, mock_scan_resources):
        """
        Test find_tests correctly deals with no app_config

        :param mock_prepare_toolchain: mock of function prepare_toolchain
        :param mock_scan_resources: mock of function scan_resources
        """
        mock_scan_resources().inc_dirs.return_value = []

        find_tests(self.base_dir, self.target, self.toolchain_name)

        args = mock_prepare_toolchain.call_args
        self.assertIn('app_config', args[1],
                      "prepare_toolchain was not called with app_config")
        self.assertIsNone(args[1]['app_config'],
                          "prepare_toolchain was called with an incorrect app_config")
Example #2
def test_find_tests_app_config(base_dir, target, toolchain_name, app_config):
    """
    Test find_tests for correct use of app_config

    :param base_dir: dummy value for the test base directory
    :param target: the target to "test" for
    :param toolchain_name: the toolchain to use for "testing"
    :param app_config: Application configuration parameter to find tests
    """
    set_targets_json_location()
    with patch('tools.test_api.scan_resources') as mock_scan_resources,\
         patch('tools.test_api.prepare_toolchain') as mock_prepare_toolchain:
        mock_scan_resources().inc_dirs.return_value = []

        find_tests(base_dir, target, toolchain_name, app_config=app_config)

        args = mock_prepare_toolchain.call_args
        assert 'app_config' in args[1],\
            "prepare_toolchain was not called with app_config"
        assert args[1]['app_config'] == app_config,\
            "prepare_toolchain was called with an incorrect app_config"
Example #4
def check_valid_mbed_os(test):
    """Check if the specified name is in all_os_tests
    args:
        test: string name to index all_os_tests
    returns: tuple of test_name and source location of test,
        as given by find_tests"""
    all_os_tests = find_tests(ROOT, "K64F", "ARM")
    if test in all_os_tests.keys():
        return (test, all_os_tests[test])
    else:
        supported = columnate([t for t in all_os_tests.keys()])
        raise ArgumentTypeError("Program with name '{0}' not found. "
                                "Supported tests are: \n{1}".format(test,supported))
Example #6
        mcu = options.mcu[0]

        # Toolchain
        if options.tool is None:
            args_error(parser, "argument -t/--tool is required")
        toolchain = options.tool[0]

        if not TOOLCHAIN_CLASSES[toolchain].check_executable():
            search_path = TOOLCHAIN_PATHS[toolchain] or "No path set"
            args_error(parser, "Could not find executable for %s.\n"
                               "Currently set search path: %s"
                       % (toolchain, search_path))

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path, mcu, toolchain, 
                                        app_config=options.app_config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print "[Warning] Test with name '%s' was not found in the available tests" % (name)
        else:
            tests = all_tests
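The name filter above accepts shell-style globs via fnmatch. A quick illustration of what the loop matches, using made-up test names:

import fnmatch

all_tests = {
    "tests-mbed_drivers-ticker": "TESTS/mbed_drivers/ticker",
    "tests-mbed_drivers-timer": "TESTS/mbed_drivers/timer",
    "tests-mbedmicro-rtos-mail": "TESTS/mbedmicro-rtos/mail",
}
tests = {name: path for name, path in all_tests.items()
         if fnmatch.fnmatch(name, "tests-mbed_drivers-*")}
print(sorted(tests))  # ['tests-mbed_drivers-ticker', 'tests-mbed_drivers-timer']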
Example #7
                args_error(
                    parser,
                    "argument --test-config contains invalid path or identifier"
                )
        elif options.app_config:
            config = options.app_config
        else:
            config = Config.find_app_config(options.source_dir)

        if not config:
            config = get_default_config(options.source_dir or ['.'], mcu)

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(
                find_tests(path, mcu, toolchain, app_config=config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(
                        fnmatch.fnmatch(testname, name)
                        for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print("[Warning] Test with name '%s' was not found in the "
Example #8
                          help="Verbose diagnostic output")

        (options, args) = parser.parse_args()

        # Filter tests by path if specified
        if options.paths:
            all_paths = options.paths.split(",")
        else:
            all_paths = ["."]

        all_tests = {}
        tests = {}

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names.split(",")
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(
                        fnmatch.fnmatch(testname, name)
                        for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print "[Warning] Test with name '%s' was not found in the available tests" % (
Example #9
def main():
    error = False
    end_warnings = []  # defined up front so print_end_warnings() at the end is safe even if parsing fails
    try:
        # Parse Options
        parser = get_default_options_parser(add_app_config=True)

        parser.add_argument("-D",
                            action="append",
                            dest="macros",
                            help="Add a macro definition")

        parser.add_argument(
            "-j",
            "--jobs",
            type=int,
            dest="jobs",
            default=0,
            help=
            "Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)"
        )

        parser.add_argument(
            "--source",
            dest="source_dir",
            type=argparse_filestring_type,
            default=None,
            help=
            "The source (input) directory (for sources other than tests). Defaults to current directory.",
            action="append")

        parser.add_argument("--build",
                            dest="build_dir",
                            type=argparse_dir_not_parent(ROOT),
                            default=None,
                            help="The build (output) directory")

        parser.add_argument(
            "-l",
            "--list",
            action="store_true",
            dest="list",
            default=False,
            help="List (recursively) available tests in order and exit")

        parser.add_argument(
            "-p",
            "--paths",
            dest="paths",
            type=argparse_many(argparse_filestring_type),
            default=None,
            help=
            "Limit the tests to those within the specified comma separated list of paths"
        )

        format_choices = ["list", "json"]
        format_default_choice = "list"
        format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (
            ", ".join(format_choices), format_default_choice)
        parser.add_argument("-f",
                            "--format",
                            dest="format",
                            type=argparse_lowercase_type(
                                format_choices, "format"),
                            default=format_default_choice,
                            help=format_help)

        parser.add_argument(
            "--continue-on-build-fail",
            action="store_true",
            dest="continue_on_build_fail",
            default=None,
            help="Continue trying to build all tests if a build failure occurs"
        )

        # TODO: validate the names instead of just passing through str
        parser.add_argument(
            "-n",
            "--names",
            dest="names",
            type=argparse_many(str),
            default=None,
            help="Limit the tests to a comma separated list of names")

        parser.add_argument("--test-config",
                            dest="test_config",
                            type=str,
                            default=None,
                            help="Test config for a module")

        parser.add_argument(
            "--test-spec",
            dest="test_spec",
            default=None,
            help=
            "Destination path for a test spec file that can be used by the Greentea automated test tool"
        )

        parser.add_argument(
            "--build-report-junit",
            dest="build_report_junit",
            default=None,
            help="Destination path for a build report in the JUnit xml format")
        parser.add_argument("--build-data",
                            dest="build_data",
                            default=None,
                            help="Dump build_data to this file")

        parser.add_argument("-v",
                            "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Verbose diagnostic output")

        parser.add_argument(
            "--silent",
            action="store_true",
            dest="silent",
            default=False,
            help="Silent diagnostic output (no copy, compile notification)")

        parser.add_argument("--stats-depth",
                            type=int,
                            dest="stats_depth",
                            default=2,
                            help="Depth level for static memory report")

        parser.add_argument(
            "--ignore",
            dest="ignore",
            type=argparse_many(str),
            default=None,
            help=
            "Comma separated list of patterns to add to mbedignore (eg. ./main.cpp)"
        )

        parser.add_argument("--icetea",
                            action="store_true",
                            dest="icetea",
                            default=False,
                            help="Only icetea tests")

        parser.add_argument("--greentea",
                            action="store_true",
                            dest="greentea",
                            default=False,
                            help="Only greentea tests")

        options = parser.parse_args()

        # Filter tests by path if specified
        if options.paths:
            all_paths = options.paths
        else:
            all_paths = ["."]

        all_tests = {}
        tests = {}
        end_warnings = []

        # By default, both test tools are enabled
        if not (options.greentea or options.icetea):
            options.greentea = True
            options.icetea = True

        # Target
        if options.mcu is None:
            args_error(parser, "argument -m/--mcu is required")
        mcu = extract_mcus(parser, options)[0]
        target = Target.get_target(mcu)

        # Toolchain
        if options.tool is None:
            args_error(parser, "argument -t/--tool is required")
        toolchain = options.tool[0]

        try:
            toolchain_name, internal_tc_name, end_warnings = find_valid_toolchain(
                target, toolchain)
        except NoValidToolchainException as e:
            print_end_warnings(e.end_warnings)
            args_error(parser, str(e))

        # Assign config file. Precedence: test_config>app_config
        # TODO: merge configs if both given
        if options.test_config:
            config = get_test_config(options.test_config, mcu)
            if not config:
                args_error(
                    parser,
                    "argument --test-config contains invalid path or identifier"
                )
        elif options.app_config:
            config = options.app_config
        else:
            config = Config.find_app_config(options.source_dir)

        if not config:
            config = get_default_config(options.source_dir or ['.'], mcu)

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(
                find_tests(base_dir=path,
                           target_name=mcu,
                           toolchain_name=toolchain_name,
                           icetea=options.icetea,
                           greentea=options.greentea,
                           app_config=config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(
                        fnmatch.fnmatch(testname, name)
                        for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print("[Warning] Test with name '%s' was not found in the "
                          "available tests" % (name))
        else:
            tests = all_tests

        if options.list:
            # Print available tests in order and exit
            print_tests(tests, options.format)
            sys.exit(0)
        else:
            # Build all tests
            if not options.build_dir:
                args_error(parser, "argument --build is required")

            base_source_paths = options.source_dir

            # Default base source path is the current directory
            if not base_source_paths:
                base_source_paths = ['.']

            build_report = {}
            build_properties = {}

            library_build_success = False
            profile = extract_profile(parser, options, internal_tc_name)
            try:
                resource_filter = None
                if target.is_PSA_secure_target:
                    resource_filter = OsAndSpeResourceFilter()
                    generate_psa_sources(source_dirs=base_source_paths,
                                         ignore_paths=[options.build_dir])

                # Build sources
                notify = TerminalNotifier(options.verbose, options.silent)
                build_library(base_source_paths,
                              options.build_dir,
                              mcu,
                              toolchain_name,
                              jobs=options.jobs,
                              clean=options.clean,
                              report=build_report,
                              properties=build_properties,
                              name="mbed-build",
                              macros=options.macros,
                              notify=notify,
                              archive=False,
                              app_config=config,
                              build_profile=profile,
                              ignore=options.ignore,
                              resource_filter=resource_filter)

                library_build_success = True
            except ToolException as e:
                # ToolException output is handled by the build log
                print("[ERROR] " + str(e))
            except NotSupportedException as e:
                # NotSupportedException is handled by the build log
                print("[ERROR] " + str(e))
            except Exception as e:
                if options.verbose:
                    import traceback
                    traceback.print_exc()
                # Some other exception occurred, print the error message
                print(e)

            if not library_build_success:
                print("Failed to build library")
            else:
                if target.is_PSA_secure_target:
                    resource_filter = SpeOnlyResourceFilter()
                else:
                    resource_filter = None

                # Build all the tests
                notify = TerminalNotifier(options.verbose, options.silent)
                test_build_success, test_build = build_tests(
                    tests, [os.path.relpath(options.build_dir)],
                    options.build_dir,
                    mcu,
                    toolchain_name,
                    clean=options.clean,
                    report=build_report,
                    properties=build_properties,
                    macros=options.macros,
                    notify=notify,
                    jobs=options.jobs,
                    continue_on_build_fail=options.continue_on_build_fail,
                    app_config=config,
                    build_profile=profile,
                    stats_depth=options.stats_depth,
                    ignore=options.ignore,
                    resource_filter=resource_filter)

                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    write_json_to_file(test_spec_from_test_builds(test_build),
                                       options.test_spec)

            # If a path to a JUnit build report spec is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT,
                                                 package="build")
                report_exporter.report_to_file(
                    build_report,
                    options.build_report_junit,
                    test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                print()
                print_build_memory_usage(build_report)

            print_report_exporter = ReportExporter(ResultExporterType.PRINT,
                                                   package="build")
            status = print_report_exporter.report(build_report)
            if options.build_data:
                merge_build_data(options.build_data, build_report, "test")

            if status:
                sys.exit(0)
            else:
                sys.exit(1)

    except KeyboardInterrupt as e:
        print("\n[CTRL+c] exit")
    except ConfigException as e:
        # Catching ConfigException here to prevent a traceback
        print("[ERROR] %s" % str(e))
        error = True
    except Exception as e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        print("[ERROR] %s" % str(e))
        error = True

    print_end_warnings(end_warnings)
    if error:
        sys.exit(1)
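Assuming this entry point is mbed OS's tools/test.py, a typical invocation exercising the flags registered above might look like this (target, toolchain, and name pattern are illustrative):

python tools/test.py -m K64F -t GCC_ARM --build BUILD/tests -n 'tests-mbed_drivers-*' -v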
Example #11
                                  jobs=options.jobs,
                                  clean=options.clean,
                                  report=build_report,
                                  properties=build_properties,
                                  name="mbed-os",
                                  macros=options.macros,
                                  verbose=options.verbose,
                                  archive=False)
                except Exception as e:
                    library_build_success = False
                    print("Failed to build library")
                    print(e)

                if options.continue_on_build_fail or library_build_success:
                    # Build all the tests
                    all_tests = find_tests(base_source_paths[0], target_name,
                                           toolchain_name)
                    test_build_success, test_build = build_tests(
                        all_tests, [build_directory],
                        build_directory,
                        target,
                        target_toolchain,
                        clean=options.clean,
                        report=build_report,
                        properties=build_properties,
                        macros=options.macros,
                        verbose=options.verbose,
                        jobs=options.jobs,
                        continue_on_build_fail=options.continue_on_build_fail)

                    if not test_build_success:
                        total_build_success = False
Example #12
                          help="Verbose diagnostic output")

        (options, args) = parser.parse_args()

        # Filter tests by path if specified 
        if options.paths:
            all_paths = options.paths.split(",")
        else:
            all_paths = ["."]
        
        all_tests = {}
        tests = {}
        
        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path))
        
        # Filter tests by name if specified
        if options.names:
            all_names = options.names.split(",")
            all_names = [x.lower() for x in all_names]
            
            for name in all_names:
                if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print "[Warning] Test with name '%s' was not found in the available tests" % (name)
        else:
            tests = all_tests
Example #13
                                     jobs=options.jobs,
                                     clean=options.clean,
                                     report=build_report,
                                     properties=build_properties,
                                     name="mbed-os",
                                     macros=options.macros,
                                     verbose=options.verbose,
                                     archive=False)
                except Exception as e:
                    library_build_success = False
                    print("Failed to build library")
                    print(e)

                if options.continue_on_build_fail or library_build_success:
                    # Build all the tests
                    all_tests = find_tests(base_source_paths[0], target_name,
                                           toolchain_name)
                    test_build_success, test_build = build_tests(
                        all_tests, [build_directory], build_directory, target,
                        target_toolchain,
                        clean=options.clean,
                        report=build_report,
                        properties=build_properties,
                        macros=options.macros,
                        verbose=options.verbose,
                        jobs=options.jobs,
                        continue_on_build_fail=options.continue_on_build_fail)

                    if not test_build_success:
                        total_build_success = False
                        print("Failed to build some tests, "
                              "check build log for details")

                    test_builds.update(test_build)
                else:
Example #14
        
        if options.list_config:
            print(json.dumps(build_config, indent=4))
            sys.exit(0)
        
        # Ensure build directory is set
        if not options.build_dir:
            print "[ERROR] You must specify a build path"
            sys.exit(1)
        
        # Default base source path is the current directory
        base_source_paths = options.source_dir
        if not base_source_paths:
            base_source_paths = ['.']
        
        all_tests = find_tests(base_source_paths[0])
        
        start = time()    
        build_report = {}
        build_properties = {}
        test_builds = {}
        total_build_success = True

        for target_name, target_toolchains in build_config.items():
            target = TARGET_MAP[target_name]
            
            for target_toolchain in target_toolchains:
                library_build_success = True
                
                try:
                    build_directory = join(options.build_dir, target_name, target_toolchain)
Example #15
                    "argument --test-config contains invalid path or identifier"
                )
        elif options.app_config:
            config = options.app_config
        else:
            config = Config.find_app_config(options.source_dir)

        if not config:
            config = get_default_config(options.source_dir or ['.'], mcu)

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(
                find_tests(base_dir=path,
                           target_name=mcu,
                           toolchain_name=toolchain,
                           icetea=options.icetea,
                           greentea=options.greentea,
                           app_config=config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(
                        fnmatch.fnmatch(testname, name)
                        for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
Example #16
        # App config
        # Disable finding `mbed_app.json` files in the source tree if not
        # explicitly defined on the command line. Config system searches for
        # `mbed_app.json` files if `app_config` is None, but will set the
        # app config data to an empty dictionary if the path value is another
        # falsy value besides None.
        if options.app_config is None:
            options.app_config = ''

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(
                find_tests(path,
                           mcu,
                           toolchain,
                           options.options,
                           app_config=options.app_config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(
                        fnmatch.fnmatch(testname, name)
                        for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
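The None-versus-empty-string convention spelled out in the comment at the top of this example can be made concrete. A hypothetical paraphrase of the behaviour, not the real Config API:

def effective_app_config(app_config):
    if app_config is None:
        # config system searches the source tree for mbed_app.json
        return "whatever mbed_app.json the search finds"
    if not app_config:
        # falsy but not None (e.g. ''): app config data forced to {}
        return {}
    return app_config  # an explicit path is used as-is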
Example #17
        all_tests = {}
        tests = {}

        # Target
        if options.mcu is None:
            args_error(parser, "[ERROR] You should specify an MCU")
        mcu = options.mcu[0]

        # Toolchain
        if options.tool is None:
            args_error(parser, "[ERROR] You should specify a TOOLCHAIN")
        toolchain = options.tool[0]

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(find_tests(path, mcu, toolchain, options.options))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(fnmatch.fnmatch(testname, name) for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print "[Warning] Test with name '%s' was not found in the available tests" % (name)
        else:
            tests = all_tests