Example #1
                    # Create the target dir for the test spec if necessary
                    # mkdir will not create the dir if it already exists
                    test_spec_dir = os.path.dirname(options.test_spec)
                    if test_spec_dir:
                        mkdir(test_spec_dir)

                    try:
                        with open(options.test_spec, 'w') as f:
                            f.write(json.dumps(test_spec_data, indent=2))
                    except IOError as e:
                        print("[ERROR] Error writing test spec to file")
                        print(e)

            # If a path to a JUnit build report spec is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT,
                                                 package="build")
                report_exporter.report_to_file(
                    build_report,
                    options.build_report_junit,
                    test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                print()
                print(print_build_memory_usage(build_report))

            print_report_exporter = ReportExporter(ResultExporterType.PRINT,
                                                   package="build")
            status = print_report_exporter.report(build_report)

            if status:
Example #2
        print("Test specification in %s:" %
              ('auto-detected' if opts.auto_detect else opts.test_spec_filename))
        if test_spec:
            print(print_test_configuration_from_json(test_spec))
        exit(0)

    if get_module_avail('mbed_lstools'):
        if opts.operability_checks:
            # Check if test scope is valid and run tests
            test_scope = get_available_oper_test_scopes()
            if opts.operability_checks in test_scope:
                tests = IOperTestRunner(scope=opts.operability_checks)
                test_results = tests.run()

                # Export results in form of JUnit XML report to separate file
                if opts.report_junit_file_name:
                    report_exporter = ReportExporter(
                        ResultExporterType.JUNIT_OPER)
                    report_exporter.report_to_file(test_results,
                                                   opts.report_junit_file_name)
            else:
                print("Unknown interoperability test scope name: '%s'" %
                      opts.operability_checks)
                print("Available test scopes: %s" % ','.join(
                    ["'%s'" % n for n in test_scope]))

            exit(0)

    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print(print_muts_configuration_from_json(MUTs))
    if test_spec and opts.verbose:
        print(print_test_configuration_from_json(test_spec))
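
The interoperability section above is a small validate-run-export flow. The following is a minimal sketch of that flow under the API shown in the excerpt; the helper name run_oper_scope is hypothetical, and imports are omitted because the exact module paths are not shown in the excerpt.

# Hypothetical helper condensing the interoperability-test flow above.
# IOperTestRunner, get_available_oper_test_scopes, ReportExporter and
# ResultExporterType are assumed to be in scope, as in the excerpt.
def run_oper_scope(scope, junit_path=None):
    available = get_available_oper_test_scopes()
    if scope not in available:
        raise ValueError("Unknown interoperability test scope: '%s' (available: %s)"
                         % (scope, ', '.join(available)))
    results = IOperTestRunner(scope=scope).run()
    if junit_path:
        # Export results as a JUnit XML report to a separate file
        ReportExporter(ResultExporterType.JUNIT_OPER).report_to_file(results, junit_path)
    return results
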
Example #3
                    # Create the target dir for the test spec if necessary
                    # mkdir will not create the dir if it already exists
                    test_spec_dir = os.path.dirname(options.test_spec)
                    if test_spec_dir:
                        mkdir(test_spec_dir)

                    try:
                        with open(options.test_spec, 'w') as f:
                            f.write(json.dumps(test_spec_data, indent=2))
                    except IOError as e:
                        print("[ERROR] Error writing test spec to file")
                        print(e)

            # If a path to a JUnit build report spec is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
                report_exporter.report_to_file(build_report, options.build_report_junit, test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                print()
                print(print_build_memory_usage(build_report))

            print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
            status = print_report_exporter.report(build_report)

            if status:
                sys.exit(0)
            else:
                sys.exit(1)
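
Examples #1 and #3 end with the same report-export tail. A minimal sketch of that pattern follows, assuming the ReportExporter / ResultExporterType API used in the excerpts; the function name export_build_report and the import path are assumptions, not part of the original code.

# Assumed import path for the mbed-os tools report exporters; adjust if needed.
from tools.test_exporters import ReportExporter, ResultExporterType

def export_build_report(build_report, build_properties, junit_path=None):
    # Optionally write a JUnit XML report of the build to a file
    if junit_path:
        junit_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
        junit_exporter.report_to_file(build_report, junit_path,
                                      test_suite_properties=build_properties)
    # Print the human-readable summary and return the overall pass/fail status
    print_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
    return print_exporter.report(build_report)
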
Example #4
                    TARGET_MAP[target_name],
                    toolchain,
                    notify=TerminalNotifier(options.verbose),
                    jobs=options.jobs,
                    report=build_report,
                    properties=build_properties,
                    build_profile=extract_profile(parser, options, toolchain),
                )

    # copy targets.json file as part of the release
    copy(join(dirname(abspath(__file__)), '..', 'targets', 'targets.json'),
         MBED_LIBRARIES)

    # Write summary of the builds
    if options.report_build_file_name:
        file_report_exporter = ReportExporter(ResultExporterType.JUNIT,
                                              package="build")
        file_report_exporter.report_to_file(
            build_report,
            options.report_build_file_name,
            test_suite_properties=build_properties)

    print("\n\nCompleted in: (%.2f)s" % (time() - start))

    print_report_exporter = ReportExporter(ResultExporterType.PRINT,
                                           package="build")
    status = status and print_report_exporter.report(build_report)

    if not status:
        sys.exit(1)
Example #5
        print("Test specification in %s:" % ('auto-detected' if opts.auto_detect else opts.test_spec_filename))
        if test_spec:
            print(print_test_configuration_from_json(test_spec))
        exit(0)

    if get_module_avail('mbed_lstools'):
        if opts.operability_checks:
            # Check if test scope is valid and run tests
            test_scope = get_available_oper_test_scopes()
            if opts.operability_checks in test_scope:
                tests = IOperTestRunner(scope=opts.operability_checks)
                test_results = tests.run()

                # Export results in form of JUnit XML report to separate file
                if opts.report_junit_file_name:
                    report_exporter = ReportExporter(ResultExporterType.JUNIT_OPER)
                    report_exporter.report_to_file(test_results, opts.report_junit_file_name)
            else:
                print("Unknown interoperability test scope name: '%s'" % opts.operability_checks)
                print("Available test scopes: %s" % ','.join(["'%s'" % n for n in test_scope]))

            exit(0)

    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print(print_muts_configuration_from_json(MUTs))
    if test_spec and opts.verbose:
        print(print_test_configuration_from_json(test_spec))

    if opts.only_build_tests:
        # We are skipping the testing phase and suppressing the summary
Example #6
def main():
    error = False
    try:
        # Parse Options
        parser = get_default_options_parser(add_app_config=True)

        parser.add_argument("-D",
                            action="append",
                            dest="macros",
                            help="Add a macro definition")

        parser.add_argument(
            "-j",
            "--jobs",
            type=int,
            dest="jobs",
            default=0,
            help=
            "Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)"
        )

        parser.add_argument(
            "--source",
            dest="source_dir",
            type=argparse_filestring_type,
            default=None,
            help=
            "The source (input) directory (for sources other than tests). Defaults to current directory.",
            action="append")

        parser.add_argument("--build",
                            dest="build_dir",
                            type=argparse_dir_not_parent(ROOT),
                            default=None,
                            help="The build (output) directory")

        parser.add_argument(
            "-l",
            "--list",
            action="store_true",
            dest="list",
            default=False,
            help="List (recursively) available tests in order and exit")

        parser.add_argument(
            "-p",
            "--paths",
            dest="paths",
            type=argparse_many(argparse_filestring_type),
            default=None,
            help=
            "Limit the tests to those within the specified comma separated list of paths"
        )

        format_choices = ["list", "json"]
        format_default_choice = "list"
        format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (
            ", ".join(format_choices), format_default_choice)
        parser.add_argument("-f",
                            "--format",
                            dest="format",
                            type=argparse_lowercase_type(
                                format_choices, "format"),
                            default=format_default_choice,
                            help=format_help)

        parser.add_argument(
            "--continue-on-build-fail",
            action="store_true",
            dest="continue_on_build_fail",
            default=None,
            help="Continue trying to build all tests if a build failure occurs"
        )

        # TODO: validate the names instead of just passing them through str
        parser.add_argument(
            "-n",
            "--names",
            dest="names",
            type=argparse_many(str),
            default=None,
            help="Limit the tests to a comma separated list of names")

        parser.add_argument("--test-config",
                            dest="test_config",
                            type=str,
                            default=None,
                            help="Test config for a module")

        parser.add_argument(
            "--test-spec",
            dest="test_spec",
            default=None,
            help=
            "Destination path for a test spec file that can be used by the Greentea automated test tool"
        )

        parser.add_argument(
            "--build-report-junit",
            dest="build_report_junit",
            default=None,
            help="Destination path for a build report in the JUnit xml format")
        parser.add_argument("--build-data",
                            dest="build_data",
                            default=None,
                            help="Dump build_data to this file")

        parser.add_argument("-v",
                            "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Verbose diagnostic output")

        parser.add_argument(
            "--silent",
            action="store_true",
            dest="silent",
            default=False,
            help="Silent diagnostic output (no copy, compile notification)")

        parser.add_argument("--stats-depth",
                            type=int,
                            dest="stats_depth",
                            default=2,
                            help="Depth level for static memory report")

        parser.add_argument(
            "--ignore",
            dest="ignore",
            type=argparse_many(str),
            default=None,
            help=
            "Comma separated list of patterns to add to mbedignore (eg. ./main.cpp)"
        )

        parser.add_argument("--icetea",
                            action="store_true",
                            dest="icetea",
                            default=False,
                            help="Only icetea tests")

        parser.add_argument("--greentea",
                            action="store_true",
                            dest="greentea",
                            default=False,
                            help="Only greentea tests")

        options = parser.parse_args()

        # Filter tests by path if specified
        if options.paths:
            all_paths = options.paths
        else:
            all_paths = ["."]

        all_tests = {}
        tests = {}
        end_warnings = []

        # By default, both test tools are enabled
        if not (options.greentea or options.icetea):
            options.greentea = True
            options.icetea = True

        # Target
        if options.mcu is None:
            args_error(parser, "argument -m/--mcu is required")
        mcu = extract_mcus(parser, options)[0]
        target = Target.get_target(mcu)

        # Toolchain
        if options.tool is None:
            args_error(parser, "argument -t/--tool is required")
        toolchain = options.tool[0]

        try:
            toolchain_name, internal_tc_name, end_warnings = find_valid_toolchain(
                target, toolchain)
        except NoValidToolchainException as e:
            print_end_warnings(e.end_warnings)
            args_error(parser, str(e))

        # Assign config file. Precedence: test_config>app_config
        # TODO: merge configs if both given
        if options.test_config:
            config = get_test_config(options.test_config, mcu)
            if not config:
                args_error(
                    parser,
                    "argument --test-config contains invalid path or identifier"
                )
        elif options.app_config:
            config = options.app_config
        else:
            config = Config.find_app_config(options.source_dir)

        if not config:
            config = get_default_config(options.source_dir or ['.'], mcu)

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(
                find_tests(base_dir=path,
                           target_name=mcu,
                           toolchain_name=toolchain_name,
                           icetea=options.icetea,
                           greentea=options.greentea,
                           app_config=config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(
                        fnmatch.fnmatch(testname, name)
                        for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print("[Warning] Test with name '%s' was not found in the "
                          "available tests" % (name))
        else:
            tests = all_tests

        if options.list:
            # Print available tests in order and exit
            print_tests(tests, options.format)
            sys.exit(0)
        else:
            # Build all tests
            if not options.build_dir:
                args_error(parser, "argument --build is required")

            base_source_paths = options.source_dir

            # Default base source path is the current directory
            if not base_source_paths:
                base_source_paths = ['.']

            build_report = {}
            build_properties = {}

            library_build_success = False
            profile = extract_profile(parser, options, internal_tc_name)
            try:
                resource_filter = None
                if target.is_PSA_secure_target:
                    resource_filter = OsAndSpeResourceFilter()
                    generate_psa_sources(source_dirs=base_source_paths,
                                         ignore_paths=[options.build_dir])

                # Build sources
                notify = TerminalNotifier(options.verbose, options.silent)
                build_library(base_source_paths,
                              options.build_dir,
                              mcu,
                              toolchain_name,
                              jobs=options.jobs,
                              clean=options.clean,
                              report=build_report,
                              properties=build_properties,
                              name="mbed-build",
                              macros=options.macros,
                              notify=notify,
                              archive=False,
                              app_config=config,
                              build_profile=profile,
                              ignore=options.ignore,
                              resource_filter=resource_filter)

                library_build_success = True
            except ToolException as e:
                # ToolException output is handled by the build log
                print("[ERROR] " + str(e))
                pass
            except NotSupportedException as e:
                # NotSupportedException is handled by the build log
                print("[ERROR] " + str(e))
                pass
            except Exception as e:
                if options.verbose:
                    import traceback
                    traceback.print_exc()
                # Some other exception occurred, print the error message
                print(e)

            if not library_build_success:
                print("Failed to build library")
            else:
                if target.is_PSA_secure_target:
                    resource_filter = SpeOnlyResourceFilter()
                else:
                    resource_filter = None

                # Build all the tests
                notify = TerminalNotifier(options.verbose, options.silent)
                test_build_success, test_build = build_tests(
                    tests, [os.path.relpath(options.build_dir)],
                    options.build_dir,
                    mcu,
                    toolchain_name,
                    clean=options.clean,
                    report=build_report,
                    properties=build_properties,
                    macros=options.macros,
                    notify=notify,
                    jobs=options.jobs,
                    continue_on_build_fail=options.continue_on_build_fail,
                    app_config=config,
                    build_profile=profile,
                    stats_depth=options.stats_depth,
                    ignore=options.ignore,
                    resource_filter=resource_filter)

                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    write_json_to_file(test_spec_from_test_builds(test_build),
                                       options.test_spec)

            # If a path to a JUnit build report spec is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT,
                                                 package="build")
                report_exporter.report_to_file(
                    build_report,
                    options.build_report_junit,
                    test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                print()
                print(print_build_memory_usage(build_report))

            print_report_exporter = ReportExporter(ResultExporterType.PRINT,
                                                   package="build")
            status = print_report_exporter.report(build_report)
            if options.build_data:
                merge_build_data(options.build_data, build_report, "test")

            if status:
                sys.exit(0)
            else:
                sys.exit(1)

    except KeyboardInterrupt as e:
        print("\n[CTRL+c] exit")
    except ConfigException as e:
        # Catching ConfigException here to prevent a traceback
        print("[ERROR] %s" % str(e))
        error = True
    except Exception as e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        print("[ERROR] %s" % str(e))
        error = True

    print_end_warnings(end_warnings)
    if error:
        sys.exit(1)
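
For context, a hypothetical way to drive the main() above: the flags are the ones defined by the parser in the example, while the target name, toolchain and paths are illustrative assumptions.

# Drive the CLI entry point by faking argv; all values below are illustrative.
import sys

sys.argv = [
    "test.py",
    "-m", "K64F",                  # target MCU (assumed name)
    "-t", "GCC_ARM",               # toolchain
    "--build", "BUILD/tests",      # required build (output) directory
    "--test-spec", "BUILD/tests/test_spec.json",
    "--build-report-junit", "BUILD/tests/junit_report.xml",
]
main()  # parses the options, discovers and builds tests, then exits via sys.exit()
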
Example #7
                profile = extract_profile(parser, options, toolchain)

                try:
                    built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name],
                                                     toolchain,
                                                     verbose=options.verbose,
                                                     jobs=options.jobs,
                                                     report=build_report,
                                                     properties=build_properties,
                                                     build_profile=profile)

                except Exception as e:
                    print(str(e))

    # copy targets.json file as part of the release
    copy(join(dirname(abspath(__file__)), '..', 'targets', 'targets.json'), MBED_LIBRARIES)

    # Write summary of the builds
    if options.report_build_file_name:
        file_report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
        file_report_exporter.report_to_file(build_report, options.report_build_file_name, test_suite_properties=build_properties)

    print("\n\nCompleted in: (%.2f)s" % (time() - start))

    print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
    status = print_report_exporter.report(build_report)

    if not status:
        sys.exit(1)