Example #1
def compile_repos(config, toolchains, targets, profile, verbose, examples):
    """Compiles combinations of example programs, targets and compile chains.

       The results are returned in a [key: value] dictionary format:
       Where key = The example name from the json config file
             value = a list containing: pass_status, successes, and failures

             where pass_status = The overall pass status for the compilation of the full
                                 set of example programs comprising the example suite.
                                 True if all examples pass, false otherwise
                   successes = list of passing examples.
                   failures = list of failing examples.

                   Both successes and failures contain the example name, target and compile chain

    Args:
    config - the json object imported from the file.
    toolchains - List of toolchains to compile for.
    results - results of the compilation stage.

    """
    results = {}
    test_json = {"builds":{}}
    valid_examples = set(examples)
    base_path = os.getcwd()
    print("\nCompiling example repos....\n")
    for example in config['examples']:
        example_names = [basename(x['repo']) for x in get_repo_list(example)]
        common_examples = valid_examples.intersection(set(example_names))
        if not common_examples:
            continue
        failures = []
        successes = []
        compiled = True
        pass_status = True
        if ('test' in example and example['test'] and
                'baud_rate' in example and 'compare_log' in example):
            test_example = True
        else:
            test_example = False
        if example['compile']:
            for repo_info in get_repo_list(example):
                name = basename(repo_info['repo'])
                os.chdir(name)
                # Check that the target, toolchain and features combinations are valid and return a
                # list of valid combinations to work through
                for target, toolchain in target_cross_toolchain(valid_choices(example['targets'], targets),
                                                                valid_choices(example['toolchains'], toolchains),
                                                                example['features']):
                    
                    build_command = ["mbed-cli", "compile", "-t", toolchain, "-m", target] + (['-v'] if verbose else [])
                    if profile:
                        build_command.append("--profile")
                        build_command.append(profile)
                    
                    print("Compiling [%s] for [%s] with toolchain [%s]\n\n>  %s" % (name, target, toolchain, " ".join(build_command)))
                    
                    proc = subprocess.Popen(build_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

                    std_out, std_err = proc.communicate()
                    print("\n#### STDOUT ####\n%s\n#### STDERR ####\n%s\n#### End of STDOUT/STDERR ####\n" % (std_out, std_err))
                    
                    if test_example:
                        # example['compare_log'] is a list of log files, one per example/sub-example
                        # from the same repo. Pop the log file off the list regardless of whether
                        # the compilation of this example passed or failed.
                        log = example['compare_log'].pop(0)
                        image = fetch_output_image(std_out)
                        if image:
                            image_info = [{"binary_type": "bootable", "path": normpath(join(name, image)), "compare_log": log}]
                        else:
                            print("Warning: could not find built image for example %s" % name)
                        
                    example_summary = "{} {} {}".format(name, target, toolchain)
                    if proc.returncode:
                        failures.append(example_summary)
                    else:
                        if test_example:
                            test_group = "{}-{}-{}".format(target, toolchain, example['baud_rate'])
                            if image:
                                if test_group not in test_json['builds']:
                                    test_json['builds'][test_group] = {
                                        "platform": target,
                                        "toolchain": toolchain,
                                        "base_path": base_path,
                                        "baud_rate": int(example['baud_rate']),
                                        "tests": {}}
                                test_json['builds'][test_group]['tests'][name] = {"binaries": image_info}
                                test_status = "TEST_ON"
                            else:
                                test_status = "NO_IMAGE"
                        else:
                            print("Warning: Test for %s will not be generated." % name)
                            print("One or more of 'test', 'baud_rate', and 'compare_log' keys are missing from the example config json file\n")
                            test_status = "TEST_OFF"
                        successes.append(example_summary + " " + test_status)

                os.chdir("..")

            # If there are any compilation failures for the example 'set' then the overall status is fail.
            if len(failures) > 0:
                pass_status = False
        else:
            compiled = False

        results[example['name']] = [compiled, pass_status, successes, failures]

    write_json_to_file(test_json, "test_spec.json")
    return results
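
For reference, a minimal usage sketch for the function above. The config file name, example name, target and toolchain values below are illustrative assumptions, not taken from a real config:

# Hypothetical usage of compile_repos (all concrete names are assumptions):
#
#   config = json.load(open("examples.json"))
#   results = compile_repos(config, ["GCC_ARM"], ["K64F"], None, False,
#                           ["mbed-os-example-blinky"])
#
# results then has the shape:
#   {"mbed-os-example-blinky": [True,   # compiled
#                               True,   # pass_status
#                               ["mbed-os-example-blinky K64F GCC_ARM TEST_ON"],  # successes
#                               []]}    # failures
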
Example #2
def main():
    error = False
    try:
        # Parse Options
        parser = get_default_options_parser(add_app_config=True)

        parser.add_argument("-D",
                            action="append",
                            dest="macros",
                            help="Add a macro definition")

        parser.add_argument(
            "-j",
            "--jobs",
            type=int,
            dest="jobs",
            default=0,
            help=
            "Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)"
        )

        parser.add_argument(
            "--source",
            dest="source_dir",
            type=argparse_filestring_type,
            default=None,
            help=
            "The source (input) directory (for sources other than tests). Defaults to current directory.",
            action="append")

        parser.add_argument("--build",
                            dest="build_dir",
                            type=argparse_dir_not_parent(ROOT),
                            default=None,
                            help="The build (output) directory")

        parser.add_argument(
            "-l",
            "--list",
            action="store_true",
            dest="list",
            default=False,
            help="List (recursively) available tests in order and exit")

        parser.add_argument(
            "-p",
            "--paths",
            dest="paths",
            type=argparse_many(argparse_filestring_type),
            default=None,
            help=
            "Limit the tests to those within the specified comma separated list of paths"
        )

        format_choices = ["list", "json"]
        format_default_choice = "list"
        format_help = "Change the format in which tests are listed. Choices include: %s. Default: %s" % (
            ", ".join(format_choices), format_default_choice)
        parser.add_argument("-f",
                            "--format",
                            dest="format",
                            type=argparse_lowercase_type(
                                format_choices, "format"),
                            default=format_default_choice,
                            help=format_help)

        parser.add_argument(
            "--continue-on-build-fail",
            action="store_true",
            dest="continue_on_build_fail",
            default=None,
            help="Continue trying to build all tests if a build failure occurs"
        )

        # TODO: validate the names instead of just passing them through as str
        parser.add_argument(
            "-n",
            "--names",
            dest="names",
            type=argparse_many(str),
            default=None,
            help="Limit the tests to a comma separated list of names")

        parser.add_argument("--test-config",
                            dest="test_config",
                            type=str,
                            default=None,
                            help="Test config for a module")

        parser.add_argument(
            "--test-spec",
            dest="test_spec",
            default=None,
            help=
            "Destination path for a test spec file that can be used by the Greentea automated test tool"
        )

        parser.add_argument(
            "--build-report-junit",
            dest="build_report_junit",
            default=None,
            help="Destination path for a build report in the JUnit xml format")
        parser.add_argument("--build-data",
                            dest="build_data",
                            default=None,
                            help="Dump build_data to this file")

        parser.add_argument("-v",
                            "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Verbose diagnostic output")

        parser.add_argument(
            "--silent",
            action="store_true",
            dest="silent",
            default=False,
            help="Silent diagnostic output (no copy, compile notification)")

        parser.add_argument("--stats-depth",
                            type=int,
                            dest="stats_depth",
                            default=2,
                            help="Depth level for static memory report")

        parser.add_argument(
            "--ignore",
            dest="ignore",
            type=argparse_many(str),
            default=None,
            help=
            "Comma separated list of patterns to add to mbedignore (eg. ./main.cpp)"
        )

        parser.add_argument("--icetea",
                            action="store_true",
                            dest="icetea",
                            default=False,
                            help="Only icetea tests")

        parser.add_argument("--greentea",
                            action="store_true",
                            dest="greentea",
                            default=False,
                            help="Only greentea tests")

        options = parser.parse_args()

        # Filter tests by path if specified
        if options.paths:
            all_paths = options.paths
        else:
            all_paths = ["."]

        all_tests = {}
        tests = {}
        end_warnings = []

        # By default both test tools are enabled
        if not (options.greentea or options.icetea):
            options.greentea = True
            options.icetea = True

        # Target
        if options.mcu is None:
            args_error(parser, "argument -m/--mcu is required")
        mcu = extract_mcus(parser, options)[0]
        target = Target.get_target(mcu)

        # Toolchain
        if options.tool is None:
            args_error(parser, "argument -t/--tool is required")
        toolchain = options.tool[0]

        try:
            toolchain_name, internal_tc_name, end_warnings = find_valid_toolchain(
                target, toolchain)
        except NoValidToolchainException as e:
            print_end_warnings(e.end_warnings)
            args_error(parser, str(e))

        # Assign the config file. Precedence: test_config > app_config
        # TODO: merge the configs if both are given
        if options.test_config:
            config = get_test_config(options.test_config, mcu)
            if not config:
                args_error(
                    parser,
                    "argument --test-config contains invalid path or identifier"
                )
        elif options.app_config:
            config = options.app_config
        else:
            config = Config.find_app_config(options.source_dir)

        if not config:
            config = get_default_config(options.source_dir or ['.'], mcu)

        # Find all tests in the relevant paths
        for path in all_paths:
            all_tests.update(
                find_tests(base_dir=path,
                           target_name=mcu,
                           toolchain_name=toolchain_name,
                           icetea=options.icetea,
                           greentea=options.greentea,
                           app_config=config))

        # Filter tests by name if specified
        if options.names:
            all_names = options.names
            all_names = [x.lower() for x in all_names]

            for name in all_names:
                if any(
                        fnmatch.fnmatch(testname, name)
                        for testname in all_tests):
                    for testname, test in all_tests.items():
                        if fnmatch.fnmatch(testname, name):
                            tests[testname] = test
                else:
                    print("[Warning] Test with name '%s' was not found in the "
                          "available tests" % (name))
        else:
            tests = all_tests

        if options.list:
            # Print available tests in order and exit
            print_tests(tests, options.format)
            sys.exit(0)
        else:
            # Build all tests
            if not options.build_dir:
                args_error(parser, "argument --build is required")

            base_source_paths = options.source_dir

            # Default base source path is the current directory
            if not base_source_paths:
                base_source_paths = ['.']

            build_report = {}
            build_properties = {}

            library_build_success = False
            profile = extract_profile(parser, options, internal_tc_name)
            try:
                resource_filter = None
                if target.is_PSA_secure_target:
                    resource_filter = OsAndSpeResourceFilter()
                    generate_psa_sources(source_dirs=base_source_paths,
                                         ignore_paths=[options.build_dir])

                # Build sources
                notify = TerminalNotifier(options.verbose, options.silent)
                build_library(base_source_paths,
                              options.build_dir,
                              mcu,
                              toolchain_name,
                              jobs=options.jobs,
                              clean=options.clean,
                              report=build_report,
                              properties=build_properties,
                              name="mbed-build",
                              macros=options.macros,
                              notify=notify,
                              archive=False,
                              app_config=config,
                              build_profile=profile,
                              ignore=options.ignore,
                              resource_filter=resource_filter)

                library_build_success = True
            except ToolException as e:
                # ToolException output is handled by the build log
                print("[ERROR] " + str(e))
            except NotSupportedException as e:
                # NotSupportedException is handled by the build log
                print("[ERROR] " + str(e))
            except Exception as e:
                if options.verbose:
                    import traceback
                    traceback.print_exc()
                # Some other exception occurred, print the error message
                print(e)

            if not library_build_success:
                print("Failed to build library")
            else:
                if target.is_PSA_secure_target:
                    resource_filter = SpeOnlyResourceFilter()
                else:
                    resource_filter = None

                # Build all the tests
                notify = TerminalNotifier(options.verbose, options.silent)
                test_build_success, test_build = build_tests(
                    tests, [os.path.relpath(options.build_dir)],
                    options.build_dir,
                    mcu,
                    toolchain_name,
                    clean=options.clean,
                    report=build_report,
                    properties=build_properties,
                    macros=options.macros,
                    notify=notify,
                    jobs=options.jobs,
                    continue_on_build_fail=options.continue_on_build_fail,
                    app_config=config,
                    build_profile=profile,
                    stats_depth=options.stats_depth,
                    ignore=options.ignore,
                    resource_filter=resource_filter)

                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    write_json_to_file(test_spec_from_test_builds(test_build),
                                       options.test_spec)

            # If a path to a JUnit build report is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT,
                                                 package="build")
                report_exporter.report_to_file(
                    build_report,
                    options.build_report_junit,
                    test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                print()
                print(print_build_memory_usage(build_report))

            print_report_exporter = ReportExporter(ResultExporterType.PRINT,
                                                   package="build")
            status = print_report_exporter.report(build_report)
            if options.build_data:
                merge_build_data(options.build_data, build_report, "test")

            if status:
                sys.exit(0)
            else:
                sys.exit(1)

    except KeyboardInterrupt:
        print("\n[CTRL+c] exit")
    except ConfigException as e:
        # Catching ConfigException here to prevent a traceback
        print("[ERROR] %s" % str(e))
        error = True
    except Exception as e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        print("[ERROR] %s" % str(e))
        error = True

    print_end_warnings(end_warnings)
    if error:
        sys.exit(1)
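
A hedged invocation sketch for the entry point above. The script name is an assumption; the flags are the ones registered on the parser, and the target, toolchain and test-name pattern are illustrative values:

# Hypothetical invocations (script name 'test.py' is assumed):
#
#   python test.py -m K64F -t GCC_ARM --list --format json
#   python test.py -m K64F -t GCC_ARM --build BUILD/tests -j 4 \
#       -n "tests-mbed_drivers*" -v
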
Example #3
                    clean=options.clean,
                    report=build_report,
                    properties=build_properties,
                    macros=options.macros,
                    notify=notify,
                    jobs=options.jobs,
                    continue_on_build_fail=options.continue_on_build_fail,
                    app_config=config,
                    build_profile=profile,
                    stats_depth=options.stats_depth,
                    ignore=options.ignore,
                    spe_build=mcu_secured)

                # If a path to a test spec is provided, write it to a file
                if options.test_spec:
                    write_json_to_file(test_spec_from_test_builds(test_build),
                                       options.test_spec)

            # If a path to a JUnit build report is provided, write it to a file
            if options.build_report_junit:
                report_exporter = ReportExporter(ResultExporterType.JUNIT,
                                                 package="build")
                report_exporter.report_to_file(
                    build_report,
                    options.build_report_junit,
                    test_suite_properties=build_properties)

            # Print memory map summary on screen
            if build_report:
                print()
                print(print_build_memory_usage(build_report))
Example #4
def compile_repos(config,
                  toolchains,
                  targets,
                  profiles,
                  verbose,
                  exp_filter,
                  cmake=False,
                  jobs=0):
    """Compiles combinations of example programs, targets and compile chains.

       The results are returned in a [key: value] dictionary format:
       Where key = The example name from the json config file
             value = a list containing: pass_status, successes, and failures

             where pass_status = The overall pass status for the compilation of the full
                                 set of example programs comprising the example suite.
                                 True if all examples pass, false otherwise
                   successes = list of passing examples.
                   failures = list of failing examples.

                   Both successes and failures contain the example name, target and compile chain

    Args:
    config - the json object imported from the file.
    toolchains - List of toolchains to compile for.
    targets - list of target names
    profile - build profile path or name if in default place
    verbose - enabling verbose
    exp_filter - List of exp_filter to be build
    jobs - Number of compile jobs

    """
    results = {}
    test_json = {"builds": {}}
    base_path = os.getcwd()
    print("\nCompiling example repos....\n")
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        failures = []
        successes = []
        compiled = True
        pass_status = True
        if example['test']:
            if not ('baud_rate' in example and 'compare_log' in example):
                logging.warning(
                    "'baud_rate' or 'compare_log' keys are missing from the example config json file"
                )
                example['test'] = False
        if example['compile']:
            for name in get_sub_examples_list(example):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                # Check that the target, toolchain and features combinations are valid and return a
                # list of valid combinations to work through
                for target, toolchain in target_cross_toolchain(
                        valid_choices(example['targets'], targets),
                        valid_choices(example['toolchains'], toolchains),
                        example['features']):
                    example_summary = {
                        "name": name,
                        "target": target,
                        "toolchain": toolchain,
                        "test": "UNSET"
                    }
                    summary_string = "%s %s %s" % (name, target, toolchain)
                    logging.info("Compiling %s" % summary_string)

                    if cmake:
                        build_command_seq = [
                            "mbed-tools compile -t {} -m {} -c".format(
                                toolchain, target)
                        ]
                    else:
                        build_command_seq = [
                            "mbed-cli compile -t {} -m {} -j {} {}".format(
                                toolchain, target, str(jobs),
                                '-vv' if verbose else '')
                        ]
                        if profiles:
                            for profile in profiles:
                                build_command_seq[0] += " --profile {}".format(
                                    profile)

                    failed_flag = False
                    for build_command in build_command_seq:
                        logging.info("Executing command '%s'..." %
                                     build_command)
                        proc = subprocess.Popen(build_command.split(),
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.PIPE)

                        std_out, std_err = proc.communicate()
                        std_out = std_out.decode()
                        std_err = std_err.decode()
                        print(
                            "\n#### STDOUT ####\n%s\n#### STDERR ####\n%s\n#### End of STDOUT/STDERR ####\n"
                            % (std_out, std_err))

                        if proc.returncode:
                            failures.append(example_summary)
                            failed_flag = True
                            break

                    if not failed_flag:
                        if example['test']:
                            # example['compare_log'] is a list of log files, one per example/sub-example
                            # from the same repo. Pop the log file off the list regardless of whether
                            # the compilation of this example passed or failed.
                            log = example['compare_log'].pop(0)
                            image = fetch_output_image(std_out, cmake)
                            if image:
                                image_info = [{
                                    "binary_type": "bootable",
                                    "path": normpath(join(name, image)),
                                    "compare_log": log
                                }]
                                test_group = "{}-{}-{}".format(
                                    target, toolchain, example['baud_rate'])
                                if test_group not in test_json['builds']:
                                    test_json['builds'][test_group] = {
                                        "platform": target,
                                        "toolchain": toolchain,
                                        "base_path": base_path,
                                        "baud_rate": int(example['baud_rate']),
                                        "tests": {}
                                    }
                                test_json['builds'][test_group]['tests'][name] = {
                                    "binaries": image_info
                                }
                                example_summary["test"] = "TEST_ON"

                            else:
                                logging.warning(
                                    "could not find built image for example %s"
                                    % name)
                                example_summary["test"] = "NO_IMAGE"
                        else:
                            logging.warning(
                                "Test for %s will not be generated." % name)
                            example_summary["test"] = "TEST_OFF"
                        successes.append(example_summary)

                os.chdir(CWD)

            # If there are any compilation failures for the example 'set' then the overall status is fail.
            if len(failures) > 0:
                pass_status = False
        else:
            compiled = False

        results[example['name']] = [compiled, pass_status, successes, failures]

    write_json_to_file(test_json, "test_spec.json")
    return results
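
The test_spec.json written at the end has the shape sketched below. The structure follows directly from how test_json is populated above; the concrete values (target, toolchain, baud rate, paths, log name) are illustrative assumptions:

# Hypothetical shape of the generated test_spec.json, as a Python literal:
#
#   {"builds": {
#       "K64F-GCC_ARM-9600": {
#           "platform": "K64F",
#           "toolchain": "GCC_ARM",
#           "base_path": "/home/user/examples",
#           "baud_rate": 9600,
#           "tests": {
#               "mbed-os-example-blinky": {
#                   "binaries": [{"binary_type": "bootable",
#                                 "path": "mbed-os-example-blinky/BUILD/K64F/GCC_ARM/mbed-os-example-blinky.bin",
#                                 "compare_log": "tests/blinky.log"}]}}}}}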