Ejemplo n.º 1
0
    def _replaceStringMap(self, root, platform, program_path,
                          stringmap_from_info):
        """Substitute ``{NAME}`` placeholders throughout *root*.

        The map is built from the CLI ``--string_map`` argument merged with
        the benchmark-info map, plus a few well-known directory entries.

        Args:
            root: structure (dict/list tree) whose strings are rewritten
                in place via deepReplace.
            platform: target platform; its output dir becomes {TGTDIR}.
            program_path: directory of the benchmark program, used for
                {BUILDDIR} when present.
            stringmap_from_info: extra map supplied by the caller's info
                payload; may be a JSON string or (legacy) a dict.
        """
        try:
            # backward compatible: both maps used to be JSON strings
            string_map = json.loads(self.args.string_map) \
                if self.args.string_map else {}

            info_string_map = json.loads(stringmap_from_info) \
                if stringmap_from_info else {}
        except (TypeError, ValueError):
            # Narrowed from BaseException: only parsing failures (bad JSON
            # raises ValueError; a non-str input raises TypeError) should
            # trigger the legacy fallback — never KeyboardInterrupt/SystemExit.
            string_map = ast.literal_eval(self.args.string_map) \
                if self.args.string_map else {}
            info_string_map = stringmap_from_info if stringmap_from_info else {}

        deepMerge(string_map, info_string_map)

        string_map["TGTDIR"] = platform.getOutputDir()
        string_map["HOSTDIR"] = self._createHostDir()
        string_map["FAIPEPROOT"] = getFAIPEPROOT()
        if program_path:
            string_map["BUILDDIR"] = program_path

        for name in string_map:
            value = string_map[name]
            deepReplace(root, "{" + name + "}", value)
Ejemplo n.º 2
0
    def _collectOneBenchmark(self, source, meta, benchmarks, info):
        """Load one benchmark json file, fold in meta/info overrides, and
        append the resulting benchmark(s) to *benchmarks* — one emitted
        benchmark per test."""
        assert os.path.isfile(source), \
            "Benchmark {} does not exist".format(source)
        with open(source, 'r') as handle:
            one_benchmark = json.load(handle)

        self._verifyBenchmark(one_benchmark, source, False)
        self._updateFiles(one_benchmark, source)

        # following change should not appear in updated_json file
        if meta:
            deepMerge(one_benchmark["model"], meta)
        if "commands" in info:
            model = one_benchmark["model"]
            model.setdefault("commands", {})
            deepMerge(model["commands"], info["commands"])

        self._updateTests(one_benchmark, source)
        # Fields below must not appear in the saved benchmark file.
        # Record the absolute path of this benchmark file.
        one_benchmark["path"] = os.path.abspath(source)

        # Fan out so that every emitted benchmark carries exactly one test.
        if len(one_benchmark["tests"]) == 1:
            benchmarks.append(one_benchmark)
        else:
            all_tests = copy.deepcopy(one_benchmark["tests"])
            one_benchmark["tests"] = []
            for single_test in all_tests:
                clone = copy.deepcopy(one_benchmark)
                clone["tests"].append(single_test)
                benchmarks.append(clone)
Ejemplo n.º 3
0
 def _runCommands(self, output, commands, platform, programs, model, test,
                  model_files, input_files, output_files, shared_libs,
                  test_files, total_num, converter, platform_args=None,
                  main_command=False):
     """Compose the run commands and execute them on *platform*, merging
     each command's result dict into *output*.

     Profiling, when enabled in the test, is only switched on for the
     last of the main commands.
     """
     if platform_args is None:
         platform_args = {}
     if test and test.get("log_output", False):
         platform_args["log_output"] = True
     if self.args.timeout > 0 and "timeout" not in platform_args:
         platform_args["timeout"] = self.args.timeout
     cmds = self.composeRunCommand(commands, platform,
                                   programs, model, test,
                                   model_files,
                                   input_files, output_files,
                                   shared_libs, test_files, main_command)
     profiling_enabled = False
     # Guard against test=None (model-level preprocess/postprocess passes
     # test=None), matching the log_output guard above; the bare
     # `"profiler" in test` raised TypeError in that case.
     if test and "profiler" in test:
         profiling_enabled = test.get("profiler", {}).get("enabled", False)
     if profiling_enabled:
         platform_args["profiler_args"] = test.get("profiler", {})
     for idx, cmd in enumerate(cmds):
         # note that we only enable profiling for the last command
         # of the main commands.
         platform_args["enable_profiling"] = profiling_enabled and \
             main_command and idx == len(cmds) - 1
         one_output = self.runOnPlatform(total_num, cmd, platform,
                                         platform_args,
                                         converter)
         deepMerge(output, one_output)
Ejemplo n.º 4
0
 def _getCommand(self, repo_info):
     """Build the shell command line that launches harness.py with the
     platform/framework/info arguments derived from *repo_info* plus any
     unrecognized CLI arguments."""
     platform = repo_info.pop("platform")
     # popped above: keeping it in repo_info would pollute the --info
     # payload; should clean up later
     dir_path = os.path.dirname(os.path.realpath(__file__))
     unknowns = getUnknowns()
     # a not so elegant way of merging info construct
     if '--info' in unknowns:
         info_idx = unknowns.index('--info')
         deepMerge(repo_info, json.loads(unknowns[info_idx + 1]))
         # drop the flag and its value from the forwarded arguments
         del unknowns[info_idx:info_idx + 2]
     info = getString(json.dumps(repo_info))
     fragments = [
         getPythonInterpreter(), " ",
         os.path.join(dir_path, "harness.py"), " ",
         " --platform ", getString(platform),
         " --framework ", getString(getArgs().framework),
         " --info ", info, " ",
         ' '.join([getString(u) for u in unknowns]),
     ]
     command = ''.join(fragments)
     if getArgs().env:
         env_tokens = [' ' + env_var + ' '
                       for env_var in getArgs().env.split()]
         command = command + " --env " + ''.join(env_tokens)
     return command
Ejemplo n.º 5
0
 def _getRawArgs(self, repo_info):
     """Build the raw argv list for launching the harness.

     Args:
         repo_info: dict describing the run; its "platform" entry is
             removed and forwarded as --platform, the rest is serialized
             into --info.
     Returns:
         list of argument strings (platform, framework, info, the
         leftover unknown args, and optional --env variables).
     """
     platform = repo_info["platform"]
     # Remove it from repo_info to avoid pollution, should clean up later
     del repo_info["platform"]
     unknowns = self.unknowns
     # a not so elegant way of merging info construct
     if '--info' in unknowns:
         info_idx = unknowns.index('--info')
         info = json.loads(unknowns[info_idx + 1])
         deepMerge(repo_info, info)
         del unknowns[info_idx + 1]
         del unknowns[info_idx]
     info = json.dumps(repo_info)
     raw_args = []
     raw_args.extend([
         "--platform",
         getString(platform), "--framework",
         getString(self.args.framework), "--info", info
     ])
     raw_args.extend(unknowns)
     if self.args.env:
         raw_args.append("--env")
         # idiom: extend with the split list instead of appending
         # elements one at a time
         raw_args.extend(self.args.env.split())
     return raw_args
Ejemplo n.º 6
0
 def _runCommands(self,
                  output,
                  commands,
                  platform,
                  programs,
                  model,
                  test,
                  model_files,
                  input_files,
                  output_files,
                  shared_libs,
                  test_files,
                  total_num,
                  converter,
                  platform_args=None):
     """Compose the run commands and execute each on *platform*, merging
     every command's result dict into *output*."""
     if platform_args is None:
         platform_args = {}
     if test and test.get("log_output", False):
         platform_args["log_output"] = True
     args = getArgs()
     if args.timeout > 0:
         platform_args["timeout"] = args.timeout
     composed = self.composeRunCommand(commands, platform, programs, model,
                                       test, model_files, input_files,
                                       output_files, shared_libs,
                                       test_files)
     for one_cmd in composed:
         result = self.runOnPlatform(total_num, one_cmd, platform,
                                     platform_args, converter)
         deepMerge(output, result)
Ejemplo n.º 7
0
    def _collectOneBenchmark(self, source, meta, benchmarks, info):
        """Load one benchmark json file, apply the CLI string map and the
        meta overrides, and append one single-test benchmark per test to
        *benchmarks*."""
        assert os.path.isfile(source), \
            "Benchmark {} does not exist".format(source)
        with open(source, 'r') as handle:
            one_benchmark = json.load(handle)

        raw_map = self.args.string_map
        string_map = json.loads(raw_map) if raw_map else {}
        for key, value in string_map.items():
            deepReplace(one_benchmark, "{" + key + "}", value)

        self._verifyBenchmark(one_benchmark, source, False)
        self._updateFiles(one_benchmark, source)

        # following change should not appear in updated_json file
        if meta:
            deepMerge(one_benchmark["model"], meta)

        self._updateTests(one_benchmark, source)
        # Fields below must not appear in the saved benchmark file.
        # Record the absolute path of this benchmark file.
        one_benchmark["path"] = os.path.abspath(source)

        # Fan out so that every emitted benchmark carries exactly one test.
        if len(one_benchmark["tests"]) == 1:
            benchmarks.append(one_benchmark)
            return
        all_tests = copy.deepcopy(one_benchmark["tests"])
        one_benchmark["tests"] = []
        for single_test in all_tests:
            clone = copy.deepcopy(one_benchmark)
            clone["tests"].append(single_test)
            benchmarks.append(clone)
Ejemplo n.º 8
0
    def collectBenchmarks(self, info, source):
        """Collect every benchmark reachable from *source*.

        *source* is either a single benchmark file or an index file with a
        "benchmarks" list of relative paths. Meta from the file is merged
        with info["meta"], then each benchmark is collected and verified.

        Returns:
            list of verified benchmark dicts, one test per benchmark.
        """
        assert os.path.isfile(source), "Source {} is not a file".format(source)
        with open(source, 'r') as f:
            content = json.load(f)

        # idiom: dict.get with default instead of a conditional expression
        meta = content.get("meta", {})
        if "meta" in info:
            deepMerge(meta, info["meta"])
        if hasattr(self.args, "timeout"):
            meta["timeout"] = self.args.timeout
        benchmarks = []

        if "benchmarks" in content:
            # index file: entries are paths relative to the index location
            path = os.path.abspath(os.path.dirname(source))
            assert "meta" in content, "Meta field is missing in benchmarks"
            for benchmark_file in content["benchmarks"]:
                benchmark_file = os.path.join(path, benchmark_file)
                self._collectOneBenchmark(benchmark_file, meta, benchmarks,
                                          info)
        else:
            self._collectOneBenchmark(source, meta, benchmarks, info)

        for b in benchmarks:
            self._verifyBenchmark(b, b["path"], True)
        return benchmarks
Ejemplo n.º 9
0
def _runOnePass(info, benchmark, framework, platform):
    """Run the benchmark's single test, repeating model["repeat"] times
    and merging every iteration's output before delay post-processing."""
    assert len(benchmark["tests"]) == 1, \
        "At this moment, only one test exists in the benchmark"
    repeat = benchmark["model"].get("repeat", 1)
    output = {}
    for iteration in range(repeat):
        # expose the iteration number to the framework via the test entry
        benchmark["tests"][0]["INDEX"] = iteration
        one_output, output_files = \
            framework.runBenchmark(info, benchmark, platform)
        deepMerge(output, one_output)
    return _processDelayData(output)
Ejemplo n.º 10
0
    def _updateNewTestFields(self, tests, one_benchmark):
        idx = 0
        for test in tests:
            identifier = test["identifier"].replace("{ID}", str(idx))
            test["identifier"] = identifier
            idx += 1

        if "commands" in one_benchmark["model"]:
            for test in tests:
                if "commands" not in test:
                    test["commands"] = {}
                deepMerge(test["commands"], one_benchmark["model"]["commands"])
Ejemplo n.º 11
0
 def _runCommands(self, output, commands, platform, programs, model, test,
                  model_files, input_files, output_files, shared_libs,
                  test_files, total_num, platform_args, converter):
     """Compose and run the commands on *platform*, merging each
     command's result dict into *output*."""
     run_cmds = self.composeRunCommand(commands, platform, programs,
                                       model, test, model_files,
                                       input_files, output_files,
                                       shared_libs, test_files)
     for one_cmd in run_cmds:
         result = self.runOnPlatform(total_num, one_cmd, platform,
                                     platform_args, converter)
         deepMerge(output, result)
Ejemplo n.º 12
0
def _runOnePass(info, benchmark, framework, platform):
    """Run the benchmark's single test, repeating model["repeat"] times.

    The first iteration's output seeds the accumulator; later iterations
    are merged in. Iteration stops early when the run status is nonzero.

    Returns:
        delay-processed output data.
    """
    assert len(benchmark["tests"]) == 1, \
        "At this moment, only one test exists in the benchmark"
    to = benchmark["model"]["repeat"] if "repeat" in benchmark["model"] else 1
    output = None
    for idx in range(to):
        benchmark["tests"][0]["INDEX"] = idx
        one_output, output_files = \
            framework.runBenchmark(info, benchmark, platform)
        # bug fix: compare against None explicitly — the truthiness test
        # treated an accumulated-but-empty {} as uninitialized and
        # clobbered it with a fresh deepcopy on every later iteration
        if output is not None:
            deepMerge(output, one_output)
        else:
            output = copy.deepcopy(one_output)
        if getRunStatus() != 0:
            # early exit if there is an error
            break
    data = _processDelayData(output)
    return data
Ejemplo n.º 13
0
    def _replaceStringMap(self, root, platform, program_path,
                          stringmap_from_info):
        """Substitute ``{NAME}`` placeholders throughout *root* using the
        CLI string map merged with the info-provided map plus well-known
        directory entries."""
        args = getArgs()
        string_map = json.loads(args.string_map) if args.string_map else {}

        info_string_map = \
            json.loads(stringmap_from_info) if stringmap_from_info else {}

        # merge the info-provided entries into the command-line map
        deepMerge(string_map, info_string_map)

        string_map["TGTDIR"] = platform.getOutputDir()
        string_map["HOSTDIR"] = self._createHostDir()
        string_map["FAIPEPROOT"] = getFAIPEPROOT()
        if program_path:
            string_map["BUILDDIR"] = program_path

        for key, value in string_map.items():
            deepReplace(root, "{" + key + "}", value)
Ejemplo n.º 14
0
 def _getCommand(self, repo_info):
     """Build the quoted shell command line that launches harness.py with
     the platform/framework/info arguments derived from *repo_info* plus
     any unrecognized CLI arguments."""
     platform = repo_info.pop("platform")
     # popped above: keeping it in repo_info would pollute the --info
     # payload; should clean up later
     dir_path = os.path.dirname(os.path.realpath(__file__))
     unknowns = getUnknowns()
     # a not so elegant way of merging info construct
     if '--info' in unknowns:
         info_idx = unknowns.index('--info')
         deepMerge(repo_info, json.loads(unknowns[info_idx + 1]))
         # drop the flag and its value from the forwarded arguments
         del unknowns[info_idx:info_idx + 2]
     quoted_unknowns = ' '.join("'" + u + "'" for u in unknowns)
     command = (
         getPythonInterpreter() + " " + dir_path + "/harness.py " +
         " --platform '" + platform + "'" +
         " --framework '" + getArgs().framework + "'" +
         " --info '" + json.dumps(repo_info) + "'" + " " +
         quoted_unknowns
     )
     return command
Ejemplo n.º 15
0
    def runBenchmark(self, info, benchmark, platform):
        """Run a single-test benchmark on *platform*.

        Orchestrates one iteration: placeholder substitution, copying
        programs/models/inputs to the target, model- and test-level
        preprocess commands, the main test commands, optional power
        collection, file cleanup, postprocess commands, and converter-based
        metric extraction from output files.

        Returns:
            tuple (output, output_files): merged metrics dict, and a map of
            result-file names to host paths (None when the test declares no
            output files).
        """
        model = benchmark["model"]
        tests = benchmark["tests"]
        assert len(tests) == 1, "At this point, only one test should " + \
            "exist in one benchmark. However, benchmark " + \
            "{} doesn't.".format(benchmark["name"])
        test = tests[0]
        # INDEX is injected by the repeat driver; absent on single runs.
        index = test["INDEX"] if "INDEX" in test else 0
        first_iteration = index == 0
        last_iteration = (("repeat" not in model) or
                          ("repeat" in model and index == model["repeat"] - 1))

        if self.host_platform is None:
            self.host_platform = getHostPlatform(self.tempdir)

        program_files = {
            name: info["programs"][name]["location"]
            for name in info["programs"]
        }
        program_path = os.path.dirname(program_files["program"]) \
            if "program" in program_files else None
        stringmap_from_info = info[
            "string_map"] if "string_map" in info else None
        # substitute {NAME} placeholders throughout the benchmark structure
        self._replaceStringMap(benchmark, platform, program_path,
                               stringmap_from_info)

        # better to be before target program files separation.
        # this way, in ios, the platform may not be copied to the target.
        platform.preprocess(programs=program_files, benchmark=benchmark)

        tgt_program_files, host_program_files = \
            self._separatePrograms(program_files, test.get("commands"))

        # programs are only physically copied on the first iteration
        tgt_program_files = \
            platform.copyFilesToPlatform(tgt_program_files,
                                         copy_files = first_iteration)
        programs = {}
        deepMerge(programs, host_program_files)
        deepMerge(programs, tgt_program_files)

        model_files = {
            name: model["files"][name]["location"]
            for name in model["files"]
        }

        if "converter" in model:
            converter = model["converter"]
            assert "name" in converter, "converter field must have a name"
            assert converter["name"] in self.converters, \
                "Unknown converter {}".format(converter)
        else:
            converter = None

        output = {}
        # overall preprocess (model level): runs on the host, first
        # iteration only, with test=None
        if "preprocess" in model and first_iteration:
            commands = model["preprocess"]["commands"]
            self._runCommands(output, commands, self.host_platform, programs,
                              model, None, model_files, None, None, None, None,
                              -1, converter)

        input_files = {name: test["input_files"][name]["location"]
                       for name in test["input_files"]} \
            if "input_files" in test else None

        test_files = {
            name: test["files"][name]["location"]
            for name in test["files"]
        } if "files" in test else {}

        # Let's handle preprocess command first,
        # since we will copy all files into host
        if "preprocess" in test:
            # simple thing first, let's assume preprocess is self contained
            # check the program to executable
            if "files" in test["preprocess"] and \
                    "program" in test["preprocess"]["files"]:
                host_program_path = \
                    test["preprocess"]["files"]["program"]["location"]
                os.chmod(host_program_path, 0o777)

            # will deprecate in the future
            if "files" in test["preprocess"]:
                preprocess_files = \
                    {name: test["preprocess"]["files"][name]["location"]
                     for name in test["preprocess"]["files"]}
                deepMerge(test_files, preprocess_files)

            # NOTE(review): when neither "commands" nor "command" exists,
            # `commands` below reuses whatever value it had before (or is
            # unbound) — confirm inputs always provide one of the two.
            if "commands" in test["preprocess"]:
                commands = test["preprocess"]["commands"]
            elif "command" in test["preprocess"]:
                commands = [test["preprocess"]["command"]]
            self._runCommands(output, commands, self.host_platform, programs,
                              model, test, model_files, input_files, None,
                              None, test_files, -1, converter)

        tgt_input_files = platform.copyFilesToPlatform(input_files) \
            if input_files else None
        shared_libs = None
        if "shared_libs" in info:
            shared_libs = \
                platform.copyFilesToPlatform(info["shared_libs"],
                                             copy_files = first_iteration)

        tgt_model_files = \
            platform.copyFilesToPlatform(model_files,
                                         copy_files = first_iteration)

        tgt_result_files = None
        if "output_files" in test:
            tgt_result_files = {
                name: test["output_files"][name]["location"]
                for name in test["output_files"]
            }

        total_num = test["iter"]

        # platform_args precedence: test-level beats model-level
        if "platform_args" in test:
            platform_args = test["platform_args"]
        elif "platform_args" in model:
            platform_args = model["platform_args"]
        else:
            platform_args = {}

        # timeout precedence: test-level beats model-level
        if 'timeout' in model:
            platform_args['timeout'] = model['timeout']
        if 'timeout' in test:
            platform_args['timeout'] = test['timeout']

        program = programs["program"] if "program" in programs else ""
        if test["metric"] == "power":
            platform_args["power"] = True
            # in power metric, the output is ignored
            total_num = 0
            platform.killProgram(program)

        if test.get("env", False):
            platform_args["env"] = test["env"]

        # the main test commands, run on the target platform
        self._runCommands(output,
                          test["commands"],
                          platform,
                          programs,
                          model,
                          test,
                          tgt_model_files,
                          tgt_input_files,
                          tgt_result_files,
                          shared_libs,
                          test_files,
                          total_num,
                          converter,
                          platform_args=platform_args)

        if test["metric"] == "power":
            collection_time = test["collection_time"] \
                if "collection_time" in test else 180
            voltage = float(test["voltage"]) if "voltage" in test else 4.0
            # imported lazily so non-power runs do not require monsoon support
            from utils.monsoon_power import collectPowerData
            output = collectPowerData(platform.platform_hash, collection_time,
                                      voltage, test["iter"])
            platform.waitForDevice(20)
            # kill the process if exists
            platform.killProgram(program)

        # remove the files before copying out the output files
        # this will save some time in ios platform, since in ios
        # all files are copied back to the host system
        if len(output) > 0:
            if input_files is not None:
                platform.delFilesFromPlatform(input_files)
            if last_iteration:
                platform.delFilesFromPlatform(tgt_model_files)
                platform.delFilesFromPlatform(tgt_program_files)
                if shared_libs is not None:
                    platform.delFilesFromPlatform(shared_libs)

        output_files = None
        if "output_files" in test:
            target_dir = os.path.join(self.tempdir, "output")
            shutil.rmtree(target_dir, True)
            os.makedirs(target_dir)
            output_files = \
                platform.moveFilesFromPlatform(tgt_result_files, target_dir)

        if "postprocess" in test:
            # NOTE(review): this guard reads test["preprocess"]["files"]
            # while handling the postprocess section — likely a copy/paste
            # slip for test["postprocess"]["files"]; confirm before changing.
            if "files" in test["postprocess"] and \
                    "program" in test["preprocess"]["files"]:
                host_program_path = \
                    test["postprocess"]["files"]["program"]["location"]
                os.chmod(host_program_path, 0o777)

            # will deprecate in the future
            if "files" in test["postprocess"]:
                postprocess_files = \
                    {name: test["postprocess"]["files"][name]["location"]
                     for name in test["postprocess"]["files"]}
                deepMerge(test_files, postprocess_files)

            commands = None
            if "commands" in test["postprocess"]:
                commands = test["postprocess"]["commands"]
            elif "command" in test["postprocess"]:
                commands = [test["postprocess"]["command"]]

            self._runCommands(output, commands, self.host_platform, programs,
                              model, test, model_files, input_files,
                              output_files, None, test_files, -1, converter)

        # model-level postprocess only runs on the last iteration
        if "postprocess" in model and last_iteration:
            commands = model["postprocess"]["commands"]
            self._runCommands(output, commands, self.host_platform, programs,
                              model, test, model_files, None, None, None, None,
                              -1, converter)

        # after everything is done, some of the output files may
        # contain metrics that can be processed. Those files have
        # field converter, and specify which convert to use to
        # convert the metrics
        if output_files:
            for filename in output_files:
                file = output_files[filename]
                converter = \
                    test["output_files"][filename].get("converter")
                if not converter:
                    continue
                assert "name" in converter, "converter field must have a name"
                assert converter["name"] in self.converters, \
                    "Unknown converter {}".format(converter["name"])
                converter_class = self.converters[converter["name"]]
                args = converter.get("args")
                with open(file, "r") as f:
                    content = f.read()
                convert = converter_class()
                results, _ = convert.collect(content, args)
                one_output = convert.convert(results)
                deepMerge(output, one_output)
        return output, output_files
Ejemplo n.º 16
0
    def runBenchmark(self, info, benchmark, platform):
        """Run a single-test benchmark on *platform* (earlier variant).

        Orchestrates one iteration: placeholder substitution, copying
        programs/models/inputs to the target, preprocess commands, the main
        test commands, optional power collection, file cleanup, and
        postprocess commands.

        Returns:
            tuple (output, output_files): merged metrics dict, and a map of
            result-file names to host paths (None when the test declares no
            output files).
        """
        model = benchmark["model"]
        tests = benchmark["tests"]
        assert len(tests) == 1, "At this point, only one test should " + \
            "exist in one benchmark. However, benchmark " + \
            "{} doesn't.".format(benchmark["name"])
        test = tests[0]
        # INDEX is injected by the repeat driver; absent on single runs.
        index = test["INDEX"] if "INDEX" in test else 0
        first_iteration = index == 0
        last_iteration = (("repeat" not in model) or
                          ("repeat" in model and index == model["repeat"] - 1))

        if self.host_platform is None:
            self.host_platform = getHostPlatform(self.tempdir)

        # substitute {NAME} placeholders throughout the benchmark structure
        self._replaceStringMap(benchmark, platform)

        program_files = {name: info["programs"][name]["location"]
                         for name in info["programs"]}
        # better to be before target program files separation.
        # this way, in ios, the platform may not be copied to the target.
        platform.preprocess(programs=program_files)

        tgt_program_files, host_program_files = \
            self._separatePrograms(program_files, test["commands"])

        # we need to copy programs in all iterations, because this is
        # how we get the absolute path of the programs in the target platform
        # may consider optimize this later that only copying for the first
        # iteration
        programs = platform.copyFilesToPlatform(tgt_program_files)
        deepMerge(programs, host_program_files)

        model_files = {name: model["files"][name]["location"]
                       for name in model["files"]}

        if "converter" in model:
            converter_name = model["converter"]
            assert converter_name in self.converters, \
                "Unknown converter {}".format(converter_name)
            converter = self.converters[converter_name]
        else:
            converter = None

        # platform_args used by host-side (pre/post)process commands
        log_output = {"log_output": True}
        output = {}
        # overall preprocess (model level): host platform, first iteration
        # only, with test=None
        if "preprocess" in model and first_iteration:
            commands = model["preprocess"]["commands"]
            self._runCommands(output, commands, self.host_platform, programs,
                              model, None, model_files, None, None, None,
                              None, -1, log_output, converter)

        input_files = {name: test["input_files"][name]["location"]
                       for name in test["input_files"]} \
            if "input_files" in test else None

        test_files = {name: test["files"][name]["location"]
                      for name in test["files"]} if "files" in test else {}

        # Let's handle preprocess command first,
        # since we will copy all files into host
        if "preprocess" in test:
            # simple thing first, let's assume preprocess is self contained
            # check the program to executable
            if "files" in test["preprocess"] and \
                    "program" in test["preprocess"]["files"]:
                host_program_path = \
                    test["preprocess"]["files"]["program"]["location"]
                os.chmod(host_program_path, 0o777)

            # will deprecate in the future
            if "files" in test["preprocess"]:
                preprocess_files = \
                    {name: test["preprocess"]["files"][name]["location"]
                     for name in test["preprocess"]["files"]}
                deepMerge(test_files, preprocess_files)

            # NOTE(review): when neither "commands" nor "command" exists,
            # `commands` below reuses whatever value it had before (or is
            # unbound) — confirm inputs always provide one of the two.
            if "commands" in test["preprocess"]:
                commands = test["preprocess"]["commands"]
            elif "command" in test["preprocess"]:
                commands = [test["preprocess"]["command"]]
            self._runCommands(output, commands, self.host_platform, programs,
                              model,
                              test, model_files, input_files, None, None,
                              test_files, -1, log_output, converter)

        tgt_input_files = platform.copyFilesToPlatform(input_files) \
            if input_files else None
        shared_libs = None
        if "shared_libs" in info:
            shared_libs = platform.copyFilesToPlatform(info["shared_libs"])

        # We need to copy the model files in every iteration, because this
        # is how we get the absolute path in the target platform,
        # will optimize that later.
        tgt_model_files = platform.copyFilesToPlatform(model_files)

        tgt_result_files = None
        if "output_files" in test:
            tgt_result_files = {name: test["output_files"][name]["location"]
                                for name in test["output_files"]}

        total_num = test["iter"]

        # platform_args precedence: test-level beats model-level
        if "platform_args" in test:
            platform_args = test["platform_args"]
        elif "platform_args" in model:
            platform_args = model["platform_args"]
        else:
            platform_args = {}

        # timeouts are only wired up when running under Python 3
        if sys.version_info > (3, 0):
            if 'timeout' in model:
                platform_args['timeout'] = model['timeout']
            if 'timeout' in test:
                platform_args['timeout'] = test['timeout']

        program = programs["program"] if "program" in programs else ""
        if test["metric"] == "power":
            platform_args["power"] = True
            # in power metric, the output is ignored
            total_num = 0
            platform.killProgram(program)

        if test.get("log_output", False):
            platform_args["log_output"] = True

        # the main test commands, run on the target platform
        self._runCommands(output, test["commands"], platform, programs, model,
                          test, tgt_model_files, tgt_input_files,
                          tgt_result_files, shared_libs, test_files,
                          total_num, platform_args, converter)

        if test["metric"] == "power":
            collection_time = test["collection_time"] \
                if "collection_time" in test else 180
            voltage = float(test["voltage"]) if "voltage" in test else 4.0
            # imported lazily so non-power runs do not require monsoon support
            from utils.monsoon_power import collectPowerData
            output = collectPowerData(platform.platform_hash,
                                      collection_time, voltage, test["iter"])
            platform.waitForDevice(20)
            # kill the process if exists
            platform.killProgram(program)

        # remove the files before copying out the output files
        # this will save some time in ios platform, since in ios
        # all files are copied back to the host system
        if len(output) > 0:
            platform.delFilesFromPlatform(tgt_model_files)
            platform.delFilesFromPlatform(program)
            if shared_libs is not None:
                platform.delFilesFromPlatform(shared_libs)
            if input_files is not None:
                platform.delFilesFromPlatform(input_files)

        output_files = None
        if "output_files" in test:
            target_dir = os.path.join(self.tempdir, "output")
            shutil.rmtree(target_dir, True)
            os.makedirs(target_dir)
            output_files = \
                platform.moveFilesFromPlatform(tgt_result_files, target_dir)

        if "postprocess" in test:
            # NOTE(review): this guard reads test["preprocess"]["files"]
            # while handling the postprocess section — likely a copy/paste
            # slip for test["postprocess"]["files"]; confirm before changing.
            if "files" in test["postprocess"] and \
                    "program" in test["preprocess"]["files"]:
                host_program_path = \
                    test["postprocess"]["files"]["program"]["location"]
                os.chmod(host_program_path, 0o777)

            # will deprecate in the future
            if "files" in test["postprocess"]:
                postprocess_files = \
                    {name: test["postprocess"]["files"][name]["location"]
                     for name in test["postprocess"]["files"]}
                deepMerge(test_files, postprocess_files)

            commands = test["postprocess"]["commands"]
            self._runCommands(output, commands, self.host_platform, programs,
                              model, test, model_files, input_files,
                              output_files, None, test_files, -1, log_output,
                              converter)

        # model-level postprocess only runs on the last iteration
        if "postprocess" in model and last_iteration:
            commands = model["postprocess"]["commands"]
            self._runCommands(output, commands, self.host_platform, programs,
                              model, test, model_files, None, None, None, None,
                              -1, log_output, converter)

        return output, output_files
Ejemplo n.º 17
0
    def runBenchmark(self, info, benchmark, platform):
        """Run a single benchmark (exactly one test) on the given platform.

        Replaces string-map placeholders in the benchmark, copies programs,
        model files and input files to the target platform, runs the optional
        model/test preprocess commands, executes the main test commands,
        collects power data when the test metric is "power", moves declared
        output files back to the host, runs the postprocess commands, and
        finally converts any output files that declare a converter into
        metrics merged into the returned output dict.

        Args:
            info: run-level metadata dict; must contain "programs", may
                contain "shared_libs" and "string_map".
            benchmark: benchmark dict with "model" and a one-element "tests"
                list (enforced by assertion below).
            platform: target platform abstraction (device or host).

        Returns:
            Tuple of (output, output_files): the merged metrics dict and the
            dict of output files moved back to the host (or None when the
            test declares no "output_files").
        """
        model = benchmark["model"]
        tests = benchmark["tests"]
        assert len(tests) == 1, (
            "At this point, only one test should " +
            "exist in one benchmark. However, benchmark " +
            "{} doesn't.".format(benchmark["name"]))
        test = tests[0]
        # "repeat" support: the same benchmark may run several iterations;
        # files are copied only on the first and deleted on the last.
        index = test["INDEX"] if "INDEX" in test else 0
        first_iteration = index == 0
        last_iteration = ("repeat"
                          not in model) or ("repeat" in model
                                            and index == model["repeat"] - 1)

        # lazily create the host platform once and reuse it across runs
        if self.host_platform is None:
            self.host_platform = getHostPlatform(self.tempdir, self.args)

        program_files = {
            name: info["programs"][name]["location"]
            for name in info["programs"]
        }
        program_path = (os.path.dirname(program_files["program"])
                        if "program" in program_files else None)
        stringmap_from_info = info[
            "string_map"] if "string_map" in info else None
        self._replaceStringMap(benchmark, platform, program_path,
                               stringmap_from_info)

        # better to be before target program files separation.
        # this way, in ios, the platform may not be copied to the target.
        platform.preprocess(programs=program_files, benchmark=benchmark)

        tgt_program_files, host_program_files = self._separatePrograms(
            program_files, test.get("commands"))

        # binaries are only physically copied on the first repeat iteration
        tgt_program_files = platform.copyFilesToPlatform(
            tgt_program_files, copy_files=first_iteration)
        programs = {}
        deepMerge(programs, host_program_files)
        deepMerge(programs, tgt_program_files)

        model_files = {
            name: model["files"][name]["location"]
            for name in model["files"]
        }

        if "converter" in model:
            converter = model["converter"]
            assert "name" in converter, "converter field must have a name"
            assert converter[
                "name"] in self.converters, "Unknown converter {}".format(
                    converter)
        else:
            converter = None

        output = {}

        # inject default parameters into test
        if "iter" not in test:
            test["iter"] = -1

        # overall preprocess (model level): runs once, on the host
        if "preprocess" in model and first_iteration:
            commands = model["preprocess"]["commands"]
            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                None,
                model_files,
                None,
                None,
                None,
                None,
                -1,
                converter,
            )

        input_files = ({
            name: test["input_files"][name]["location"]
            for name in test["input_files"]
        } if "input_files" in test else None)

        test_files = (
            {name: test["files"][name]["location"]
             for name in test["files"]} if "files" in test else {})

        # Let's handle the preprocess command first,
        # since we will copy all files into host
        if "preprocess" in test:
            # simple thing first, let's assume preprocess is self contained
            # check the program to executable
            if ("files" in test["preprocess"]
                    and "program" in test["preprocess"]["files"]):
                host_program_path = test["preprocess"]["files"]["program"][
                    "location"]
                os.chmod(host_program_path, 0o777)

            # will deprecate in the future
            if "files" in test["preprocess"]:
                preprocess_files = {
                    name: test["preprocess"]["files"][name]["location"]
                    for name in test["preprocess"]["files"]
                }
                deepMerge(test_files, preprocess_files)

            # FIX: default to None so `commands` is never unbound when the
            # preprocess section has neither "commands" nor "command"
            # (mirrors the postprocess handling below).
            commands = None
            if "commands" in test["preprocess"]:
                commands = test["preprocess"]["commands"]
            elif "command" in test["preprocess"]:
                commands = [test["preprocess"]["command"]]
            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                test,
                model_files,
                input_files,
                None,
                None,
                test_files,
                -1,
                converter,
            )

        tgt_input_files = (platform.copyFilesToPlatform(input_files)
                           if input_files else None)
        shared_libs = None
        if "shared_libs" in info:
            shared_libs = platform.copyFilesToPlatform(
                info["shared_libs"], copy_files=first_iteration)

        tgt_model_files = platform.copyFilesToPlatform(
            model_files, copy_files=first_iteration)

        tgt_result_files = None
        if "output_files" in test:
            tgt_result_files = {
                name: test["output_files"][name]["location"]
                for name in test["output_files"]
            }

        total_num = test["iter"]

        # test-level platform_args override model-level ones
        if "platform_args" in test:
            platform_args = test["platform_args"]
        elif "platform_args" in model:
            platform_args = model["platform_args"]
        else:
            platform_args = {}

        # test-level timeout overrides model-level timeout
        if "timeout" in model:
            platform_args["timeout"] = model["timeout"]
        if "timeout" in test:
            platform_args["timeout"] = test["timeout"]

        program = programs["program"] if "program" in programs else ""
        if test["metric"] == "power":
            platform_args["power"] = True
            method = test.get("method")
            platform_args["method"] = method

            if method == "software":
                power_util = software_power.PowerUtil(
                    platform, test.get("collection_time", 300))
            else:
                # FIXME "Monsoon" was unimportable
                from utils.monsoon_power import collectPowerData

            # in power metric, the output is ignored
            total_num = 0
            platform.killProgram(program)

        if test.get("env", False):
            platform_args["env"] = test["env"]

        if platform.getType() == "host":
            # Fix the number of threads
            if not platform_args.get("env", False):
                platform_args["env"] = {}
            MKL_NUM_THREADS = test.get("MKL_NUM_THREADS", 1)
            OMP_NUM_THREADS = test.get("OMP_NUM_THREADS", 1)
            if MKL_NUM_THREADS > 0:
                platform_args["env"]["MKL_NUM_THREADS"] = MKL_NUM_THREADS
            if OMP_NUM_THREADS > 0:
                platform_args["env"]["OMP_NUM_THREADS"] = OMP_NUM_THREADS
            # Randomly select one cpu core from logic cpu #4 to #13.
            cpu_core = test.get("cpu-list", random.randint(5, 14))
            if isinstance(test["commands"], list) and cpu_core > 0:
                # pin the main command to a single core for stable timings
                test["commands"][-1] = " ".join([
                    "taskset", "--cpu-list",
                    str(cpu_core), test["commands"][-1]
                ])

        # the main benchmark run on the target platform
        self._runCommands(
            output,
            test["commands"],
            platform,
            programs,
            model,
            test,
            tgt_model_files,
            tgt_input_files,
            tgt_result_files,
            shared_libs,
            test_files,
            total_num,
            converter,
            platform_args=platform_args,
            main_command=True,
        )

        if test["metric"] == "power":
            if test.get("method") == "software":
                output = power_util.collect()
            else:
                collection_time = (test["collection_time"]
                                   if "collection_time" in test else 180)
                voltage = float(test["voltage"]) if "voltage" in test else 4.0
                output = collectPowerData(
                    platform.platform_hash,
                    collection_time,
                    voltage,
                    test["iter"],
                    self.args.monsoon_map,
                )
                platform.waitForDevice(20)
                # kill the process if exists
                platform.killProgram(program)

        # remove the files before copying out the output files
        # this will save some time in ios platform, since in ios
        # all files are copied back to the host system
        if len(output) > 0:
            if input_files is not None:
                platform.delFilesFromPlatform(tgt_input_files)
            if last_iteration:
                platform.delFilesFromPlatform(tgt_model_files)
                platform.delFilesFromPlatform(tgt_program_files)
                if shared_libs is not None:
                    platform.delFilesFromPlatform(shared_libs)

        output_files = None
        if "output_files" in test:
            target_dir = os.path.join(self.tempdir, "output")
            shutil.rmtree(target_dir, True)
            os.makedirs(target_dir)
            output_files = platform.moveFilesFromPlatform(
                tgt_result_files, target_dir)

        platform.postprocess()

        if "postprocess" in test:
            # FIX: previously checked test["preprocess"]["files"], which
            # raised KeyError when a test declares only "postprocess" and
            # skipped the chmod when the two sections' files differ.
            if ("files" in test["postprocess"]
                    and "program" in test["postprocess"]["files"]):
                host_program_path = test["postprocess"]["files"]["program"][
                    "location"]
                os.chmod(host_program_path, 0o777)

            # will deprecate in the future
            if "files" in test["postprocess"]:
                postprocess_files = {
                    name: test["postprocess"]["files"][name]["location"]
                    for name in test["postprocess"]["files"]
                }
                deepMerge(test_files, postprocess_files)

            commands = None
            if "commands" in test["postprocess"]:
                commands = test["postprocess"]["commands"]
            elif "command" in test["postprocess"]:
                commands = [test["postprocess"]["command"]]

            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                test,
                model_files,
                input_files,
                output_files,
                None,
                test_files,
                -1,
                converter,
            )

        # model-level postprocess: runs on the host, only once (last repeat)
        if "postprocess" in model and last_iteration:
            commands = model["postprocess"]["commands"]
            self._runCommands(
                output,
                commands,
                self.host_platform,
                programs,
                model,
                test,
                model_files,
                None,
                None,
                None,
                None,
                -1,
                converter,
            )

        # after everything is done, some of the output files may
        # contain metrics that can be processed. Those files have
        # field converter, and specify which convert to use to
        # convert the metrics
        if output_files:
            for filename in output_files:
                file = output_files[filename]
                converter = test["output_files"][filename].get("converter")
                if not converter:
                    continue
                assert "name" in converter, "converter field must have a name"
                assert (converter["name"]
                        in self.converters), "Unknown converter {}".format(
                            converter["name"])
                converter_class = self.converters[converter["name"]]
                args = converter.get("args")
                with open(file, "r") as f:
                    content = f.read()
                convert = converter_class()
                results, _ = convert.collect(content, args)
                one_output = convert.convert(results)
                deepMerge(output, one_output)
        return output, output_files