Example #1
0
    def run_harness(self):
        """Build the LoadGen flags, launch the harness binary, and return the parsed result.

        Flow: assemble the flag dict (defaults + scenario-specific + GPU engines),
        write the `measurements/` entries, then — unless only .conf generation was
        requested — run the harness executable and extract the scenario result
        from its stdout via the scenario's result regex.

        Returns:
            The value of self._handle_harness_result(...) for the parsed result,
            or the string "Generated conf files" when generate_conf_files_only is set.
        """
        flag_dict = self.build_default_flags()
        flag_dict.update(self.build_scenario_specific_flags())

        # Handle engines
        if self.has_gpu:
            flag_dict["gpu_engines"] = self.gpu_engine

        # Generates the entries in the `measurements/` directory, and updates flag_dict accordingly
        generate_measurements_entry(self.get_system_name(), self.name,
                                    self._get_submission_benchmark_name(),
                                    self.scenario, self.args["input_dtype"],
                                    self.args["precision"], flag_dict)

        # Stop here if we are only generating .conf files in measurements
        if self.generate_conf_files_only:
            return "Generated conf files"

        # _build_custom_flags may return either a ready-made argument string or a
        # dict; in the latter case serialize the (possibly updated) flag_dict.
        argstr = self._build_custom_flags(flag_dict)
        if isinstance(argstr, dict):
            argstr = args_to_string(flag_dict)

        # Handle environment variables
        if self.use_jemalloc:
            self.prepend_ld_preload(
                "/usr/lib/x86_64-linux-gnu/libjemalloc.so.2")

        cmd = f"{self.executable} {argstr}"
        output = run_command(cmd, get_output=True, custom_env=self.env_vars)

        # Return harness result.
        return self._handle_harness_result(
            self.harness_get_result(output,
                                    scenario_result_regex[self.scenario]))
    def run_harness(self):
        """Build the LoadGen flags, launch the harness binary, and return the result string.

        Flow: assemble the flag dict (defaults + scenario-specific + GPU engines +
        --fast handling), write the `measurements/` entries, then — unless only
        .conf generation was requested — run the harness executable and read the
        scenario's headline metric and validity back from the LoadGen log directory.

        Returns:
            The value of self._handle_harness_result(result_string), or the
            string "Generated conf files" when generate_conf_files_only is set.
        """
        flag_dict = self.build_default_flags()
        flag_dict.update(self.build_scenario_specific_flags())

        # Handle engines
        if self.has_gpu:
            flag_dict["gpu_engines"] = self.gpu_engine

        # MLPINF-853: Special handling of --fast. Use min_duration=60000, and if
        # Offline or MultiStream, use min_query_count=1. `fast` is a bare flag,
        # so its value is cleared to None before serialization.
        if flag_dict.get("fast", False):
            flag_dict.setdefault("min_duration", 60000)
            if self.scenario in [SCENARIOS.Offline, SCENARIOS.MultiStream]:
                flag_dict.setdefault("min_query_count", 1)
            flag_dict["fast"] = None

        # Generates the entries in the `measurements/` directory, and updates flag_dict accordingly
        generate_measurements_entry(
            self.get_system_name(),
            self.name,
            self._get_submission_benchmark_name(),
            self.scenario,
            self.args["input_dtype"],
            self.args["precision"],
            flag_dict)

        # Stop here if we are only generating .conf files in measurements
        if self.generate_conf_files_only:
            return "Generated conf files"

        # _build_custom_flags may return either a ready-made argument string or a
        # dict; in the latter case serialize the (possibly updated) flag_dict.
        argstr = self._build_custom_flags(flag_dict)
        if isinstance(argstr, dict):
            argstr = args_to_string(flag_dict)

        # Handle environment variables
        if self.use_jemalloc:
            self.prepend_ld_preload("/usr/lib/x86_64-linux-gnu/libjemalloc.so.2")

        cmd = f"{self.executable} {argstr}"
        output = run_command(cmd, get_output=True, custom_env=self.env_vars)

        # Read the scenario's headline metric and its validity from the LoadGen
        # log directory, then format the harness result string.
        scenario_key = scenario_loadgen_log_keys[self.scenario]
        results = from_loadgen_by_keys(
            os.path.join(
                self.args["log_dir"],
                self.get_system_name(),
                self._get_submission_benchmark_name(),
                self.scenario),
            ["result_validity", scenario_key])

        if scenario_key not in results:
            result_string = "Cannot find performance result. Maybe you are running in AccuracyOnly mode."
        elif "result_validity" not in results:
            result_string = f"{scenario_key}: {results[scenario_key]}, Result validity unknown"
        else:
            result_string = f"{scenario_key}: {results[scenario_key]}, Result is {results['result_validity']}"
        return self._handle_harness_result(result_string)