Example #1
    def run(self, benchmark_file):
        assert benchmark_file, "benchmark_file is not set"
        benchmarks = getBenchmarks(benchmark_file)
        if not os.path.isdir(self.root_model_dir):
            os.makedirs(self.root_model_dir)

        for benchmark in benchmarks:
            self._processOneBenchmark(benchmark)
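
This variant (and the next one) guards the model directory with an explicit os.path.isdir check before calling os.makedirs. A minimal sketch of the same guard using the standard library's exist_ok flag; the directory path here is hypothetical:

    import os

    root_model_dir = "/tmp/models"  # hypothetical path, for illustration only
    # os.makedirs with exist_ok=True creates the directory tree only when it
    # is missing, replacing the separate isdir check used above
    os.makedirs(root_model_dir, exist_ok=True)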
Example #2
    def run(self, benchmark_file):
        assert benchmark_file, "benchmark_file is not set"
        benchmarks = getBenchmarks(benchmark_file)
        locations = []
        if not os.path.isdir(self.root_model_dir):
            os.makedirs(self.root_model_dir)

        for benchmark in benchmarks:
            location = self._processOneBenchmark(benchmark)
            locations.extend(location)
        locations = [l for l in locations if l]
        return locations
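
This variant collects the location list returned for each benchmark, flattens it with extend, and drops empty entries. A self-contained sketch of that collect-and-filter pattern; process_one is a hypothetical stand-in for _processOneBenchmark:

    def collect_locations(benchmarks, process_one):
        # process_one is assumed to return a list of paths for one benchmark,
        # possibly containing None or empty strings
        locations = []
        for benchmark in benchmarks:
            locations.extend(process_one(benchmark))
        # drop falsy entries before returning
        return [location for location in locations if location]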
Example #3
    def run(self):
        if self.args.list_devices:
            self._listDevices()
            return
        if self.args.list_job_queues:
            self._printJobQueues()
            return
        if self.args.fetch_status or self.args.fetch_result:
            result = self._fetchResult()
            return result
        if self.args.query_num_devices:
            return self._queryNumDevices(self.args.query_num_devices)

        assert self.args.benchmark_file, \
            "--benchmark_file (-b) must be specified"
        assert self.args.devices, "--devices must be specified"
        assert self.args.framework, "--framework must be specified"
        assert self.args.platform, "--platform must be specified"
        assert self.args.repo_dir, "--repo_dir must be specified"
        assert ((self.args.info is not None) and
            (self.args.custom_binary is None) and
            (self.args.pre_built_binary is None)) or (self.args.info is None), \
            "--info cannot co-exist with --custom_binary and --pre_built_binary"

        list_job_queues = self._listJobQueues()
        if not self.args.force_submit:
            self._checkDevices(self.args.devices, self.args.hashes)
            assert self.args.job_queue != "*" and \
                self.args.job_queue in list_job_queues, \
                "--job_queue must be choosen from " + " ".join(list_job_queues)

        self.tempdir = tempfile.mkdtemp()
        program_filenames = {}
        if self.args.info:
            self.info = json.loads(self.args.info)
        else:
            self.info = {"treatment": {"programs": {}}}
            if self.args.string_map:
                self.info["treatment"]["string_map"] = str(
                    self.args.string_map)

        assert (("treatment" in self.info) and
                ("programs" in self.info["treatment"])), \
            'In --info, field treatment must exist. In info["treatment"], ' \
            "the programs field must exist (may be None)"

        # pick the binary in order of preference: an explicit program location
        # supplied via --info, then --custom_binary, then --pre_built_binary
        binary = self.info["treatment"]["programs"]["program"]["location"] \
            if ("programs" in self.info["treatment"] and
                "program" in self.info["treatment"]["programs"]) \
            else self.args.custom_binary if self.args.custom_binary \
            else self.args.pre_built_binary
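        # build the program on a background thread; the thread is joined only
        # after the benchmarks below have been uploaded and prepared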
        t = BuildProgram(self.args, self.file_handler, self.tempdir,
                         program_filenames, binary)
        t.start()

        benchmarks = getBenchmarks(self.args.benchmark_file,
                                   self.args.framework)
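        # upload each benchmark, then apply --debug and --env overrides to its
        # individual tests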
        for benchmark in benchmarks:
            self._uploadOneBenchmark(benchmark)
            if self.args.debug:
                for test in benchmark["content"]["tests"]:
                    test["log_output"] = True
            if self.args.env:
                env = {}
                env_vars = self.args.env.split()
                for env_var in env_vars:
                    k, v = parse_kwarg(env_var)
                    env[k] = v
                for test in benchmark["content"]["tests"]:
                    cmd_env = {}
                    cmd_env.update(env)
                    if "env" in test:
                        cmd_env.update(test["env"])
                    test["env"] = cmd_env
        t.join()

        assert "program" in program_filenames, \
            "program does not exist. Build may be failed."

        for fn in program_filenames:
            self.info["treatment"]["programs"][fn] = {
                "location": program_filenames[fn]
            }

        # Pass meta file from build to benchmark
        meta = getMeta(self.args, self.args.platform)
        if meta:
            assert "meta" not in self.info, \
                "info field already has a meta field"
            self.info["meta"] = meta

        new_devices = self.devices.getFullNames(self.args.devices)
        user_identifier = int(self.args.user_identifier) \
            if self.args.user_identifier else randint(1, 1000000000000000)
        user = getuser() if not self.args.user_string \
            else self.args.user_string
        hashes = self.args.hashes
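        # submit one entry per benchmark, sharing the assembled info together
        # with the resolved devices, user identifier, user name, and hashes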
        for benchmark in benchmarks:
            data = {
                "benchmark": benchmark,
                "info": self.info,
            }
            self.db.submitBenchmarks(data, new_devices, user_identifier, user,
                                     hashes)
        if self.args.async_submit:
            return

        self.url_printer.printURL(self.scuba_dataset, user_identifier,
                                  benchmarks)

        if not self.args.debug:
            shutil.rmtree(self.tempdir, True)
        if self.args.screen_reporter:
            self._screenReporter(user_identifier)
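
The --env handling in this example splits a whitespace-separated list of KEY=VALUE pairs and merges them into every test, letting a test's own env entries take precedence. A standalone sketch of that merge, assuming the same KEY=VALUE format that parse_kwarg handles in the original:

    def merge_env(env_arg, tests):
        # env_arg is a whitespace-separated string such as "A=1 B=2"
        env = {}
        for env_var in env_arg.split():
            k, v = env_var.split("=", 1)
            env[k] = v
        for test in tests:
            cmd_env = dict(env)
            # entries already present in a test's own env win over the
            # command-line values
            cmd_env.update(test.get("env", {}))
            test["env"] = cmd_env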