def benchmark_using_loadgen(scenario_str, mode_str, samples_in_mem,
                            config_filepath):
    "Perform the benchmark using python API for the LoadGen librar"

    scenario = {
        'SingleStream': lg.TestScenario.SingleStream,
        'MultiStream': lg.TestScenario.MultiStream,
        'Server': lg.TestScenario.Server,
        'Offline': lg.TestScenario.Offline,
    }[scenario_str]

    mode = {
        'AccuracyOnly': lg.TestMode.AccuracyOnly,
        'PerformanceOnly': lg.TestMode.PerformanceOnly,
        'SubmissionRun': lg.TestMode.SubmissionRun,
    }[mode_str]

    ts = lg.TestSettings()
    if (config_filepath):
        ts.FromConfig(config_filepath, 'random_model_name', scenario_str)
    ts.scenario = scenario
    ts.mode = mode

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(dataset_size, samples_in_mem, load_query_samples,
                          unload_query_samples)

    log_settings = lg.LogSettings()
    log_settings.enable_trace = False
    lg.StartTestWithLogSettings(sut, qsl, ts, log_settings)

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
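
The harness examples above and below construct the SUT and QSL from module-level callbacks that are not shown. A minimal sketch of what those callbacks could look like, using the response API that appears later in this listing; the in-memory store, the placeholder data and the module name behind the lg alias are assumptions, not the original code:

import array
import numpy as np
import mlperf_loadgen as lg  # assumed module behind the `lg` alias

_loaded = {}  # sample index -> preprocessed sample (illustrative store)

def load_query_samples(sample_indices):
    # LoadGen asks the QSL to bring these samples into memory.
    for idx in sample_indices:
        _loaded[idx] = np.zeros(8, dtype=np.float32)  # placeholder data

def unload_query_samples(sample_indices):
    for idx in sample_indices:
        _loaded.pop(idx, None)

def issue_queries(query_samples):
    # Run (stand-in) inference and hand every result back to LoadGen.
    responses, keep_alive = [], []
    for qs in query_samples:
        output = _loaded[qs.index]          # stand-in for real model output
        buf = array.array("B", output.tobytes())
        keep_alive.append(buf)              # keep buffer alive until reported
        ptr, size = buf.buffer_info()
        responses.append(lg.QuerySampleResponse(qs.id, ptr, size))
    lg.QuerySamplesComplete(responses)

def flush_queries():
    pass

def process_latencies(latencies_ns):
    print("mean latency: %.3f ms" % (sum(latencies_ns) / len(latencies_ns) / 1e6))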
Example 2
def benchmark_using_loadgen():
    "Perform the benchmark using python API for the LoadGen library"

    scenario = {
        'SingleStream': lg.TestScenario.SingleStream,
        'MultiStream': lg.TestScenario.MultiStream,
        'Server': lg.TestScenario.Server,
        'Offline': lg.TestScenario.Offline,
    }[LOADGEN_SCENARIO]

    mode = {
        'AccuracyOnly': lg.TestMode.AccuracyOnly,
        'PerformanceOnly': lg.TestMode.PerformanceOnly,
        'SubmissionRun': lg.TestMode.SubmissionRun,
    }[LOADGEN_MODE]

    ts = lg.TestSettings()
    ts.FromConfig(MLPERF_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
    ts.FromConfig(USER_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
    ts.scenario = scenario
    ts.mode = mode

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(LOADGEN_DATASET_SIZE, LOADGEN_BUFFER_SIZE,
                          load_query_samples, unload_query_samples)

    log_settings = lg.LogSettings()
    log_settings.enable_trace = False
    lg.StartTestWithLogSettings(sut, qsl, ts, log_settings)

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
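
TestSettings.FromConfig() in the example above reads a plain-text config keyed by model name and scenario. A hypothetical user.conf in that format, written from Python; the keys and values are only illustrative:

# Each line takes the form "<model>.<scenario>.<key> = <value>".
user_conf_text = """\
*.SingleStream.target_latency = 10
random_model_name.Offline.target_qps = 200
*.Server.target_qps = 100
"""
with open("user.conf", "w") as conf_file:
    conf_file.write(user_conf_text)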
Example 3
def benchmark_using_loadgen():
    "Perform the benchmark using python API for the LoadGen library"

    global model

    # Load the [cached] Torch model
    torchvision_version = ''  # master by default
    try:
        import torchvision
        torchvision_version = ':v' + torchvision.__version__
    except Exception:
        pass

    model = torch.hub.load('pytorch/vision' + torchvision_version,
                           MODEL_NAME,
                           pretrained=True)
    model.eval()

    # move the model to GPU for speed if available
    if USE_CUDA:
        model.to('cuda')

    scenario = {
        'SingleStream': lg.TestScenario.SingleStream,
        'MultiStream': lg.TestScenario.MultiStream,
        'Server': lg.TestScenario.Server,
        'Offline': lg.TestScenario.Offline,
    }[LOADGEN_SCENARIO]

    mode = {
        'AccuracyOnly': lg.TestMode.AccuracyOnly,
        'PerformanceOnly': lg.TestMode.PerformanceOnly,
        'SubmissionRun': lg.TestMode.SubmissionRun,
    }[LOADGEN_MODE]

    ts = lg.TestSettings()
    ts.FromConfig(MLPERF_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
    ts.FromConfig(USER_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
    ts.scenario = scenario
    ts.mode = mode

    if LOADGEN_MULTISTREAMNESS:
        ts.multi_stream_samples_per_query = int(LOADGEN_MULTISTREAMNESS)

    if LOADGEN_COUNT_OVERRIDE:
        ts.min_query_count = int(LOADGEN_COUNT_OVERRIDE)
        ts.max_query_count = int(LOADGEN_COUNT_OVERRIDE)

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(LOADGEN_DATASET_SIZE, LOADGEN_BUFFER_SIZE,
                          load_query_samples, unload_query_samples)

    log_settings = lg.LogSettings()
    log_settings.enable_trace = False
    lg.StartTestWithLogSettings(sut, qsl, ts, log_settings)

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
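
Example 3 relies on an issue_queries callback defined elsewhere in its module. One possible sketch for this torchvision classifier, assuming load_query_samples fills a preloaded_tensors dict and reusing the response pattern from the other examples; none of this is the original harness:

import array
import numpy as np
import torch

preloaded_tensors = {}  # assumed: filled by load_query_samples(); index -> CHW tensor

def issue_queries(query_samples):
    # `lg`, `model` and USE_CUDA come from the module above (lg is the LoadGen alias).
    with torch.no_grad():
        batch = torch.stack([preloaded_tensors[qs.index] for qs in query_samples])
        if USE_CUDA:
            batch = batch.to('cuda')
        labels = torch.argmax(model(batch), dim=1).cpu().numpy()

    responses, keep_alive = [], []
    for qs, label in zip(query_samples, labels):
        buf = array.array("B", np.int32(label).tobytes())
        keep_alive.append(buf)
        ptr, size = buf.buffer_info()
        responses.append(lg.QuerySampleResponse(qs.id, ptr, size))
    lg.QuerySamplesComplete(responses)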
Example 4
def main():
    args = get_args()

    if args.backend == "pytorch":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from pytorch_SUT import get_pytorch_sut
        sut = get_pytorch_sut()
    elif args.backend == "tf":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from tf_SUT import get_tf_sut
        sut = get_tf_sut()
    elif args.backend == "tf_estimator":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from tf_estimator_SUT import get_tf_estimator_sut
        sut = get_tf_estimator_sut()
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_onnxruntime_sut
        sut = get_onnxruntime_sut(args)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "bert", args.scenario)
    settings.FromConfig(args.user_conf, "bert", args.scenario)

    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    print("Running LoadGen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)

    if args.accuracy:
        cmd = "python3 accuracy-squad.py"
        subprocess.check_call(cmd, shell=True)

    print("Done!")

    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)

    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)
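
The main() above uses a module-level scenario_map that is not shown; it is presumably the same mapping spelled out in Example 21 below:

scenario_map = {
    "SingleStream": lg.TestScenario.SingleStream,
    "MultiStream": lg.TestScenario.MultiStream,
    "Server": lg.TestScenario.Server,
    "Offline": lg.TestScenario.Offline,
}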
Example 5
    def eval_func(model):
        print("Running Loadgen test...")
        sut.greedy_decoder._model = model
        lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings,
                                    log_settings)
        cmd = f"python3 accuracy_eval.py --log_dir {log_path} \
            --dataset_dir {args.dataset_dir} --manifest {args.manifest}"

        out = subprocess.check_output(cmd, shell=True)
        out = out.decode()
        regex_accu = re.compile(pattern[0])
        accu = float(regex_accu.findall(out)[0].split('=')[1])
        print('Accuracy: %.3f ' % (accu))
        return accu
Example 6
def main():
    args = get_args()

    batch_size = args.offline_batch_size if args.scenario == "Offline" else 1

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "rnnt", args.scenario)
    settings.FromConfig(args.user_conf, "rnnt", args.scenario)

    issued_query_count = None
    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
        issued_query_count = 2513
    else:
        settings.mode = lg.TestMode.PerformanceOnly
        issued_query_count = settings.min_query_count

    log_path = args.log_dir
    os.makedirs(log_path, exist_ok=True)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    if args.backend == "pytorch":
        from pytorch_SUT import PytorchSUT
        sut = PytorchSUT(args.pytorch_config_toml, args.pytorch_checkpoint,
                         args.dataset_dir, args.manifest, args.perf_count,
                         issued_query_count, args.scenario, args.machine_conf,
                         batch_size, args.cores_for_loadgen,
                         args.cores_per_instance, args.debug, args.cosim,
                         args.profile, args.ipex, args.bf16, args.warmup)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    print("Running Loadgen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)

    if args.accuracy:
        cmd = f"python3 accuracy_eval.py --log_dir {log_path} --dataset_dir {args.dataset_dir} --manifest {args.manifest}"
        print(f"Running accuracy script: {cmd}")
        subprocess.check_call(cmd, shell=True)

    lg.DestroySUT(sut.sut)

    print("Done!")
Example 7
def main():
    args = get_args()

    if args.backend == "pytorch":
        from pytorch_SUT import get_pytorch_sut
        sut = get_pytorch_sut(args.model_dir, args.preprocessed_data_dir,
                              args.performance_count)
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_onnxruntime_sut
        sut = get_onnxruntime_sut(args.onnx_model, args.preprocessed_data_dir,
                                  args.performance_count)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "3d-unet", args.scenario)
    settings.FromConfig(args.user_conf, "3d-unet", args.scenario)

    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    print("Running Loadgen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)

    if args.accuracy:
        print("Running accuracy script...")
        cmd = "python3 brats_eval.py"
        subprocess.check_call(cmd, shell=True)

    print("Done!")

    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)

    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)
Example 8
def benchmark_using_loadgen():
    "Perform the benchmark using python API for the LoadGen library"

    global num_classes
    global model_output_volume

    (pycuda_context, max_batch_size, input_volume, model_output_volume,
     num_layers) = initialize_predictor()
    num_classes = len(class_labels)

    scenario = {
        'SingleStream': lg.TestScenario.SingleStream,
        'MultiStream': lg.TestScenario.MultiStream,
        'Server': lg.TestScenario.Server,
        'Offline': lg.TestScenario.Offline,
    }[LOADGEN_SCENARIO]

    mode = {
        'AccuracyOnly': lg.TestMode.AccuracyOnly,
        'PerformanceOnly': lg.TestMode.PerformanceOnly,
        'SubmissionRun': lg.TestMode.SubmissionRun,
    }[LOADGEN_MODE]

    ts = lg.TestSettings()
    ts.FromConfig(MLPERF_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
    ts.FromConfig(USER_CONF_PATH, MODEL_NAME, LOADGEN_SCENARIO)
    ts.scenario = scenario
    ts.mode = mode

    if LOADGEN_MULTISTREAMNESS:
        ts.multi_stream_samples_per_query = int(LOADGEN_MULTISTREAMNESS)

    if LOADGEN_COUNT_OVERRIDE:
        ts.min_query_count = int(LOADGEN_COUNT_OVERRIDE)
        ts.max_query_count = int(LOADGEN_COUNT_OVERRIDE)

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(LOADGEN_DATASET_SIZE, LOADGEN_BUFFER_SIZE,
                          load_query_samples, unload_query_samples)

    log_settings = lg.LogSettings()
    log_settings.enable_trace = False
    lg.StartTestWithLogSettings(sut, qsl, ts, log_settings)

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
    pycuda_context.pop()
Example 9
    def benchmark(model):
        print("Running Loadgen test...")
        sut.greedy_decoder._model = model
        lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings,
                                    log_settings)
        file_path = os.path.join(log_path, 'mlperf_log_summary.txt')
        f = open(file_path, 'r', encoding='UTF-8')
        file_content = f.read()
        f.close()
        regex_batch = re.compile(pattern[1])
        regex_late = re.compile(pattern[2])
        samples_per_query = int(
            regex_batch.findall(file_content)[0].split(': ')[1])
        latency_per_sample = int(
            regex_late.findall(file_content)[0].split(': ')[1])
        print('Batch size = %d' % samples_per_query)
        print('Latency: %.3f ms' % (latency_per_sample / 10**6))
        print('Throughput: %.3f samples/sec' % (10**9 / latency_per_sample))
Example 10
    def perf_func(model):
        print("Running Loadgen test...")
        sut.greedy_decoder._model = model
        lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings,
                                    log_settings)
        if not args.accuracy:
            file_path = os.path.join(log_path, 'mlperf_log_summary.txt')
            f = open(file_path, 'r', encoding='UTF-8')
            file_content = f.read()
            f.close()
            regex_batch = re.compile(pattern[1])
            regex_thro = re.compile(pattern[2])
            samples_per_query = int(
                regex_batch.findall(file_content)[0].split(': ')[1])
            samples_per_second = float(
                regex_thro.findall(file_content)[0].split(': ')[1])
            print('Batch size = %d' % samples_per_query)
            print('Latency: %.3f ms' % ((1 / samples_per_second) * 1000))
            print('Throughput: %.3f samples/sec' % samples_per_second)
Example 11
    def start(self):
        """Starts the load test."""
        settings = self.get_test_settings()

        log_settings = lg.LogSettings()
        log_settings.log_output.outdir = tempfile.mkdtemp()
        log_settings.log_output.copy_detail_to_stdout = True
        log_settings.log_output.copy_summary_to_stdout = True
        log_settings.enable_trace = False

        logging.info("Constructing SUT.")
        sut = lg.ConstructSUT(self.issue_queries, self.flush_queries,
                              self.process_metrics)
        logging.info("Constructing QSL.")
        qsl = lg.ConstructQSL(self.total_sample_count,
                              self.performance_sample_count, self.load_samples,
                              self.unload_samples)
        logging.info("Starting test.")
        lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
        lg.DestroyQSL(qsl)
        lg.DestroySUT(sut)
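
The start() method above calls self.get_test_settings(), which is not shown. A hypothetical implementation in the same style as the rest of this listing; the scenario, mode and QPS target are placeholders:

    def get_test_settings(self):
        settings = lg.TestSettings()
        settings.scenario = lg.TestScenario.Offline
        settings.mode = lg.TestMode.PerformanceOnly
        settings.offline_expected_qps = 100  # placeholder target
        return settings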
Example 12
def benchmark_using_loadgen():
    "Perform the benchmark using python API for the LoadGen library"

    global pycuda_context
    initialize_predictor()

    scenario = {
        'SingleStream': lg.TestScenario.SingleStream,
        'MultiStream': lg.TestScenario.MultiStream,
        'Server': lg.TestScenario.Server,
        'Offline': lg.TestScenario.Offline,
    }[LOADGEN_SCENARIO]

    mode = {
        'AccuracyOnly': lg.TestMode.AccuracyOnly,
        'PerformanceOnly': lg.TestMode.PerformanceOnly,
        'SubmissionRun': lg.TestMode.SubmissionRun,
    }[LOADGEN_MODE]

    ts = lg.TestSettings()
    if LOADGEN_CONF_FILE:
        ts.FromConfig(LOADGEN_CONF_FILE, 'random_model_name', LOADGEN_SCENARIO)
    ts.scenario = scenario
    ts.mode = mode

    if LOADGEN_MULTISTREAMNESS:
        ts.multi_stream_samples_per_query = int(LOADGEN_MULTISTREAMNESS)

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(LOADGEN_DATASET_SIZE, LOADGEN_BUFFER_SIZE,
                          load_query_samples, unload_query_samples)

    log_settings = lg.LogSettings()
    log_settings.enable_trace = False
    lg.StartTestWithLogSettings(sut, qsl, ts, log_settings)

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
    pycuda_context.pop()
Example 13
def main():
    args = get_args()

    if args.backend == "pytorch":
        from pytorch_SUT import PytorchSUT
        sut = PytorchSUT(args.pytorch_config_toml, args.pytorch_checkpoint,
                         args.dataset_dir, args.manifest, args.perf_count)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "rnnt", args.scenario)
    settings.FromConfig(args.user_conf, "rnnt", args.scenario)

    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    log_path = args.log_dir
    os.makedirs(log_path, exist_ok=True)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    print("Running Loadgen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)

    if args.accuracy:
        cmd = f"python3 accuracy_eval.py --log_dir {log_path} --dataset_dir {args.dataset_dir} --manifest {args.manifest}"
        print(f"Running accuracy script: {cmd}")
        subprocess.check_call(cmd, shell=True)

    print("Done!")
Example 14
def main(argv):
    del argv

    global last_timeing

    if FLAGS.scenario == "Server":
        # Disable garbage collection for realtime performance.
        gc.disable()

    # define backend
    backend = BackendTensorflow()

    # override image format if given
    image_format = FLAGS.data_format if FLAGS.data_format else backend.image_format()

    # dataset to use
    wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[
        FLAGS.dataset]
    ds = wanted_dataset(data_path=FLAGS.dataset_path,
                        image_list=FLAGS.dataset_list,
                        name=FLAGS.dataset,
                        image_format=image_format,
                        use_cache=FLAGS.cache,
                        count=FLAGS.count,
                        cache_dir=FLAGS.cache_dir,
                        annotation_file=FLAGS.annotation_file,
                        use_space_to_depth=FLAGS.use_space_to_depth)
    # load model to backend
    # TODO(wangtao): parse flags to params.
    params = dict(ssd_model.default_hparams().values())
    params["conv0_space_to_depth"] = FLAGS.use_space_to_depth
    params["use_bfloat16"] = FLAGS.use_bfloat16
    params["use_fused_bn"] = FLAGS.use_fused_bn

    masters = []
    tpu_names = FLAGS.tpu_name
    tpu_names = tpu_names.split(",")
    for tpu_name in tpu_names:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        masters.append(tpu_cluster_resolver.get_master())

    #
    # make one pass over the dataset to validate accuracy
    #
    count = FLAGS.count if FLAGS.count else ds.get_item_count()

    #
    # warmup
    #
    log.info("warmup ...")

    batch_size = FLAGS.batch_size[0] if FLAGS.scenario == "Offline" else 1
    backend_lists = []
    for _ in range(len(tpu_names)):
        backend = BackendTensorflow()
        backend_lists.append(backend)
    runner = QueueRunner(backend_lists,
                         ds,
                         FLAGS.threads,
                         post_proc=post_proc,
                         max_batchsize=batch_size)

    runner.start_run({}, FLAGS.accuracy)

    def issue_queries(query_samples):
        runner.enqueue(query_samples)

    def flush_queries():
        pass

    def process_latencies(latencies_ns):
        # called by loadgen to show us the recorded latencies
        global last_timeing
        last_timeing = [t / NANO_SEC for t in latencies_ns]

    tf.logging.info("starting {}, latency={}".format(FLAGS.scenario,
                                                     FLAGS.max_latency))
    settings = lg.TestSettings()
    tf.logging.info(FLAGS.scenario)
    settings.scenario = SCENARIO_MAP[FLAGS.scenario]
    settings.qsl_rng_seed = FLAGS.qsl_rng_seed
    settings.sample_index_rng_seed = FLAGS.sample_index_rng_seed
    settings.schedule_rng_seed = FLAGS.schedule_rng_seed

    if FLAGS.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    if FLAGS.qps:
        qps = float(FLAGS.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

    if FLAGS.time:
        settings.min_duration_ms = FLAGS.time * MILLI_SEC
        settings.max_duration_ms = 0
        qps = FLAGS.qps or 100
        settings.min_query_count = qps * FLAGS.time
        settings.max_query_count = 0
    else:
        settings.min_query_count = 270336
        settings.max_query_count = 0

    target_latency_ns = int(float(FLAGS.max_latency) * NANO_SEC)
    settings.single_stream_expected_latency_ns = target_latency_ns
    settings.multi_stream_target_latency_ns = target_latency_ns
    settings.server_target_latency_ns = target_latency_ns

    log_settings = lg.LogSettings()
    log_settings.log_output.outdir = tempfile.mkdtemp()
    log_settings.log_output.copy_detail_to_stdout = True
    log_settings.log_output.copy_summary_to_stdout = True
    log_settings.enable_trace = False

    def load_query_samples(sample_list):
        """Load query samples and warmup the model."""
        ds.load_query_samples(sample_list)
        data = ds.get_image_list_inmemory()

        def init_fn(cloud_tpu_id):
            tf.logging.info("Load model for %dth cloud tpu", cloud_tpu_id)
            runner.models[cloud_tpu_id].load(
                FLAGS.model,
                FLAGS.output_model_dir,
                data,
                params,
                batch_size=FLAGS.batch_size,
                master=masters[cloud_tpu_id],
                scenario=FLAGS.scenario,
                batch_timeout_micros=FLAGS.batch_timeout_micros)

            # Init TPU.
            for it in range(FLAGS.init_iterations):
                tf.logging.info("Initialize cloud tpu at iteration %d", it)
                for batch_size in FLAGS.batch_size:
                    example, _ = ds.get_indices([sample_list[0]] * batch_size)
                    _ = runner.models[cloud_tpu_id].predict(example)

        threads = []
        for i in range(len(tpu_names)):
            thread = threading.Thread(target=init_fn, args=(i, ))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(count, min(count, 350), load_query_samples,
                          ds.unload_query_samples)

    lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)

    runner.finish()
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)

    tf.io.gfile.mkdir(FLAGS.outdir)

    for oldfile in tf.gfile.Glob(
            os.path.join(log_settings.log_output.outdir, "*")):
        basename = os.path.basename(oldfile)
        newfile = os.path.join(FLAGS.outdir, basename)
        tf.gfile.Copy(oldfile, newfile, overwrite=True)

    if FLAGS.accuracy:
        with tf.gfile.Open(os.path.join(FLAGS.outdir, "results.txt"),
                           "w") as f:
            results = {"mAP": accuracy_coco.main()}
            json.dump(results, f, sort_keys=True, indent=4)
Example 15
def main():
    global last_timeing
    args = get_args()

    log.info(args)

    # find backend
    backend = get_backend(args.backend)

    # override image format if given
    image_format = args.data_format if args.data_format else backend.image_format()

    # --count applies to accuracy mode only and can be used to limit the number of images
    # for testing. For perf mode we always limit count to 200.
    count_override = False
    count = args.count
    if count:
        count_override = True

    # dataset to use
    wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
    ds = wanted_dataset(data_path=args.dataset_path,
                        image_list=args.dataset_list,
                        name=args.dataset,
                        image_format=image_format,
                        pre_process=pre_proc,
                        use_cache=args.cache,
                        count=count, **kwargs)
    # load model to backend
    model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
    final_results = {
        "runtime": model.name(),
        "version": model.version(),
        "time": int(time.time()),
        "cmdline": str(args),
    }

    config = os.path.abspath(args.config)
    if not os.path.exists(config):
        log.error("{} not found".format(config))
        sys.exit(1)

    if args.output:
        output_dir = os.path.abspath(args.output)
        os.makedirs(output_dir, exist_ok=True)
        os.chdir(output_dir)

    #
    # make one pass over the dataset to validate accuracy
    #
    count = ds.get_item_count()

    # warmup
    warmup_queries = range(args.max_batchsize)
    ds.load_query_samples(warmup_queries)
    for _ in range(2):
        img, _ = ds.get_samples(warmup_queries)
        _ = backend.predict({backend.inputs[0]: img})
    ds.unload_query_samples(None)

    scenario = SCENARIO_MAP[args.scenario]
    runner_map = {
        lg.TestScenario.SingleStream: RunnerBase,
        lg.TestScenario.MultiStream: QueueRunner,
        lg.TestScenario.Server: QueueRunner,
        lg.TestScenario.Offline: QueueRunner
    }
    runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)

    def issue_queries(query_samples):
        runner.enqueue(query_samples)

    def flush_queries():
        pass

    def process_latencies(latencies_ns):
        # called by loadgen to show us the recorded latencies
        global last_timeing
        last_timeing = [t / NANO_SEC for t in latencies_ns]

    settings = lg.TestSettings()
    settings.FromConfig(config, args.model_name, args.scenario)
    settings.scenario = scenario
    settings.mode = lg.TestMode.PerformanceOnly
    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    if args.find_peak_performance:
        settings.mode = lg.TestMode.FindPeakPerformance

    if args.time:
        # override the time we want to run
        settings.min_duration_ms = args.time * MILLI_SEC
        settings.max_duration_ms = args.time * MILLI_SEC

    if args.qps:
        qps = float(args.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

    if count_override:
        settings.min_query_count = count
        settings.max_query_count = count

    if args.samples_per_query:
        settings.multi_stream_samples_per_query = args.samples_per_query
    if args.max_latency:
        settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
        settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)

    # override target latency when it needs to be less than 1ms
    if args.model_name == "mobilenet":
        settings.single_stream_expected_latency_ns = 200000
    elif args.model_name == "resnet50":
        settings.single_stream_expected_latency_ns = 900000
    elif args.model_name == "ssd-mobilenet":
        settings.single_stream_expected_latency_ns = 1000000

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    #qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
    qsl = lg.ConstructQSL(count, min(count, 1024), ds.load_query_samples, ds.unload_query_samples)

    log.info("starting {}".format(scenario))
    result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
    runner.start_run(result_dict, args.accuracy)
    if args.enable_trace:
        lg.StartTest(sut, qsl, settings)
    else:
        logsettings = lg.LogSettings()
        logsettings.enable_trace = False
        lg.StartTestWithLogSettings(sut, qsl, settings, logsettings)

    if not last_timeing:
        last_timeing = runner.result_timing
    if args.accuracy:
        post_proc.finalize(result_dict, ds, output_dir=args.output)
    add_results(final_results, "{}".format(scenario),
                result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)

    runner.finish()
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)

    #
    # write final results
    #
    if args.output:
        with open("results.json", "w") as f:
            json.dump(final_results, f, sort_keys=True, indent=4)
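
Several of the examples (the two main() functions above, and Examples 22 and 23 below) use NANO_SEC and MILLI_SEC constants defined outside the snippets; these are presumably the usual unit-conversion values:

NANO_SEC = 1e9    # nanoseconds per second
MILLI_SEC = 1000  # milliseconds per second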
Example 16
def benchmark_using_loadgen():
    "Perform the benchmark using python API for the LoadGen library"

    global funnel_should_be_running, warmup_mode, openme_data

    scenario = {
        'SingleStream':     lg.TestScenario.SingleStream,
        'MultiStream':      lg.TestScenario.MultiStream,
        'Server':           lg.TestScenario.Server,
        'Offline':          lg.TestScenario.Offline,
    }[LOADGEN_SCENARIO]

    mode = {
        'AccuracyOnly':     lg.TestMode.AccuracyOnly,
        'PerformanceOnly':  lg.TestMode.PerformanceOnly,
        'SubmissionRun':    lg.TestMode.SubmissionRun,
    }[LOADGEN_MODE]

    ts = lg.TestSettings()
    if LOADGEN_CONFIG_FILE:
        ts.FromConfig(LOADGEN_CONFIG_FILE, 'random_model_name', LOADGEN_SCENARIO)
    ts.scenario = scenario
    ts.mode     = mode

    if LOADGEN_MULTISTREAMNESS:
        ts.multi_stream_samples_per_query = int(LOADGEN_MULTISTREAMNESS)

    if LOADGEN_MAX_DURATION_S:
        ts.max_duration_ms = int(LOADGEN_MAX_DURATION_S)*1000

    if LOADGEN_COUNT_OVERRIDE:
        ts.min_query_count = int(LOADGEN_COUNT_OVERRIDE)
        ts.max_query_count = int(LOADGEN_COUNT_OVERRIDE)

    if LOADGEN_TARGET_QPS:
        target_qps                  = float(LOADGEN_TARGET_QPS)
        ts.multi_stream_target_qps  = target_qps
        ts.server_target_qps        = target_qps
        ts.offline_expected_qps     = target_qps

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(LOADGEN_DATASET_SIZE, LOADGEN_BUFFER_SIZE, load_query_samples, unload_query_samples)

    log_settings = lg.LogSettings()
    log_settings.enable_trace = False

    funnel_thread = threading.Thread(target=send_responses, args=())
    funnel_should_be_running = True
    funnel_thread.start()

    if LOADGEN_WARMUP_SAMPLES:
        warmup_id_range = list(range(LOADGEN_WARMUP_SAMPLES))
        load_query_samples(warmup_id_range)

        warmup_mode = True
        print("Sending out the warm-up samples, waiting for responses...")
        issue_queries([lg.QuerySample(id,id) for id in warmup_id_range])

        while len(in_progress)>0:       # waiting for the in_progress queue to clear up
            time.sleep(1)
        print(" Done!")

        warmup_mode = False

    lg.StartTestWithLogSettings(sut, qsl, ts, log_settings)

    funnel_should_be_running = False    # politely ask the funnel_thread to end
    funnel_thread.join()                # wait for it to actually end

    from_workers.close()
    to_workers.close()

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)

    if SIDELOAD_JSON:
        with open(SIDELOAD_JSON, 'w') as sideload_fd:
            json.dump(openme_data, sideload_fd, indent=4, sort_keys=True)
Example 17
def eval_func(model):
    args = get_args()

    if args.backend == "pytorch":
        from pytorch_SUT import get_pytorch_sut
        sut = get_pytorch_sut(model, args.preprocessed_data_dir,
                              args.performance_count)
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_onnxruntime_sut
        sut = get_onnxruntime_sut(args.model, args.preprocessed_data_dir,
                                  args.performance_count)
    elif args.backend == "tf":
        from tf_SUT import get_tf_sut
        sut = get_tf_sut(args.model, args.preprocessed_data_dir,
                         args.performance_count)
    elif args.backend == "ov":
        from ov_SUT import get_ov_sut
        sut = get_ov_sut(args.model, args.preprocessed_data_dir,
                         args.performance_count)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))

    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "3d-unet", args.scenario)
    settings.FromConfig(args.user_conf, "3d-unet", args.scenario)

    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    print("Running Loadgen test...")
    if args.benchmark:
        start = time.time()
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)
    if args.benchmark:
        end = time.time()

    if args.accuracy:
        print("Running accuracy script...")
        process = subprocess.Popen(['python3', 'accuracy-brats.py'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        out, err = process.communicate()

        print(out)
        print("Done!", float(err))

        if args.benchmark:
            print('Batch size = 1')
            print('Latency: %.3f ms' % ((end - start) * 1000 / sut.qsl.count))
            print('Throughput: %.3f images/sec' % (sut.qsl.count /
                                                   (end - start)))
            print('Accuracy: {mean:.5f}'.format(mean=float(err)))

    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)

    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)
    return float(err)
Example 18
def main():
    global num_ins
    global num_cpus
    global in_queue_cnt
    global out_queue_cnt
    global batching
    global queries_so_far
    global Latencies

    queries_so_far = 0

    args = get_args()
    log.info(args)
    scenario = args.scenario
    accuracy_mode = args.accuracy
    perf_count = args.perf_count
    batch_size = args.batch_size
    num_ins = args.num_instance
    num_cpus = args.num_phy_cpus
    batching = args.batching

    # Read Loadgen and workload config parameters
    settings = lg.TestSettings()
    settings.scenario = scenario_map[scenario]
    settings.FromConfig(args.mlperf_conf, "bert", scenario)
    settings.FromConfig(args.user_conf, "bert", scenario)
    settings.mode = lg.TestMode.AccuracyOnly if accuracy_mode else lg.TestMode.PerformanceOnly

    # Establish communication queues
    lock = multiprocessing.Lock()
    init_counter = multiprocessing.Value("i", 0)
    calibrate_counter = multiprocessing.Value("i", 0)
    out_queue = multiprocessing.Queue()

    # Create consumers
    consumers = []
    if scenario == "Server":
        from parse_server_config import configParser

        buckets = configParser("machine_conf.json")
        cutoffs = list(buckets.keys())
        batch_sizes = {}

        in_queue = {j: multiprocessing.JoinableQueue() for j in buckets}
        proc_idx = 0
        num_cpus = 0
        total_ins = 0
        for cutoff in list(buckets.keys()):
            batch_sizes[cutoff] = buckets[cutoff]["batch_size"]
            num_ins = buckets[cutoff]["instances"]
            cpus_per_instance = buckets[cutoff]["cpus_per_instance"]
            num_cpus = num_ins * cpus_per_instance
            total_ins += num_ins

            for j in range(num_ins):
                consumer = Consumer(in_queue[cutoff], out_queue, lock,
                                    init_counter, calibrate_counter, proc_idx,
                                    num_ins, args, cutoff)
                consumer.start_core_idx = proc_idx
                consumer.end_core_idx = proc_idx + cpus_per_instance - 1
                consumers.append(consumer)
                proc_idx = consumer.end_core_idx + 1

        num_ins = total_ins

    else:
        total_ins = num_ins
        in_queue = MultiprocessShapeBasedQueue()
        consumers = [
            Consumer(in_queue, out_queue, lock, init_counter,
                     calibrate_counter, i, num_ins, args)
            for i in range(num_ins)
        ]

    for c in consumers:
        c.start()

    # Dataset object used by constructQSL
    data_set = BERTDataSet(args.vocab, args.perf_count)
    if scenario == "Server":
        issue_queue = InQueueServer(in_queue, batch_sizes, data_set,
                                    settings.min_query_count)
    else:
        issue_queue = InQueue(in_queue, batch_size, data_set)

    # Wait until all sub-processors are ready
    block_until(init_counter, total_ins, 2)

    # Start response thread
    response_worker = threading.Thread(target=response_loadgen,
                                       args=(out_queue, ))
    response_worker.daemon = True
    response_worker.start()

    def issue_queries(query_samples):
        # It's called by loadgen to send query to SUT
        issue_queue.put(query_samples)

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(data_set.count, data_set.perf_count,
                          load_query_samples, unload_query_samples)

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)

    # Wait until outQueue done
    while out_queue_cnt < in_queue_cnt:
        time.sleep(0.2)

    if scenario == "Server":
        for i in in_queue:
            in_queue[i].join()
            for j in range(buckets[i]["cpus_per_instance"]):
                in_queue[i].put(None)
    else:
        for i in range(num_ins):
            in_queue.put(None)

    for c in consumers:
        c.join()
    out_queue.put(None)

    if accuracy_mode:
        cmd = "python accuracy-squad.py --log_file={}/mlperf_log_accuracy.json".format(
            log_path)
        subprocess.check_call(cmd, shell=True)

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
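
The example above hands results back to LoadGen through a response_loadgen thread that is not shown. A sketch of what it might do, assuming each out_queue entry is a (query_id, score) pair, None is the shutdown sentinel used above, and array, numpy (np) and lg are imported at module level:

def response_loadgen(out_queue):
    global out_queue_cnt
    while True:
        item = out_queue.get()
        if item is None:            # shutdown sentinel, see out_queue.put(None) above
            break
        query_id, score = item      # assumed payload layout
        buf = array.array("B", np.float32(score).tobytes())
        ptr, size = buf.buffer_info()
        lg.QuerySamplesComplete([lg.QuerySampleResponse(query_id, ptr, size)])
        out_queue_cnt += 1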
Example 19
def main():
    global num_ins
    global num_cpus
    global in_queue_cnt
    global out_queue_cnt
    global batching
    global bs_step

    args = get_args()
    log.info(args)
    scenario = args.scenario
    accuracy_mode = args.accuracy
    perf_count = args.perf_count
    batch_size = args.batch_size
    num_ins = args.num_instance
    num_cpus = args.num_phy_cpus
    batching = args.batching

    ## TODO, remove
    log.info('Run with {} instance on {} cpus: '.format(num_ins, num_cpus))

    # Establish communication queues
    lock = multiprocessing.Lock()
    init_counter = multiprocessing.Value("i", 0)
    calibrate_counter = multiprocessing.Value("i", 0)
    out_queue = multiprocessing.Queue()
    in_queue = MultiprocessShapeBasedQueue()

    if args.perf_calibrate:
        with open('prof_new.py', 'w') as f:
            print('prof_bs_step = {}'.format(bs_step), file=f)
            print('prof_map = {', file=f)

    # Start consumers
    consumers = [
        Consumer(in_queue, out_queue, lock, init_counter, calibrate_counter, i,
                 num_ins, args) for i in range(num_ins)
    ]
    for c in consumers:
        c.start()

    # used by constructQSL
    data_set = BERTDataSet(args.vocab, args.perf_count)
    issue_queue = InQueue(in_queue, batch_size, data_set)

    # Wait until all sub-processors ready to do calibration
    block_until(calibrate_counter, num_ins)
    # Wait until all sub-processors done calibration
    block_until(calibrate_counter, 2 * num_ins)
    if args.perf_calibrate:
        with open('prof_new.py', 'a') as f:
            print('}', file=f)
        sys.exit(0)
    # Wait until all sub-processors are ready
    block_until(init_counter, num_ins)

    # Start response thread
    response_worker = threading.Thread(target=response_loadgen,
                                       args=(out_queue, ))
    response_worker.daemon = True
    response_worker.start()

    # Start loadgen
    settings = lg.TestSettings()
    settings.scenario = scenario_map[scenario]
    settings.FromConfig(args.mlperf_conf, "bert", scenario)
    settings.FromConfig(args.user_conf, "bert", scenario)
    settings.mode = lg.TestMode.AccuracyOnly if accuracy_mode else lg.TestMode.PerformanceOnly

    # TODO, for debug, remove
    #settings.server_target_qps = 40
    #settings.server_target_latency_ns = 100000000
    #settings.min_query_count = 100
    #settings.min_duration_ms = 10000

    def issue_queries(query_samples):
        # It's called by loadgen to send query to SUT
        issue_queue.put(query_samples)

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(data_set.count, data_set.perf_count,
                          load_query_samples, unload_query_samples)

    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    #lg.StartTest(sut, qsl, settings)
    lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)

    # Wait until outQueue done
    while out_queue_cnt < in_queue_cnt:
        time.sleep(0.2)

    in_queue.join()
    for i in range(num_ins):
        in_queue.put(None)
    for c in consumers:
        c.join()
    out_queue.put(None)

    if accuracy_mode:
        cmd = "python accuracy-squad.py --log_file={}/mlperf_log_accuracy.json".format(
            log_path)
        subprocess.check_call(cmd, shell=True)

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
Example 20
    def start_run(self):
        lg.StartTestWithLogSettings(self.sut, self.qsl, self.settings, self.log_settings)
Example 21
def main():
    """
    Runs 3D UNet performing the KiTS19 Kidney Tumor Segmentation task as below:

    1. instantiate SUT and QSL for the chosen backend
    2. configure LoadGen for the chosen scenario
    3. configure MLPerf logger
    4. start LoadGen
    5. collect logs and if needed evaluate inference results
    6. clean up
    """
    # scenarios in LoadGen
    scenario_map = {
        "SingleStream": lg.TestScenario.SingleStream,
        "Offline": lg.TestScenario.Offline,
        "Server": lg.TestScenario.Server,
        "MultiStream": lg.TestScenario.MultiStream
    }

    args = get_args()

    # instantiate SUT as per requested backend; QSL is also instantiated
    if args.backend == "pytorch":
        from pytorch_SUT import get_sut
    elif args.backend == "pytorch_checkpoint":
        from pytorch_checkpoint_SUT import get_sut
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_sut
    elif args.backend == "tensorflow":
        from tensorflow_SUT import get_sut
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))
    sut = get_sut(args.model, args.preprocessed_data_dir,
                  args.performance_count)

    # setup LoadGen
    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "3d-unet", args.scenario)
    settings.FromConfig(args.user_conf, "3d-unet", args.scenario)
    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    # set up mlperf logger
    log_path = Path("build", "logs").absolute()
    log_path.mkdir(parents=True, exist_ok=True)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = str(log_path)
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings

    # start running test, from LoadGen
    print("Running Loadgen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)

    # if needed check accuracy
    if args.accuracy:
        print("Checking accuracy...")
        cmd = "python3 accuracy_kits.py"
        subprocess.check_call(cmd, shell=True)

    # all done
    print("Done!")

    # cleanup
    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)
    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)
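
The backend-specific get_sut() helpers used above and in several other examples return a wrapper exposing .sut and .qsl handles. A minimal sketch of that shape, assuming the same module-level callbacks as in the earlier examples; the class names and placeholder dataset size are illustrative only:

class _QSLWrapper:
    def __init__(self, total_count, perf_count):
        self.count = total_count
        self.perf_count = perf_count
        self.qsl = lg.ConstructQSL(total_count, perf_count,
                                   load_query_samples, unload_query_samples)

class _SUTWrapper:
    def __init__(self, total_count, perf_count):
        self.qsl = _QSLWrapper(total_count, perf_count)
        self.sut = lg.ConstructSUT(issue_queries, flush_queries,
                                   process_latencies)

def get_sut(model, preprocessed_data_dir, performance_count):
    # Illustrative only: a real implementation would load `model` and index
    # the samples found under `preprocessed_data_dir`.
    total_count = 42  # placeholder dataset size
    return _SUTWrapper(total_count, performance_count)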
Example 22
def main():

    global so
    global last_timeing
    global last_loaded
    global result_timeing

    args = get_args()

    log.info(args)

    # find backend
    backend = get_backend(args.backend)

    # --count applies to accuracy mode only and can be used to limit the number of images
    # for testing. For perf mode we always limit count to 200.
    count_override = False
    count = args.count
    if count:
        count_override = True
    """
    Python signature
    go_initialize(backend, model_path, dataset_path, count, use_gpu, gpu_id, trace_level, max_batchsize)
    """

    count, err = go_initialize(backend, args.model_path, args.dataset_path,
                               count, args.use_gpu, args.gpu_id,
                               args.trace_level, args.max_batchsize)

    if (err != 'nil'):
        print(err)
        raise RuntimeError('initialization in go failed')

    mlperf_conf = os.path.abspath(args.mlperf_conf)
    if not os.path.exists(mlperf_conf):
        log.error("{} not found".format(mlperf_conf))
        sys.exit(1)

    user_conf = os.path.abspath(args.user_conf)
    if not os.path.exists(user_conf):
        log.error("{} not found".format(user_conf))
        sys.exit(1)

    log_dir = None

    if args.log_dir:
        log_dir = os.path.abspath(args.log_dir)
        os.makedirs(log_dir, exist_ok=True)

    scenario = SCENARIO_MAP[args.scenario]

    def issue_queries(query_samples):
        global so
        global last_timeing
        global result_timeing
        idx = np.array([q.index for q in query_samples]).astype(np.int32)
        query_id = [q.id for q in query_samples]
        if args.dataset == 'brats2019':
            start = time.time()
            response_array_refs = []
            response = []
            for i, qid in enumerate(query_id):
                processed_results = so.IssueQuery(1, idx[i][np.newaxis])
                processed_results = json.loads(
                    processed_results.decode('utf-8'))
                response_array = array.array(
                    "B",
                    np.array(processed_results[0], np.float16).tobytes())
                response_array_refs.append(response_array)
                bi = response_array.buffer_info()
                response.append(lg.QuerySampleResponse(qid, bi[0], bi[1]))
            result_timeing.append(time.time() - start)
            lg.QuerySamplesComplete(response)
        else:
            start = time.time()
            processed_results = so.IssueQuery(len(idx), idx)
            result_timeing.append(time.time() - start)
            processed_results = json.loads(processed_results.decode('utf-8'))
            response_array_refs = []
            response = []
            for idx, qid in enumerate(query_id):
                response_array = array.array(
                    "B",
                    np.array(processed_results[idx], np.float32).tobytes())
                response_array_refs.append(response_array)
                bi = response_array.buffer_info()
                response.append(lg.QuerySampleResponse(qid, bi[0], bi[1]))
            lg.QuerySamplesComplete(response)

    def flush_queries():
        pass

    def process_latencies(latencies_ns):
        # called by loadgen to show us the recorded latencies
        global last_timeing
        last_timeing = [t / NANO_SEC for t in latencies_ns]

    def load_query_samples(sample_list):
        global so
        global last_loaded
        err = go_load_query_samples(sample_list, so)
        last_loaded = time.time()
        if (err != ''):
            print(err)
            raise RuntimeError('load query samples failed')

    def unload_query_samples(sample_list):
        global so
        err = go_unload_query_samples(sample_list, so)
        if (err != ''):
            print(err)
            raise RuntimeError('unload query samples failed')

    settings = lg.TestSettings()
    if args.model_name != "":
        settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
        settings.FromConfig(user_conf, args.model_name, args.scenario)
    settings.scenario = scenario
    settings.mode = lg.TestMode.PerformanceOnly
    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    if args.find_peak_performance:
        settings.mode = lg.TestMode.FindPeakPerformance

    if args.time:
        # override the time we want to run
        settings.min_duration_ms = args.time * MILLI_SEC
        settings.max_duration_ms = args.time * MILLI_SEC

    if args.qps:
        qps = float(args.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

    if count_override:
        settings.min_query_count = count
        settings.max_query_count = count

    if args.samples_per_query:
        settings.multi_stream_samples_per_query = args.samples_per_query
    if args.max_latency:
        settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
        settings.multi_stream_target_latency_ns = int(args.max_latency *
                                                      NANO_SEC)

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(count, min(count, 500), load_query_samples,
                          unload_query_samples)

    log.info("starting {}".format(scenario))

    log_path = os.path.realpath(args.log_dir)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings
    # log_settings.enable_trace = True
    # lg.StartTest(sut, qsl, settings)
    lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)

    if not last_timeing:
        last_timeing = result_timeing

    if args.accuracy:
        accuracy_script_paths = {
            'coco':
            os.path.realpath(
                '../inference/vision/classification_and_detection/tools/accuracy-coco.py'
            ),
            'imagenet':
            os.path.realpath(
                '../inference/vision/classification_and_detection/tools/accuracy-imagenet.py'
            ),
            'squad':
            os.path.realpath('../inference/language/bert/accuracy-squad.py'),
            'brats2019':
            os.path.realpath(
                '../inference/vision/medical_imaging/3d-unet/accuracy-brats.py'
            ),
        }
        accuracy_script_path = accuracy_script_paths[args.dataset]
        accuracy_file_path = os.path.join(log_dir, 'mlperf_log_accuracy.json')
        data_dir = os.environ['DATA_DIR']
        if args.dataset == 'coco':
            if args.use_inv_map:
                subprocess.check_call(
                    'python3 {} --mlperf-accuracy-file {} --coco-dir {} --use-inv-map'
                    .format(accuracy_script_path, accuracy_file_path,
                            data_dir),
                    shell=True)
            else:
                subprocess.check_call(
                    'python3 {} --mlperf-accuracy-file {} --coco-dir {}'.
                    format(accuracy_script_path, accuracy_file_path, data_dir),
                    shell=True)
        elif args.dataset == 'imagenet':  # imagenet
            subprocess.check_call(
                'python3 {} --mlperf-accuracy-file {} --imagenet-val-file {}'.
                format(accuracy_script_path, accuracy_file_path,
                       os.path.join(data_dir, 'val_map.txt')),
                shell=True)
        elif args.dataset == 'squad':  # squad
            vocab_path = os.path.join(data_dir, 'vocab.txt')
            val_path = os.path.join(data_dir, 'dev-v1.1.json')
            out_path = os.path.join(log_dir, 'predictions.json')
            cache_path = os.path.join(data_dir, 'eval_features.pickle')
            subprocess.check_call(
                'python3 {} --vocab_file {} --val_data {} --log_file {} --out_file {} --features_cache_file {} --max_examples {}'
                .format(accuracy_script_path, vocab_path, val_path,
                        accuracy_file_path, out_path, cache_path, count),
                shell=True)
        elif args.dataset == 'brats2019':  # brats2019
            base_dir = os.path.realpath(
                '../inference/vision/medical_imaging/3d-unet/build')
            post_dir = os.path.join(base_dir, 'postprocessed_data')
            label_dir = os.path.join(
                base_dir,
                'raw_data/nnUNet_raw_data/Task043_BraTS2019/labelsTr')
            os.makedirs(post_dir, exist_ok=True)
            subprocess.check_call(
                'python3 {} --log_file {} --preprocessed_data_dir {} --postprocessed_data_dir {} --label_data_dir {}'
                .format(accuracy_script_path, accuracy_file_path, data_dir,
                        post_dir, label_dir),
                shell=True)
        else:
            raise RuntimeError('Dataset not Implemented.')

    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
    """
    Python signature
    go_finalize(so)
    """
    err = go_finalize(so)
    if (err != ''):
        print(err)
        raise RuntimeError('finalize in go failed')
Example 23
def run():
  """Runs the offline mode."""
  global last_timing

  # Initialization
  final_results, count, runner = setup()

  #
  # run the benchmark with timing
  #
  runner.start_pool()

  def issue_query_offline(query_samples):
    """Adds query to the queue."""
    idx = np.array([q.index for q in query_samples])
    query_id = np.array([q.id for q in query_samples])
    batch_size = FLAGS.batch_size[0]
    for i in range(0, len(query_samples), batch_size):
      runner.enqueue(query_id[i:i + batch_size], idx[i:i + batch_size])

  def flush_queries():
    pass

  def process_latencies(latencies_ns):
    global last_timing
    last_timing = [t / 1e9 for t in latencies_ns]

  sut = lg.ConstructSUT(issue_query_offline, flush_queries, process_latencies)

  masters = []

  outdir = FLAGS.outdir if FLAGS.outdir else tempfile.mkdtemp()
  export_outdir = FLAGS.export_outdir if FLAGS.export_outdir else outdir
  export_outdir = os.path.join(export_outdir, "export_model")

  def load_query_samples(sample_list):
    """Load query samples."""
    runner.ds.load_query_samples(sample_list)
    # Find tpu master.
    if FLAGS.num_tpus == 1:
      runner.model.update_qsl(runner.ds.get_image_list_inmemory())
    else:
      for i in range(FLAGS.num_tpus):
        runner.models[i].update_qsl(runner.ds.get_image_list_inmemory())

  def warmup():
    """Warmup the TPUs."""
    load_query_samples([0])
    if FLAGS.num_tpus == 1:
      log.info("warmup ...")
      runner.warmup(0)
      log.info("warmup done")
    else:
      for cloud_tpu_id in range(FLAGS.num_tpus):
        log.info("warmup %d...", cloud_tpu_id)
        runner.warmup(0, cloud_tpu_id)
        log.info("warmup %d done", cloud_tpu_id)

    # After warmup, give the system a moment to quiesce before putting it under
    # load.
    time.sleep(1)

  if FLAGS.num_tpus == 1:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    master = tpu_cluster_resolver.get_master()

    runner.model.build_and_export(
        FLAGS.model,
        export_model_path=export_outdir,
        batch_size=FLAGS.batch_size,
        master=master,
        scenario=FLAGS.scenario)
    runner.model.load(export_model_path=export_outdir, master=master)
  else:
    # Use the first TPU instance to build and export the graph.
    tpu_names = FLAGS.tpu_name
    tpu_names = tpu_names.split(",")
    for tpu_name in tpu_names:
      tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
          tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
      masters.append(tpu_cluster_resolver.get_master())

    runner.models[0].build_and_export(
        FLAGS.model,
        export_model_path=export_outdir,
        batch_size=FLAGS.batch_size,
        master=masters[0],
        scenario=FLAGS.scenario)

    def init_fn(cloud_tpu_id):
      """Init and warmup each cloud tpu."""
      runner.models[cloud_tpu_id].load(
          export_model_path=export_outdir, master=masters[cloud_tpu_id])

    threads = []
    for i in range(FLAGS.num_tpus):
      thread = threading.Thread(target=init_fn, args=(i,))
      threads.append(thread)
      thread.start()

    for thread in threads:
      thread.join()
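    # Every TPU worker has now loaded the exported model and is ready to serve.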

  warmup()

  qsl = lg.ConstructQSL(count, min(count, 1024), load_query_samples,
                        runner.ds.unload_query_samples)
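  # min(count, 1024) is the performance sample count: LoadGen keeps at most
  # that many samples resident in memory at a time during the run.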

  test_scenarios = FLAGS.scenario
  if test_scenarios is None:
    test_scenarios_list = []
  else:
    test_scenarios_list = test_scenarios.split(",")

  max_latency = FLAGS.max_latency
  max_latency_list = max_latency.split(",")
  for scenario in test_scenarios_list:
    for target_latency in max_latency_list:
      log.info("starting %s, latency=%s", scenario, target_latency)
      settings = lg.TestSettings()
      log.info(scenario)
      if FLAGS.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly

      settings.scenario = utils.SCENARIO_MAP[scenario]

      if FLAGS.qps:
        qps = float(FLAGS.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

      if FLAGS.time:
        settings.min_duration_ms = 60 * MILLI_SEC
        settings.max_duration_ms = 0
        qps = float(FLAGS.qps) if FLAGS.qps else 100
        settings.min_query_count = int(qps * FLAGS.time)
        settings.max_query_count = int(1.1 * qps * FLAGS.time)
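        # e.g. FLAGS.qps=100 and FLAGS.time=60 gives min_query_count=6000 and
        # max_query_count=6600; min_duration_ms stays pinned at 60 s regardless
        # of FLAGS.time.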
      else:
        settings.min_query_count = (1 << 21)

      if FLAGS.time or (FLAGS.qps and FLAGS.accuracy):
        settings.mode = lg.TestMode.PerformanceOnly
      # FIXME: add SubmissionRun once available

      target_latency_ns = int(float(target_latency) * (NANO_SEC / MILLI_SEC))
      settings.single_stream_expected_latency_ns = target_latency_ns
      settings.multi_stream_target_latency_ns = target_latency_ns
      settings.server_target_latency_ns = target_latency_ns
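      # target_latency is in milliseconds; NANO_SEC / MILLI_SEC (1e6) converts
      # it to nanoseconds. Only the field matching the active scenario should
      # take effect.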

      log_settings = lg.LogSettings()
      # TODO(brianderson): figure out how to use internal file path.
      log_settings.log_output.outdir = tempfile.mkdtemp()
      log_settings.log_output.copy_detail_to_stdout = True
      log_settings.log_output.copy_summary_to_stdout = True
      log_settings.enable_trace = False

      result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
      runner.start_run(result_dict, FLAGS.accuracy)

      lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)

      if FLAGS.accuracy:
        runner.get_post_process().finalize(result_dict, runner.ds)

      utils.add_results(
          final_results, "{}-{}".format(scenario, target_latency),
          result_dict, last_timing,
          time.time() - runner.ds.last_loaded)

  #
  # write final results
  #
  if FLAGS.outdir:
    outfile = os.path.join(FLAGS.outdir, "results.txt")
    with tf.gfile.Open(outfile, "w") as f:
      json.dump(final_results, f, sort_keys=True, indent=4)
  else:
    json.dump(final_results, sys.stdout, sort_keys=True, indent=4)

  runner.finish()
  lg.DestroyQSL(qsl)
  lg.DestroySUT(sut)
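
# The run() example above reads a number of absl flags that are defined
# elsewhere in its module. A rough sketch of how some of them might be
# declared (flag names come from the code above; defaults and help strings
# are guesses):
from absl import flags

flags.DEFINE_string("scenario", "Offline", "Comma-separated LoadGen scenarios to run.")
flags.DEFINE_string("max_latency", "10", "Comma-separated target latencies in milliseconds.")
flags.DEFINE_multi_integer("batch_size", [8], "Per-request batch size (one entry per model).")
flags.DEFINE_integer("num_tpus", 1, "Number of Cloud TPU workers.")
flags.DEFINE_string("tpu_name", None, "Comma-separated Cloud TPU name(s).")
flags.DEFINE_bool("accuracy", False, "Run LoadGen in AccuracyOnly mode.")
flags.DEFINE_string("outdir", None, "Directory for results and LoadGen logs.")

FLAGS = flags.FLAGS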
Exemplo n.º 24
def main(argv):
    del argv

    settings = mlperf_loadgen.TestSettings()
    settings.qsl_rng_seed = FLAGS.qsl_rng_seed
    settings.sample_index_rng_seed = FLAGS.sample_index_rng_seed
    settings.schedule_rng_seed = FLAGS.schedule_rng_seed
    if FLAGS.accuracy_mode:
        settings.mode = mlperf_loadgen.TestMode.AccuracyOnly
    else:
        settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
    settings.scenario = SCENARIO_MAP[FLAGS.scenario]
    if FLAGS.qps:
        qps = float(FLAGS.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

    if FLAGS.scenario == "Offline" or FLAGS.scenario == "Server":
        masters = FLAGS.master.split(",")

        runner = loadgen_gnmt.GNMTRunner(input_file=FLAGS.input_file,
                                         ckpt_path=FLAGS.ckpt_path,
                                         hparams_path=FLAGS.hparams_path,
                                         vocab_prefix=FLAGS.vocab_prefix,
                                         outdir=FLAGS.outdir,
                                         batch_size=FLAGS.batch_size,
                                         verbose=FLAGS.verbose,
                                         masters=masters,
                                         scenario=FLAGS.scenario)

        runner.load(FLAGS.batch_timeout_micros)

        # Specify exactly how many queries need to be made
        settings.min_query_count = int(float(FLAGS.qps) * FLAGS.time)
        settings.max_query_count = 0
        settings.min_duration_ms = 60 * MILLI_SEC
        settings.max_duration_ms = 0
        settings.server_target_latency_ns = int(0.25 * NANO_SEC)
        settings.server_target_latency_percentile = 0.97
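        # 0.25 * NANO_SEC is a 250 ms bound; with the 0.97 percentile this asks
        # that 97% of Server-scenario queries complete within 250 ms.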

    else:
        print("Invalid scenario selected")
        assert False

    # Create a thread in the GNMTRunner to start accepting work
    runner.start_worker()

    # Total number of samples in the QSL (i.e. maximum sample ID + 1).
    total_queries = FLAGS.query_count
    # Number of samples kept resident during the performance run (the full set here).
    perf_queries = FLAGS.query_count

    sut = mlperf_loadgen.ConstructSUT(runner.enqueue, flush_queries,
                                      generic_loadgen.process_latencies)
    qsl = mlperf_loadgen.ConstructQSL(total_queries, perf_queries,
                                      runner.load_samples_to_ram,
                                      runner.unload_samples_from_ram)

    log_settings = mlperf_loadgen.LogSettings()
    log_settings.log_output.outdir = tempfile.mkdtemp()
    # Disable detail logs to prevent it from stepping on the summary
    # log in stdout on some systems.
    log_settings.log_output.copy_detail_to_stdout = False
    log_settings.log_output.copy_summary_to_stdout = True
    log_settings.enable_trace = False
    mlperf_loadgen.StartTestWithLogSettings(sut, qsl, settings, log_settings)

    runner.finish()
    mlperf_loadgen.DestroyQSL(qsl)
    mlperf_loadgen.DestroySUT(sut)

    for oldfile in tf.gfile.Glob(
            os.path.join(log_settings.log_output.outdir, "*")):
        basename = os.path.basename(oldfile)
        newfile = os.path.join(FLAGS.outdir, basename)
        tf.gfile.Copy(oldfile, newfile, overwrite=True)

    if FLAGS.accuracy_mode:
        log_accuracy = os.path.join(log_settings.log_output.outdir,
                                    "mlperf_log_accuracy.json")
        tf.gfile.Copy(FLAGS.reference, "/tmp/reference")
        bleu = process_accuracy.get_accuracy("/tmp/reference", log_accuracy)
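        # get_accuracy() returns BLEU on a 0-1 scale; the print below rescales
        # it to the usual 0-100 range.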
        print("BLEU: %.2f" % (bleu * 100))  # pylint: disable=superfluous-parens