def get_backend(backend):
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        backend = BackendTensorflow()
    elif backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        backend = BackendOnnxruntime()
    elif backend == "null":
        from backend_null import BackendNull
        backend = BackendNull()
    elif backend == "pytorch":
        from backend_pytorch import BackendPytorch
        backend = BackendPytorch()
    elif backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        backend = BackendPytorchNative()
    elif backend == "tflite":
        from backend_tflite import BackendTflite
        backend = BackendTflite()
    elif backend == "tflite-calibrate":
        from backend_tflite_calibrate import BackendTflite
        backend = BackendTflite()
    elif backend == "tflite-ncore":
        from backend_tflite_ncore import BackendTfliteNcore
        backend = BackendTfliteNcore()
    elif backend == "tflite-ncore-offline-imagenet":
        from backend_tflite_ncore_offline_imagenet import BackendTfliteNcoreOfflineImagenet
        backend = BackendTfliteNcoreOfflineImagenet()
    elif backend == "tflite-ncore-offline-ssd":
        from backend_tflite_ncore_offline_ssd import BackendTfliteNcoreOfflineSSD
        backend = BackendTfliteNcoreOfflineSSD()
    else:
        raise ValueError("unknown backend: " + backend)
    return backend
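
For context, a minimal usage sketch of this dispatcher, mirroring how the MLPerf harness typically wires it to a command-line flag (the flag parsing below is illustrative, not taken from the project):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--backend", default="tensorflow",
                    help="one of the names handled by get_backend()")
args = parser.parse_args()

# get_backend() raises ValueError for any name outside the if/elif chain.
backend = get_backend(args.backend)
print("selected backend:", backend.__class__.__name__)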
Example #2
File: main.py Project: prime91/inference
def get_backend(backend):
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        backend = BackendTensorflow()
    elif backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        backend = BackendOnnxruntime()
    elif backend == "null":
        from backend_null import BackendNull
        backend = BackendNull()
    elif backend == "pytorch":
        from backend_pytorch import BackendPytorch
        backend = BackendPytorch()
    elif backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        backend = BackendPytorchNative()
    elif backend == "tflite":
        from backend_tflite import BackendTflite
        backend = BackendTflite()
    elif backend == "tvm":
        from backend_tvm import BackendTvm
        backend = BackendTvm()
    else:
        raise ValueError("unknown backend: " + backend)
    return backend
Example #3
def get_backend(backend):
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        backend = BackendTensorflow()
    elif backend == "null":
        from backend_null import BackendNull
        backend = BackendNull()
    elif backend == "openvino":
        from backend_openvino import BackendOpenvino
        backend = BackendOpenvino()
    else:
        raise ValueError("unknown backend: " + backend)
    return backend
Example #4
File: backendbase.py Project: Ascend/tools
def create_backend_instance(backend_type, args):
    if backend_type == "tensorflow":
        from backend_tf import BackendTensorflow
        # model path and batch size come from the parsed CLI args, as in
        # the other branches of this function (model_path/batchsize were
        # undefined names in the original snippet).
        backend = BackendTensorflow(args.model,
                                    args.batchsize,
                                    inputs=None,
                                    outputs=None)
        backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
    elif backend_type == "onnxruntime":
        from backend.onnx_backend import BackendOnnxruntime
        backend = BackendOnnxruntime(args.batchsize)
    elif backend_type == "null":
        from backend_null import BackendNull
        backend = BackendNull()
    elif backend_type == "acl":
        from backend.acl_backend import BackendAcl
        backend = BackendAcl(args.batchsize)
        backend.load(args.model,
                     inputs=args.inputs,
                     outputs=args.outputs,
                     device_id=args.device_id)
    else:
        raise ValueError("unknown backend: ", backend_type)
    return backend
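
A hedged calling sketch for create_backend_instance, assuming args is an argparse namespace carrying the fields the function body reads (model, inputs, outputs, batchsize, device_id); the flag definitions are illustrative:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="path to the model file")
parser.add_argument("--inputs", default=None)
parser.add_argument("--outputs", default=None)
parser.add_argument("--batchsize", type=int, default=1)
parser.add_argument("--device_id", type=int, default=0)
args = parser.parse_args()

# Pick the Ascend ACL backend; the factory also loads the model for it.
backend = create_backend_instance("acl", args)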
Example #5
def get_backend(backend, dataset_path, dataset_calibration_list):
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        backend = BackendTensorflow()
    elif backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        backend = BackendOnnxruntime()
    elif backend == "null":
        from backend_null import BackendNull
        backend = BackendNull()
    elif backend == "pytorch":
        from backend_pytorch import BackendPytorch
        backend = BackendPytorch()
    elif backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        backend = BackendPytorchNative()
    elif backend == "pytorch-jit-traced":
        from backend_pytorch_jit_traced import BackendPytorchJITTraced
        backend = BackendPytorchJITTraced()
    elif backend == "pytorch-fp32":
        from backend_pytorch_fp32 import BackendPytorchFP32
        backend = BackendPytorchFP32()
    elif backend == "pytorch-ssd-jit-traced":
        from backend_pytorch_ssd_jit_traced import BackendPytorchSSDJITTraced
        backend = BackendPytorchSSDJITTraced()
    elif backend == "pytorch-yolov3-jit-traced":
        from backend_pytorch_yolov3_jit_traced import BackendPytorchYOLOv3JITTraced
        backend = BackendPytorchYOLOv3JITTraced()
    elif backend == "pytorch-yolov3-fp32":
        from backend_pytorch_yolov3_fp32 import BackendPytorchYOLOv3FP32
        backend = BackendPytorchYOLOv3FP32()
    elif backend == "tflite":
        from backend_tflite import BackendTflite
        backend = BackendTflite()
    elif backend == "edgecortix":
        from backend_edgecortix import BackendEdgecortix
        backend = BackendEdgecortix(dataset_path, dataset_calibration_list)
    else:
        raise ValueError("unknown backend: " + backend)
    return backend
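
Unlike the earlier variants, this get_backend threads dataset information through to the one backend that needs it for calibration. An illustrative call, with placeholder paths:

# Only the "edgecortix" branch consumes the two extra arguments; every
# other branch ignores them.  Both paths below are placeholders.
backend = get_backend("edgecortix",
                      dataset_path="/data/imagenet",
                      dataset_calibration_list="cal_image_list.txt")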
Example #6
def get_backend(backend):
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        backend = BackendTensorflow()
    elif backend == "pytorch":
        from backend_pytorch import BackendPytorch
        backend = BackendPytorch()
    elif backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        backend = BackendPytorchNative()
    elif backend == "tflite":
        from backend_tflite import BackendTflite
        backend = BackendTflite()
    else:
        raise ValueError("unknown backend: " + backend)
    return backend
#
# def main_model():
#     main_model.args = get_args()

import time

from backend_tf import BackendTensorflow

backend = BackendTensorflow()
def load_model():
    global backend
    # find backend
    backend = get_backend("tensorflow")

    # load model to backend
    t1 = time.time()
    model = backend.load("/model/resnet34_tf.22.1.pb",
                         inputs=['image:0'],
                         outputs=['detection_bboxes:0',
                                  'detection_classes:0',
                                  'detection_scores:0'])
    t2 = time.time()
    print("load model time= %f" % (t2 - t1))



Example #7
def get_backend(backend):
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        backend = BackendTensorflow()
    elif backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        backend = BackendOnnxruntime()
    elif backend == "null":
        from backend_null import BackendNull
        backend = BackendNull()
    elif backend == "pytorch":
        from backend_pytorch import BackendPytorch
        backend = BackendPytorch()
    elif backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        backend = BackendPytorchNative()
    elif backend == "pytorch-centaur":
        from backend_pytorch_centaur import BackendPytorchCentaur
        backend = BackendPytorchCentaur()
    elif backend == "pytorch-native-calibrate":
        from backend_pytorch_native_calibrate import BackendPytorchNativeCalibrate
        backend = BackendPytorchNativeCalibrate()
    elif backend == "tflite":
        from backend_tflite import BackendTflite
        backend = BackendTflite()
    elif backend == "tflite-calibrate":
        from backend_tflite_calibrate import BackendTflite
        backend = BackendTflite()
    elif backend == "tflite-ncore":
        from backend_tflite_ncore import BackendTfliteNcore
        backend = BackendTfliteNcore()
    elif backend == "tflite-ncore-mobilenet":
        from backend_libncoretflite import BackendTfliteNcoreMobileNetV1
        backend = BackendTfliteNcoreMobileNetV1()
        backend.inputs = ["image_tensor:0"]
    elif backend == "tflite-ncore-resnet":
        from backend_libncoretflite import BackendTfliteNcoreResnet
        backend = BackendTfliteNcoreResnet()
        backend.inputs = ["image_tensor:0"]
    elif backend == "tflite-ncore-ssd":
        from backend_libncoretflite import BackendTfliteNcoreSSD
        backend = BackendTfliteNcoreSSD()
        backend.inputs = ["image_tensor:0"]
    elif backend == "tflite-ncore-mobilenet-offline":
        from backend_libncoretflite import BackendTfliteNcoreMobileNetV1Offline
        backend = BackendTfliteNcoreMobileNetV1Offline()
        backend.inputs = ["image_tensor:0"]
    elif backend == "tflite-ncore-resnet-offline":
        from backend_libncoretflite import BackendTfliteNcoreResnetOffline
        backend = BackendTfliteNcoreResnetOffline()
        backend.inputs = ["image_tensor:0"]
    elif backend == "tflite-ncore-ssd-offline":
        from backend_libncoretflite import BackendTfliteNcoreSSDOffline
        backend = BackendTfliteNcoreSSDOffline()
        backend.inputs = ["image_tensor:0"]
    else:
        raise ValueError("unknown backend: " + backend)
    return backend
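
Across all of these examples the same if/elif dispatch recurs and grows linearly with each new backend. Not taken from any of the projects above, but a minimal registry-style sketch of the same behavior, keeping the imports lazy so optional dependencies load only when selected (module and class names taken from the snippets):

import importlib

# Each supported backend string maps to a (module, class) pair.
_BACKENDS = {
    "tensorflow": ("backend_tf", "BackendTensorflow"),
    "onnxruntime": ("backend_onnxruntime", "BackendOnnxruntime"),
    "null": ("backend_null", "BackendNull"),
    "pytorch": ("backend_pytorch", "BackendPytorch"),
    "tflite": ("backend_tflite", "BackendTflite"),
}

def get_backend(name):
    try:
        module_name, class_name = _BACKENDS[name]
    except KeyError:
        raise ValueError("unknown backend: " + name)
    # Import happens only for the backend actually requested.
    module = importlib.import_module(module_name)
    return getattr(module, class_name)()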
Example #8
def main(argv):
    del argv

    global last_timeing

    if FLAGS.scenario == "Server":
        # Disable garbage collection for realtime performance.
        gc.disable()

    # define backend
    backend = BackendTensorflow()

    # override image format if given
    image_format = FLAGS.data_format if FLAGS.data_format else backend.image_format()

    # dataset to use
    wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[FLAGS.dataset]
    ds = wanted_dataset(data_path=FLAGS.dataset_path,
                        image_list=FLAGS.dataset_list,
                        name=FLAGS.dataset,
                        image_format=image_format,
                        use_cache=FLAGS.cache,
                        count=FLAGS.count,
                        cache_dir=FLAGS.cache_dir,
                        annotation_file=FLAGS.annotation_file,
                        use_space_to_depth=FLAGS.use_space_to_depth)
    # load model to backend
    # TODO(wangtao): parse flags to params.
    params = dict(ssd_model.default_hparams().values())
    params["conv0_space_to_depth"] = FLAGS.use_space_to_depth
    params["use_bfloat16"] = FLAGS.use_bfloat16
    params["use_fused_bn"] = FLAGS.use_fused_bn

    masters = []
    tpu_names = FLAGS.tpu_name.split(",")
    for tpu_name in tpu_names:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        masters.append(tpu_cluster_resolver.get_master())

    #
    # make one pass over the dataset to validate accuracy
    #
    count = FLAGS.count if FLAGS.count else ds.get_item_count()

    #
    # warmup
    #
    log.info("warmup ...")

    batch_size = FLAGS.batch_size[0] if FLAGS.scenario == "Offline" else 1
    backend_lists = []
    for _ in range(len(tpu_names)):
        backend = BackendTensorflow()
        backend_lists.append(backend)
    runner = QueueRunner(backend_lists,
                         ds,
                         FLAGS.threads,
                         post_proc=post_proc,
                         max_batchsize=batch_size)

    runner.start_run({}, FLAGS.accuracy)

    def issue_queries(query_samples):
        runner.enqueue(query_samples)

    def flush_queries():
        pass

    def process_latencies(latencies_ns):
        # called by loadgen to show us the recorded latencies
        global last_timeing
        last_timeing = [t / NANO_SEC for t in latencies_ns]

    tf.logging.info("starting {}, latency={}".format(FLAGS.scenario,
                                                     FLAGS.max_latency))
    settings = lg.TestSettings()
    tf.logging.info(FLAGS.scenario)
    settings.scenario = SCENARIO_MAP[FLAGS.scenario]
    settings.qsl_rng_seed = FLAGS.qsl_rng_seed
    settings.sample_index_rng_seed = FLAGS.sample_index_rng_seed
    settings.schedule_rng_seed = FLAGS.schedule_rng_seed

    if FLAGS.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly

    if FLAGS.qps:
        qps = float(FLAGS.qps)
        settings.server_target_qps = qps
        settings.offline_expected_qps = qps

    if FLAGS.time:
        settings.min_duration_ms = FLAGS.time * MILLI_SEC
        settings.max_duration_ms = 0
        qps = FLAGS.qps or 100
        settings.min_query_count = qps * FLAGS.time
        settings.max_query_count = 0
    else:
        settings.min_query_count = 270336
        settings.max_query_count = 0

    target_latency_ns = int(float(FLAGS.max_latency) * NANO_SEC)
    settings.single_stream_expected_latency_ns = target_latency_ns
    settings.multi_stream_target_latency_ns = target_latency_ns
    settings.server_target_latency_ns = target_latency_ns

    log_settings = lg.LogSettings()
    log_settings.log_output.outdir = tempfile.mkdtemp()
    log_settings.log_output.copy_detail_to_stdout = True
    log_settings.log_output.copy_summary_to_stdout = True
    log_settings.enable_trace = False

    def load_query_samples(sample_list):
        """Load query samples and warmup the model."""
        ds.load_query_samples(sample_list)
        data = ds.get_image_list_inmemory()

        def init_fn(cloud_tpu_id):
            tf.logging.info("Load model for %dth cloud tpu", cloud_tpu_id)
            runner.models[cloud_tpu_id].load(
                FLAGS.model,
                FLAGS.output_model_dir,
                data,
                params,
                batch_size=FLAGS.batch_size,
                master=masters[cloud_tpu_id],
                scenario=FLAGS.scenario,
                batch_timeout_micros=FLAGS.batch_timeout_micros)

            # Init TPU.
            for it in range(FLAGS.init_iterations):
                tf.logging.info("Initialize cloud tpu at iteration %d", it)
                for batch_size in FLAGS.batch_size:
                    example, _ = ds.get_indices([sample_list[0]] * batch_size)
                    _ = runner.models[cloud_tpu_id].predict(example)

        threads = []
        for i in range(len(tpu_names)):
            thread = threading.Thread(target=init_fn, args=(i, ))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

    sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(count, min(count, 350), load_query_samples,
                          ds.unload_query_samples)

    lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)

    runner.finish()
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)

    tf.io.gfile.mkdir(FLAGS.outdir)

    for oldfile in tf.gfile.Glob(
            os.path.join(log_settings.log_output.outdir, "*")):
        basename = os.path.basename(oldfile)
        newfile = os.path.join(FLAGS.outdir, basename)
        tf.gfile.Copy(oldfile, newfile, overwrite=True)

    if FLAGS.accuracy:
        with tf.gfile.Open(os.path.join(FLAGS.outdir, "results.txt"), "w") as f:
            results = {"mAP": accuracy_coco.main()}
            json.dump(results, f, sort_keys=True, indent=4)