def get_backend(backend):
    """Look up, lazily import, and instantiate the backend named *backend*.

    The backend module is only imported for the selected name, so the
    dependencies of unused backends never need to be installed.

    Args:
        backend: one of the keys of the registry below.

    Returns:
        A freshly constructed backend instance.

    Raises:
        ValueError: if *backend* is not a known backend name.
    """
    # name -> (module name, class name) for every supported backend
    registry = {
        "tensorflow": ("backend_tf", "BackendTensorflow"),
        "onnxruntime": ("backend_onnxruntime", "BackendOnnxruntime"),
        "null": ("backend_null", "BackendNull"),
        "pytorch": ("backend_pytorch", "BackendPytorch"),
        "pytorch-native": ("backend_pytorch_native", "BackendPytorchNative"),
        "tflite": ("backend_tflite", "BackendTflite"),
        "tflite-calibrate": ("backend_tflite_calibrate", "BackendTflite"),
        "tflite-ncore": ("backend_tflite_ncore", "BackendTfliteNcore"),
        "tflite-ncore-offline-imagenet": (
            "backend_tflite_ncore_offline_imagenet",
            "BackendTfliteNcoreOfflineImagenet",
        ),
        "tflite-ncore-offline-ssd": (
            "backend_tflite_ncore_offline_ssd",
            "BackendTfliteNcoreOfflineSSD",
        ),
    }
    if backend not in registry:
        raise ValueError("unknown backend: " + backend)
    # Import lazily, exactly like the per-branch `from x import Y` did.
    import importlib
    module_name, class_name = registry[backend]
    module = importlib.import_module(module_name)
    return getattr(module, class_name)()
Example #2 (score: 0)
File: main.py — Project: prime91/inference
def get_backend(backend):
    """Instantiate the inference backend selected by name.

    Each backend lives in its own module, imported on demand so that only
    the chosen backend's dependencies are required.

    Args:
        backend: backend name, e.g. "tensorflow" or "tvm".

    Returns:
        A new instance of the matching backend class.

    Raises:
        ValueError: for an unrecognized backend name.
    """
    # Mapping of backend name to (module, class) to import and construct.
    registry = {
        "tensorflow": ("backend_tf", "BackendTensorflow"),
        "onnxruntime": ("backend_onnxruntime", "BackendOnnxruntime"),
        "null": ("backend_null", "BackendNull"),
        "pytorch": ("backend_pytorch", "BackendPytorch"),
        "pytorch-native": ("backend_pytorch_native", "BackendPytorchNative"),
        "tflite": ("backend_tflite", "BackendTflite"),
        "tvm": ("backend_tvm", "BackendTvm"),
    }
    spec = registry.get(backend)
    if spec is None:
        raise ValueError("unknown backend: " + backend)
    import importlib
    module = importlib.import_module(spec[0])
    return getattr(module, spec[1])()
def get_backend(backend):
    """Resolve *backend* to a backend instance, importing its module lazily.

    The libncoretflite-based backends additionally have their ``inputs``
    attribute pinned to ``["image_tensor:0"]`` right after construction,
    mirroring the per-backend setup of the original branches.

    Args:
        backend: backend name string.

    Returns:
        A constructed (and, where applicable, configured) backend object.

    Raises:
        ValueError: if *backend* is not in the registry.
    """
    # name -> (module, class, inputs-to-assign-or-None)
    registry = {
        "tensorflow": ("backend_tf", "BackendTensorflow", None),
        "onnxruntime": ("backend_onnxruntime", "BackendOnnxruntime", None),
        "null": ("backend_null", "BackendNull", None),
        "pytorch": ("backend_pytorch", "BackendPytorch", None),
        "pytorch-native": ("backend_pytorch_native", "BackendPytorchNative", None),
        "pytorch-centaur": ("backend_pytorch_centaur", "BackendPytorchCentaur", None),
        "pytorch-native-calibrate": ("backend_pytorch_native_calibrate", "BackendPytorchNativeCalibrate", None),
        "tflite": ("backend_tflite", "BackendTflite", None),
        "tflite-calibrate": ("backend_tflite_calibrate", "BackendTflite", None),
        "tflite-ncore": ("backend_tflite_ncore", "BackendTfliteNcore", None),
        "tflite-ncore-mobilenet": ("backend_libncoretflite", "BackendTfliteNcoreMobileNetV1", ["image_tensor:0"]),
        "tflite-ncore-resnet": ("backend_libncoretflite", "BackendTfliteNcoreResnet", ["image_tensor:0"]),
        "tflite-ncore-ssd": ("backend_libncoretflite", "BackendTfliteNcoreSSD", ["image_tensor:0"]),
        "tflite-ncore-mobilenet-offline": ("backend_libncoretflite", "BackendTfliteNcoreMobileNetV1Offline", ["image_tensor:0"]),
        "tflite-ncore-resnet-offline": ("backend_libncoretflite", "BackendTfliteNcoreResnetOffline", ["image_tensor:0"]),
        "tflite-ncore-ssd-offline": ("backend_libncoretflite", "BackendTfliteNcoreSSDOffline", ["image_tensor:0"]),
    }
    try:
        module_name, class_name, inputs = registry[backend]
    except KeyError:
        # `from None` keeps the exception surface identical to the original
        # (a bare ValueError, no chained KeyError context).
        raise ValueError("unknown backend: " + backend) from None
    import importlib
    instance = getattr(importlib.import_module(module_name), class_name)()
    if inputs is not None:
        instance.inputs = inputs
    return instance
Example #4 (score: 0)
def get_backend(backend, dataset_path, dataset_calibration_list):
    """Build the backend named *backend*, importing its module on demand.

    Args:
        backend: backend name string.
        dataset_path: forwarded to the edgecortix backend constructor.
        dataset_calibration_list: forwarded to the edgecortix backend
            constructor (used for calibration).

    Returns:
        A new backend instance.

    Raises:
        ValueError: if *backend* is not a known name.
    """
    # name -> (module, class, positional constructor args)
    registry = {
        "tensorflow": ("backend_tf", "BackendTensorflow", ()),
        "onnxruntime": ("backend_onnxruntime", "BackendOnnxruntime", ()),
        "null": ("backend_null", "BackendNull", ()),
        "pytorch": ("backend_pytorch", "BackendPytorch", ()),
        "pytorch-native": ("backend_pytorch_native", "BackendPytorchNative", ()),
        "pytorch-jit-traced": ("backend_pytorch_jit_traced", "BackendPytorchJITTraced", ()),
        "pytorch-fp32": ("backend_pytorch_fp32", "BackendPytorchFP32", ()),
        "pytorch-ssd-jit-traced": ("backend_pytorch_ssd_jit_traced", "BackendPytorchSSDJITTraced", ()),
        "pytorch-yolov3-jit-traced": ("backend_pytorch_yolov3_jit_traced", "BackendPytorchYOLOv3JITTraced", ()),
        "pytorch-yolov3-fp32": ("backend_pytorch_yolov3_fp32", "BackendPytorchYOLOv3FP32", ()),
        "tflite": ("backend_tflite", "BackendTflite", ()),
        # edgecortix is the only backend whose constructor takes arguments
        "edgecortix": ("backend_edgecortix", "BackendEdgecortix", (dataset_path, dataset_calibration_list)),
    }
    if backend not in registry:
        raise ValueError("unknown backend: " + backend)
    module_name, class_name, ctor_args = registry[backend]
    import importlib
    backend_cls = getattr(importlib.import_module(module_name), class_name)
    return backend_cls(*ctor_args)
Example #5 (score: 0)
File: main.py — Project: grant7788/inference
def _dlrm_model_config(dataset, max_ind_range, use_gpu):
    """Return the DLRM constructor kwargs for *dataset* / *max_ind_range*.

    The same model hyper-parameters (embedding sizes and MLP layer shapes)
    apply to every backend, so they are defined once here instead of being
    duplicated verbatim in each backend branch.

    Args:
        dataset: "kaggle" or "terabyte".
        max_ind_range: for "terabyte", either 10000000 or 40000000.
        use_gpu: passed straight through to the backend constructor.

    Returns:
        dict of keyword arguments for the backend constructor.

    Raises:
        ValueError: for an unsupported dataset or max_ind_range.
    """
    if dataset == "kaggle":
        # 1. Criteo Kaggle Display Advertisement Challenge Dataset
        #    (see ./bench/dlrm_s_criteo_kaggle.sh)
        return dict(
            m_spa=16,
            ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]),
            ln_bot=np.array([13,512,256,64,16]),
            ln_top=np.array([367,512,256,1]),
            use_gpu=use_gpu,
        )
    if dataset == "terabyte":
        if max_ind_range == 10000000:
            # 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh
            #    [--sub-sample=0.875] --max-in-range=10000000)
            return dict(
                m_spa=64,
                ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]),
                ln_bot=np.array([13,512,256,64]),
                ln_top=np.array([415,512,512,256,1]),
                use_gpu=use_gpu,
            )
        if max_ind_range == 40000000:
            # 3. Criteo Terabyte MLPerf training
            #    (see ./bench/run_and_time.sh --max-in-range=40000000)
            return dict(
                m_spa=128,
                ln_emb=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36]),
                ln_bot=np.array([13,512,256,128]),
                ln_top=np.array([479,1024,1024,512,256,1]),
                use_gpu=use_gpu,
            )
        raise ValueError("only --max-in-range 10M or 40M is supported")
    raise ValueError("only kaggle|terabyte dataset options are supported")


def get_backend(backend, dataset, max_ind_range, data_sub_sample_rate, use_gpu):
    """Instantiate the DLRM inference backend named *backend*.

    The model hyper-parameters were previously duplicated verbatim in the
    pytorch-native and onnxruntime branches; they are now looked up once via
    `_dlrm_model_config`, after the backend module is imported (preserving
    the original import-then-validate ordering).

    Args:
        backend: "pytorch-native" or "onnxruntime".
        dataset: "kaggle" or "terabyte".
        max_ind_range: embedding index range (10M or 40M for terabyte).
        data_sub_sample_rate: accepted for interface compatibility; unused
            here (as in the original).
        use_gpu: whether the backend should run on GPU.

    Returns:
        A constructed backend instance.

    Raises:
        ValueError: for an unknown backend, dataset, or max_ind_range.
    """
    if backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        backend = BackendPytorchNative(
            **_dlrm_model_config(dataset, max_ind_range, use_gpu)
        )
    elif backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        backend = BackendOnnxruntime(
            **_dlrm_model_config(dataset, max_ind_range, use_gpu)
        )
    else:
        raise ValueError("unknown backend: " + backend)
    return backend