Example no. 1
0
def get_remote_model_weight(model: ModelBO):
    """Download a local cache of model from remote ModelDB in a structured path. And generate a configuration file.
    TODO(lym):
        1. set force insert config.pbtxt
        2. set other options in generation of config.pbtxt (e.g. max batch size, instance group...)
    This function will keep a local cache of the used model in the path:
        `~/.modelci/<architecture_name>/<framework>-<engine>/<task>/<version>`
    Arguments:
        model (ModelBO): Model business object.
    Return:
        Path: Model saved path.
    """
    save_path = model.saved_path

    save_path.parent.mkdir(exist_ok=True, parents=True)

    if not save_path.exists():
        with open(str(save_path), 'wb') as f:
            f.write(model.weight.weight)
        if model.engine == Engine.TFS:
            subprocess.call(['unzip', save_path, '-d', '/'])
            os.remove(save_path)
        elif model.engine == Engine.TRT:
            subprocess.call(['unzip', save_path, '-d', '/'])
            os.remove(save_path)

            TRTConverter.generate_trt_config(
                save_path.
                parent,  # ~/.modelci/<model-arch-name>/<framework>-<engine>/<task>/
                inputs=model.inputs,
                outputs=model.outputs,
                arch_name=model.name,
                platform=TensorRTPlatform.TENSORFLOW_SAVEDMODEL)

    return save_path
Example no. 2
0
def _generate_model_family(model,
                           model_name: str,
                           task: Task,
                           framework: Framework,
                           filename: str,
                           inputs: List[IOShape],
                           model_input: Optional[List] = None,
                           outputs: List[IOShape] = None,
                           max_batch_size: int = -1):
    """Convert `model` into every serving format supported for its framework.

    Arguments:
        model: In-memory model object (PyTorch module or TensorFlow model).
        model_name (str): Architecture name, used to build the save path.
        task (Task): Model task category.
        framework (Framework): Source framework of `model`.
        filename (str): Version string used as the saved file name.
        inputs (List[IOShape]): Input tensor shapes.
        model_input (Optional[List]): Sample inputs for the ONNX export.
        outputs (List[IOShape]): Output tensor shapes.
        max_batch_size (int): Maximum batch size forwarded to the TRT
            converter; non-positive values fall back to 32.

    Returns:
        list: Paths of the generated model artifacts.
    """
    generated_dir_list = []
    # All engine paths share the same structure except for the engine segment.
    generate_this_path = partial(generate_path,
                                 task=task,
                                 model_name=model_name,
                                 framework=framework,
                                 version=filename)
    torchscript_dir = generate_this_path(engine=Engine.TORCHSCRIPT)
    tfs_dir = generate_this_path(engine=Engine.TFS)
    onnx_dir = generate_this_path(engine=Engine.ONNX)
    trt_dir = generate_this_path(engine=Engine.TRT)

    if framework == Framework.PYTORCH:
        # to TorchScript
        if TorchScriptConverter.from_torch_module(model, torchscript_dir):
            generated_dir_list.append(torchscript_dir.with_suffix('.zip'))

        # to ONNX, TODO(lym): batch cache, input shape, opset version
        if ONNXConverter.from_torch_module(model,
                                           onnx_dir,
                                           inputs,
                                           outputs,
                                           model_input,
                                           optimize=False):
            generated_dir_list.append(onnx_dir.with_suffix('.onnx'))

        # to TRT
        # TRTConverter.from_onnx(
        #     onnx_path=onnx_dir.with_suffix('.onnx'), save_path=trt_dir, inputs=inputs, outputs=outputs
        # )
    elif framework == Framework.TENSORFLOW:
        # to TFS
        TFSConverter.from_tf_model(model, tfs_dir)
        generated_dir_list.append(tfs_dir.with_suffix('.zip'))

        # to TRT. Bug fix: the original hard-coded max_batch_size=32 and
        # silently ignored the `max_batch_size` parameter; forward it now,
        # keeping 32 as the fallback for the `-1` sentinel default so
        # existing callers see no behavior change.
        TRTConverter.from_saved_model(tfs_dir,
                                      trt_dir,
                                      inputs,
                                      outputs,
                                      max_batch_size=max_batch_size if max_batch_size > 0 else 32)
        generated_dir_list.append(trt_dir.with_suffix('.zip'))

    return generated_dir_list