Example #1
def main():
    cfg.merge_from_file(args.config)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = ModelBuilder()
    model = load_pretrain(model, args.snapshot)
    model.eval().to(device)

    backbone_net = model.backbone
    head_net = model.ban_head

    # backbone: export with the search-image input (x -> xf)
    backbone_x = torch.randn([1, 3, 255, 255], device=device)
    export_onnx_file_path = './models/onnx/nanotrack_backbone.onnx'
    torch.onnx.export(backbone_net,
                      backbone_x,
                      export_onnx_file_path,
                      input_names=['input'],
                      output_names=['output'],
                      verbose=True)

    # head: requires the modified forward in nanotrack/models/model_builder.py
    head_zf = torch.randn([1, 48, 8, 8], device=device)
    head_xf = torch.randn([1, 48, 16, 16], device=device)
    export_onnx_file_path = './models/onnx/nanotrack_head.onnx'
    torch.onnx.export(head_net, (head_zf, head_xf),
                      export_onnx_file_path,
                      input_names=['input1', 'input2'],
                      output_names=['output1', 'output2'],
                      verbose=True)

    # Simplify the model; otherwise the ONNX-to-ncnn conversion fails.
    # Command line: python3 -m onnxsim input_onnx_model output_onnx_model
    # github: github.com/daquexian/onnx-simplifier
    import onnx
    from onnxsim import simplify  # if 'onnxsim' is missing, run: pip install onnx-simplifier

    filename = './models/onnx/nanotrack_backbone_sim.onnx'
    simplified_model, check = simplify(
        './models/onnx/nanotrack_backbone.onnx',
        skip_fuse_bn=False)  # skipping BN fusion can help on newer PyTorch, but the flag has no effect here
    assert check, "Simplified backbone model could not be validated"
    onnx.save_model(simplified_model, filename)

    filename = './models/onnx/nanotrack_head_sim.onnx'
    simplified_model, check = simplify(
        './models/onnx/nanotrack_head.onnx',
        skip_fuse_bn=False)  # same note as above
    assert check, "Simplified head model could not be validated"
    onnx.save_model(simplified_model, filename)
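A quick sanity check of the simplified backbone against the PyTorch output can catch export problems early. A minimal sketch, assuming onnxruntime is installed, the file paths above, and that the backbone returns a single tensor:

import numpy as np
import onnxruntime as ort

x = torch.randn(1, 3, 255, 255)
with torch.no_grad():
    torch_out = backbone_net(x.to(device)).cpu().numpy()

# run the simplified ONNX model on the same input and compare
sess = ort.InferenceSession('./models/onnx/nanotrack_backbone_sim.onnx',
                            providers=['CPUExecutionProvider'])
onnx_out = sess.run(None, {'input': x.numpy()})[0]
np.testing.assert_allclose(torch_out, onnx_out, rtol=1e-3, atol=1e-5)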
Example #2
    def exportNCNN(self, output_ncnn_file=None):
        if not self.conf.ncnn_model_dir:
            return

        if not self.conf.onnx2ncnn:
            LOG.logE(
                "You must set the onnx2ncnn executable program path in config file. If you want to compile onnx2ncnn tools, reference https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-linux-x86 ",
                exit=True)

        if output_ncnn_file is None:
            output_ncnn_file = self.conf.ncnn_model_dir

        self.conf.ncnn_arch_dir = '{}.param'.format(output_ncnn_file)
        try:
            import onnx
            from onnxsim import simplify
        except ImportError:
            LOG.logE(
                "You must install onnx and onnxsim package if you want to convert pytorch to ncnn.",
                exit=True)

        if not self.conf.onnx_model_dir:
            f = tempfile.NamedTemporaryFile()
            self.conf.onnx_model_dir = f.name

        self.exportONNX()

        cmd = self.conf.onnx2ncnn + " " + self.conf.onnx_model_dir + " " + self.conf.ncnn_arch_dir + " " + output_ncnn_file
        pd = subprocess.Popen(cmd,
                              shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        if pd.stderr.read() != b"":
            LOG.logE(
                pd.stderr.read() +
                b". Error occured when export ncnn model. We try to simplify the model first"
            )
            model_op, check_ok = simplify(self.conf.onnx_model_dir,
                                          check_n=3,
                                          perform_optimization=True,
                                          skip_fuse_bn=True,
                                          skip_shape_inference=False)
            onnx.save(model_op, self.conf.onnx_model_dir)
            if not check_ok:
                LOG.logE(
                    "Something may have gone wrong while simplifying the model; the generated model may not be correct."
                )
            else:
                LOG.logI("Model simplified successfully")
            pd = subprocess.Popen(cmd,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            err = pd.stderr.read()
            if err != b"":
                LOG.logE(err +
                         b". We can't guarantee the generated model is correct.")

        LOG.logI(
            "Pytorch model converted to NCNN successfully; ncnn param file saved to {}, ncnn bin file saved to {}"
            .format(self.conf.ncnn_arch_dir, output_ncnn_file))
Example #3
def onnx_simplify(model, input_shapes):
    model_opt, check_ok = onnxsim.simplify(model,
                                           check_n=3,
                                           perform_optimization=False,
                                           skip_fuse_bn=False,
                                           input_shapes=input_shapes)
    return model_opt, check_ok
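A possible call site for the wrapper above; the model path and shapes are illustrative, not part of the original code:

import onnx

model = onnx.load('model.onnx')  # illustrative path
model_opt, check_ok = onnx_simplify(model, {'input': [1, 3, 224, 224]})
assert check_ok, "Simplified ONNX model could not be validated"
onnx.save(model_opt, 'model_sim.onnx')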
Example #4
def create_svm(m, optimize_model):
    batch_size = 1
    model_name = f"svm{m}"


    with tf.Session() as sess:
        x = tf.placeholder(tf.float32, shape=(m,), name='x')
        y = tf.placeholder(tf.float32, shape=(1,), name='y')

        w = tf.placeholder(tf.float32, shape=(m,), name='W')
        input_shapes = {"x:0": x.shape, "W:0": w.shape, "y:0": y.shape}

        mu = tf.constant(1, dtype=tf.float32, name="mu")

        h = tf.reduce_sum(w * x)
        c = y * h
        ny = 0 - y
        p = tf.cast((c > y), dtype=ny.dtype) * ny

        g = p * x

        w = tf.subtract(w, mu * g, name="W_out")
        sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated

        input_names = ['x:0', 'y:0']
        output_names = ['W_out:0']

        onnx_graph = tf2onnx.tfonnx.process_tf_graph(sess.graph, input_names=input_names, output_names=output_names)
        model_proto = onnx_graph.make_model(model_name)
        model_proto = optimizer.optimize(model_proto, ['eliminate_identity'])
        if optimize_model:
            model_proto, check = simplify(model_proto, input_shapes=input_shapes)
            assert check
        with open(f"./{model_name}.onnx", "wb") as f:
            f.write(model_proto.SerializeToString())
Example #5
    def onnxSimplify(self,
                     check_n=0,
                     perform_optimization=True,
                     skip_fuse_bn=False,
                     input_shapes=None,
                     skipped_optimizers=None,
                     skip_shape_inference=False,
                     input_data=None,
                     dynamic_input_shape=False):
        try:
            import onnx
            from onnxsim import simplify
        except ImportError:
            LOG.logE(
                "You must install onnx and onnxsim package first if you want to convert pytorch to ncnn or tnn.",
                exit=True)

        try:
            model_op, check_ok = simplify(
                self.config.onnx_model_dir, check_n=check_n,
                perform_optimization=perform_optimization,
                skip_fuse_bn=skip_fuse_bn,
                skip_shape_inference=skip_shape_inference,
                input_shapes=input_shapes,
                skipped_optimizers=skipped_optimizers,
                input_data=input_data,
                dynamic_input_shape=dynamic_input_shape)
        except RuntimeError as e:
            LOG.logE("Simplify model failed. {}".format(e), exit=True)

        onnx.save(model_op, self.config.onnx_model_dir)
        if not check_ok:
            LOG.logE(
                "Something may have gone wrong while simplifying the model; the generated model may not be correct."
            )
        else:
            LOG.logI("Model simplified successfully")
Example #6
def export_onnx(model, img, file, opset, train, dynamic, simplify):
    # ONNX model export
    prefix = colorstr('ONNX:')
    try:
        check_requirements(('onnx', 'onnx-simplifier'))
        import onnx

        print(f'\n{prefix} starting export with onnx {onnx.__version__}...')
        f = file.with_suffix('.onnx')
        torch.onnx.export(
            model,
            img,
            f,
            verbose=False,
            opset_version=opset,
            training=torch.onnx.TrainingMode.TRAINING
            if train else torch.onnx.TrainingMode.EVAL,
            do_constant_folding=not train,
            input_names=['images'],
            output_names=['output'],
            dynamic_axes={
                'images': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'
                },  # shape(1,3,640,640)
                'output': {
                    0: 'batch',
                    1: 'anchors'
                }  # shape(1,25200,85)
            } if dynamic else None)

        # Checks
        model_onnx = onnx.load(f)  # load onnx model
        onnx.checker.check_model(model_onnx)  # check onnx model
        # print(onnx.helper.printable_graph(model_onnx.graph))  # print

        # Simplify
        if simplify:
            try:
                import onnxsim

                print(
                    f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...'
                )
                model_onnx, check = onnxsim.simplify(
                    model_onnx,
                    dynamic_input_shape=dynamic,
                    input_shapes={'images': list(img.shape)}
                    if dynamic else None)
                assert check, 'assert check failed'
                onnx.save(model_onnx, f)
            except Exception as e:
                print(f'{prefix} simplifier failure: {e}')
        print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        print(
            f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'"
        )
    except Exception as e:
        print(f'{prefix} export failure: {e}')
Example #7
def create_logistic(m, optimize_model):
    batch_size = 1
    model_name = f"logistic{m}"
    with tf.Session() as sess:
        x = tf.placeholder(tf.float32, shape=(m,), name='x')
        y = tf.placeholder(tf.float32, shape=(1,), name='y')

        w = tf.placeholder(tf.float32, shape=(m,), name='W')
        input_shapes = {"x:0": x.shape, "W:0": w.shape, "y:0": y.shape}

        mu = tf.constant(1, dtype=tf.float32, name="mu")
        h = tf.reduce_sum(tf.multiply(w, x))
        h = tf.math.sigmoid(h)
        d = tf.subtract(h, y)
        g = tf.multiply(d, x)

        g = tf.multiply(mu, g)
        w = tf.subtract(w, g, name='w_out')

        input_names = ['x:0', 'y:0']
        output_names = ['w_out:0']

        onnx_graph = tf2onnx.tfonnx.process_tf_graph(sess.graph, input_names=input_names, output_names=output_names)

        model_proto = onnx_graph.make_model(model_name)

        model_proto = optimizer.optimize(model_proto, ['eliminate_identity'])
        if optimize_model:
            model_proto, check = simplify(model_proto, input_shapes=input_shapes)
            assert check
        with open(f"./{model_name}.onnx", "wb") as f:
            f.write(model_proto.SerializeToString())
Example #8
    def inference_shape(self) -> onnx.ModelProto:
        self.model, check = onnxsim.simplify(
            self.model, skipped_optimizers=['eliminate_duplicate_initializer'])
        assert check
        check_model(self.model)

        return utils.name_nodes(self.model)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input_model', help='Input ONNX model')
    parser.add_argument('output_model', help='Output ONNX model')
    parser.add_argument('check_n', help='Check whether the output is correct with n random inputs',
                        nargs='?', type=int, default=3)
    parser.add_argument('--skip-fuse-bn', help='Skip ONNX fuse_bn_into_conv optimizers. In some cases it causes incorrect model (https://github.com/onnx/onnx/issues/2677).',
                        action='store_true')
    parser.add_argument('--skip-optimization', help='Skip optimization of ONNX optimizers.',
                        action='store_true')
    parser.add_argument(
        '--input-shape', help='The manually-set static input shape, useful when the input shape is dynamic. The value should be "input_name:dim0,dim1,...,dimN" or simply "dim0,dim1,...,dimN" when there is only one input, for example, "data:1,3,224,224" or "1,3,224,224". Note: you might want to use some visualization tools like netron to make sure what the input name and dimension ordering (NCHW or NHWC) is.', type=str, nargs='+')
    args = parser.parse_args()
    print("Simplifying...")
    input_shapes = {}
    if args.input_shape is not None:
        for x in args.input_shape:
            if ':' not in x:
                input_shapes[None] = list(map(int, x.split(',')))
            else:
                pieces = x.split(':')
                # for the input name like input:0
                name, shape = ':'.join(
                    pieces[:-1]), list(map(int, pieces[-1].split(',')))
                input_shapes[name] = shape
    model_opt, check_ok = onnxsim.simplify(
        args.input_model, check_n=args.check_n, perform_optimization=not args.skip_optimization, skip_fuse_bn=args.skip_fuse_bn, input_shapes=input_shapes)

    onnx.save(model_opt, args.output_model)

    if check_ok:
        print("Ok!")
    else:
        print("Check failed, please be careful to use the simplified model, or try specifying \"--skip-fuse-bn\" or \"--skip-optimization\" (run \"python3 -m onnxsim -h\" for details)")
        sys.exit(1)
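Each --input-shape string maps to one entry of the input_shapes dict. A standalone sketch of that parsing logic (the sample value is illustrative):

def parse_input_shapes(values):
    # mirrors the --input-shape parsing loop above
    input_shapes = {}
    for x in values:
        if ':' not in x:
            input_shapes[None] = list(map(int, x.split(',')))
        else:
            pieces = x.split(':')
            # handles input names that themselves contain ':', like input:0
            name, shape = ':'.join(pieces[:-1]), list(map(int, pieces[-1].split(',')))
            input_shapes[name] = shape
    return input_shapes

print(parse_input_shapes(['data:1,3,224,224']))  # -> {'data': [1, 3, 224, 224]}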
Example #10
def torch2tvm_module(torch_module: torch.nn.Module,
                     torch_inputs: Tuple[torch.Tensor, ...], target):
    torch_module.eval()
    input_names = []
    input_shapes = {}
    with torch.no_grad():
        for index, torch_input in enumerate(torch_inputs):
            name = "i" + str(index)
            input_names.append(name)
            input_shapes[name] = torch_input.shape
        buffer = io.BytesIO()
        torch.onnx.export(
            torch_module,
            torch_inputs,
            buffer,
            input_names=input_names,
            output_names=["o" + str(i) for i in range(len(torch_inputs))],
            opset_version=10)
        outs = torch_module(*torch_inputs)
        buffer.seek(0, 0)
        onnx_model = onnx.load_model(buffer)
        onnx_model, _ = simplify(onnx_model)
        relay_module, params = tvm.relay.frontend.from_onnx(onnx_model,
                                                            shape=input_shapes)
    with tvm.relay.build_config(opt_level=3):
        graph, tvm_module, params = tvm.relay.build(relay_module,
                                                    target,
                                                    params=params)
    return graph, tvm_module, params
Example #11
def convert_onnx(net, path_module, output, opset=11, simplify=False):
    assert isinstance(net, torch.nn.Module)
    img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
    img = img.astype(np.float32)  # np.float was removed in newer NumPy
    img = (img / 255. - 0.5) / 0.5  # torch style norm
    img = img.transpose((2, 0, 1))
    img = torch.from_numpy(img).unsqueeze(0).float()

    weight = torch.load(path_module)
    net.load_state_dict(weight)
    net.eval()
    torch.onnx.export(net,
                      img,
                      output,
                      keep_initializers_as_inputs=False,
                      verbose=False,
                      opset_version=opset)
    model = onnx.load(output)
    graph = model.graph
    graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
    if simplify:
        from onnxsim import simplify
        model, check = simplify(model)
        assert check, "Simplified ONNX model could not be validated"
    onnx.save(model, output)
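A hypothetical call site for the function above; iresnet50 and the paths are placeholders, not part of the original code:

net = iresnet50()  # hypothetical backbone constructor
convert_onnx(net, 'backbone.pth', 'backbone.onnx', opset=11, simplify=True)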
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input_model', help='Input ONNX model')
    parser.add_argument('output_model', help='Output ONNX model')
    args = parser.parse_args()
    model_opt, check_ok = onnxsim.simplify(args.input_model)

    onnx.save(model_opt, args.output_model)
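Note that the return type of onnxsim.simplify changed across versions: early releases returned just the model, while newer ones return a (model, check) tuple. A version-tolerant variant, if both must be supported (a sketch, not part of the original script):

result = onnxsim.simplify(args.input_model)
model_opt, check_ok = result if isinstance(result, tuple) else (result, True)
onnx.save(model_opt, args.output_model)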
Example #13
    def simplify_model(self, onnxModelPath, simModelOutPath):
        # load your predefined ONNX model
        model = onnx.load(onnxModelPath)

        # convert model
        model_simp, check = simplify(model)

        onnx.save(model_simp, simModelOutPath)
Example #14
def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
    # YOLOv5 ONNX export
    try:
        check_requirements(('onnx',))
        import onnx

        LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
        f = file.with_suffix('.onnx')

        torch.onnx.export(
            model,
            im,
            f,
            verbose=False,
            opset_version=opset,
            training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
            do_constant_folding=not train,
            input_names=['images'],
            output_names=['output'],
            dynamic_axes={
                'images': {
                    0: 'batch',
                    2: 'height',
                    3: 'width'},  # shape(1,3,640,640)
                'output': {
                    0: 'batch',
                    1: 'anchors'}  # shape(1,25200,85)
            } if dynamic else None)

        # Checks
        model_onnx = onnx.load(f)  # load onnx model
        onnx.checker.check_model(model_onnx)  # check onnx model

        # Metadata
        d = {'stride': int(max(model.stride)), 'names': model.names}
        for k, v in d.items():
            meta = model_onnx.metadata_props.add()
            meta.key, meta.value = k, str(v)
        onnx.save(model_onnx, f)

        # Simplify
        if simplify:
            try:
                check_requirements(('onnx-simplifier',))
                import onnxsim

                LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
                model_onnx, check = onnxsim.simplify(model_onnx,
                                                     dynamic_input_shape=dynamic,
                                                     input_shapes={'images': list(im.shape)} if dynamic else None)
                assert check, 'assert check failed'
                onnx.save(model_onnx, f)
            except Exception as e:
                LOGGER.info(f'{prefix} simplifier failure: {e}')
        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
        return f
    except Exception as e:
        LOGGER.info(f'{prefix} export failure: {e}')
Example #15
def export_model(experiment, model, model_name, input_shape,
                 without_decode_detections, simplify_model):
    shape = [1] + input_shape
    x = torch.randn(*shape, requires_grad=True)
    torch_out = model(x)

    outputs = ['output']
    if without_decode_detections:
        outputs.extend(['wh', 'rg'])

    suffix = '_wd' if without_decode_detections else ''
    output_path = experiment / \
        f"centernet_{model_name}_{shape[2]}x{shape[3]}{suffix}.onnx"
    torch.onnx.export(
        model,  # model being run
        x,
        output_path,
        export_params=True,  # store the trained parameter weights inside the model file
        opset_version=11,  # the ONNX version to export the model to
        do_constant_folding=True,  # whether to execute constant folding for optimization
        input_names=['input'],  # the model's input names
        output_names=outputs,
        dynamic_axes={
            'input': {0: 'batch_size'},  # variable length axes
            'output': {0: 'batch_size'}
        })

    print(f"Export model to {output_path} successful!")

    if simplify_model:
        print("Simplify model")
        if not has_simplify:
            print(
                "Can't simplify model because ONNX Simplifier is not installed")
            print("Please install it: pip install onnx-simplifier")
            return

        onnx_model = onnx.load(output_path)
        simplified_model, check = simplify(onnx_model,
                                           input_shapes={"input": shape})
        assert check, "Simplified ONNX model could not be validated"
        simplified_model = onnx.shape_inference.infer_shapes(simplified_model)
        onnx.checker.check_model(simplified_model)

        output_path = experiment / \
            f"centernet_{model_name}_{shape[2]}x{shape[3]}{suffix}_smpl.onnx"
        onnx.save(simplified_model, output_path)

        print(f"Export simplified model to {output_path} successful!")
Example #16
def main(config, model_path, output_path, input_shape=(320, 320)):
    logger = Logger(-1, config.save_dir, False)
    model = build_model(config.model)
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    load_model_weight(model, checkpoint, logger)
    if config.model.arch.backbone.name == 'RepVGG':
        deploy_config = config.model
        deploy_config.arch.backbone.update({'deploy': True})
        deploy_model = build_model(deploy_config)
        from nanodet.model.backbone.repvgg import repvgg_det_model_convert
        model = repvgg_det_model_convert(model, deploy_model)
    dummy_input = torch.autograd.Variable(
        torch.randn(1, 3, input_shape[0], input_shape[1]))
    dynamic_axes = {
        "input": {0: "batch_size"},
        "output1": {0: "batch_size"},
        "output2": {0: "batch_size"},
        "output3": {0: "batch_size"},
        "output4": {0: "batch_size"},
        "output5": {0: "batch_size"},
        "output6": {0: "batch_size"}
    }

    input_names = ['input']
    output_names = [
        'output1', 'output2', 'output3', 'output4', 'output5', 'output6'
    ]
    torch.onnx.export(model,
                      dummy_input,
                      output_path,
                      verbose=True,
                      keep_initializers_as_inputs=True,
                      opset_version=12,
                      input_names=input_names,
                      output_names=output_names)

    import onnx
    from onnxsim import simplify
    model = onnx.load(output_path)
    # convert model
    model_simp, check = simplify(model)
    assert check, "Simplified ONNX model could not be validated"
    onnx.save(model_simp, output_path)
    print('finished exporting onnx')
Example #17
def convert_torch_model(input_var, model, model_name, optimize_model, training_mode, to_polymath,
                        convert_data_format=False):
    f = io.BytesIO()
    mode = torch.onnx.TrainingMode.TRAINING if training_mode else torch.onnx.TrainingMode.EVAL
    if 'mask_rcnn' not in model_name:
        torch.onnx.export(model,  # model being run
                          input_var,  # model input (or a tuple for multiple inputs)
                          f,  # where to save the model (can be a file or file-like object)
                          export_params=True,  # store the trained parameter weights inside the model file
                          do_constant_folding=True,  # whether to execute constant folding for optimization
                          keep_initializers_as_inputs=True,
                          training=mode,
                          input_names=['input'],  # the model's input names
                          output_names=['output'],
                          opset_version=12)
    else:
        model.eval()
        # input_var = [(input_var,)]
        if isinstance(input_var[0][-1], dict):
            input_var = input_var[0] + ({},)
        else:
            input_var = input_var[0]

        dynamic_axes = {"images_tensors": [0, 1, 2], "boxes": [0, 1], "labels": [0],
                                        "scores": [0], "masks": [0, 1, 2]}
        torch.onnx.export(model,  # model being run
                          input_var,  # model input (or a tuple for multiple inputs)
                          f,  # where to save the model (can be a file or file-like object)
                          do_constant_folding=True,  # whether to execute constant folding for optimization
                          # training=mode,
                          input_names=["images_tensors"],
                          output_names=["boxes", "labels", "scores", "masks"],
                          dynamic_axes=dynamic_axes,
                          opset_version=_onnx_opset_version,
                          verbose=False,
                          # export_params=True,  # store the trained parameter weights inside the model file
                          # keep_initializers_as_inputs=True,
                          # operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
                          )
        print(type(f.getvalue()))
    model_proto = onnx.ModelProto.FromString(f.getvalue())
    print_nodes(model_proto)
    onnx.checker.check_model(model_proto)
    add_value_info_for_constants(model_proto)
    model_proto = onnx.shape_inference.infer_shapes(model_proto)
    filepath = f"{CWD}/{model_name}.onnx"
    if optimize_model:
        model_proto, check = simplify(model_proto)
        assert check
    model_proto = update_node_names(model_proto)
    model_proto = update_edge_names(model_proto)
    with open(filepath, "wb") as f:
        f.write(model_proto.SerializeToString())

    if to_polymath:
        graph = pm.from_onnx(filepath)
        pm.pb_store(graph, f"{CWD}/full_dnns/")
Example #18
def main():
    args = make_parser().parse_args()
    logger.info("args value: {}".format(args))
    from yolox.exp.yolox_nano_soso8 import Exp
    exp = Exp()
    exp.merge(args.opts)

    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    model = exp.get_model()
    if args.ckpt is None:
        file_name = os.path.join(exp.output_dir, args.experiment_name)
        ckpt_file = os.path.join(file_name, "best_ckpt.pth")
    else:
        ckpt_file = args.ckpt

    # load the model state dict
    ckpt = torch.load(ckpt_file, map_location="cpu")

    model.eval()
    if "model" in ckpt:
        ckpt = ckpt["model"]
    model.load_state_dict(ckpt)
    model = replace_module(model, nn.SiLU, SiLU)
    model.head.decode_in_inference = args.decode_in_inference

    logger.info("loading checkpoint done.")
    dummy_input = torch.randn(args.batch_size, args.image_channel, exp.test_size[0], exp.test_size[1])

    torch.onnx._export(
        model,
        dummy_input,
        args.output_name,
        input_names=[args.input],
        output_names=[args.output],
        dynamic_axes={args.input: {0: 'batch'},
                      args.output: {0: 'batch'}} if args.dynamic else None,
        opset_version=args.opset,
    )
    logger.info("generated onnx model named {}".format(args.output_name))

    if not args.no_onnxsim:
        import onnx

        from onnxsim import simplify

        input_shapes = {args.input: list(dummy_input.shape)} if args.dynamic else None

        # use onnx-simplifier to remove redundant nodes from the model.
        onnx_model = onnx.load(args.output_name)
        model_simp, check = simplify(onnx_model,
                                     dynamic_input_shape=args.dynamic,
                                     input_shapes=input_shapes)
        assert check, "Simplified ONNX model could not be validated"
        onnx.save(model_simp, args.output_name)
        logger.info("generated simplified onnx model named {}".format(args.output_name))
Example #19
def main():
    torch.manual_seed(1234)
    tmp = torch.ones(1, 3, 320, 192)
    execute_path = os.path.dirname(os.path.realpath(__file__))
    onnx_file = os.path.join(execute_path, "yolov3.onnx")

    model = onnx.load(onnx_file)
    model_simp, check = simplify(model)
    onnx_simplify_file = os.path.join(execute_path, "yolov3_simplify.onnx")
    onnxmltools.utils.save_model(model_simp, onnx_simplify_file)
Example #20
def simplify(infile, outfile=None):
    if outfile is None:
        assert infile.endswith('.onnx')
        outfile = infile
        infile = infile.replace('.onnx', '.unsimplified.onnx')
        shutil.copyfile(outfile, infile)

    simplified_model, check_ok = onnxsim.simplify(infile, check_n=3, perform_optimization=False)
    assert check_ok
    onnx.save(simplified_model, outfile)
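Usage note: called with only an input path, the helper above simplifies in place and keeps a backup of the original, e.g.:

simplify('model.onnx')  # writes the simplified model back to model.onnx, keeps model.unsimplified.onnx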
Example #21
def simplify_and_convert(model, output, table_file=""):
    if isinstance(model, str):
        model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        pass
    else:
        raise RuntimeError(
            "Input of simplify_and_convert can only be str or onnx.ModelProto")

    model, check_ok = onnxsim.simplify(model)
    assert check_ok, "Simplified ONNX model could not be validated"
    convert(model, output, table_file)
Example #22
def onnx_optim(path, outpath='out.onnx'):
    # load your predefined ONNX model
    model = onnx.load(path)
    # convert model
    model, check = simplify(model)
    assert check, "Simplified ONNX model could not be validated"
    # remove zero Add nodes
    model = eliminate_zero(model)
    # remove zero Mul nodes
    model = eliminate_zero(model, mode='Mul')
    onnx.save(model, outpath)
Example #23
def save_simplify_onnx(model, example):
    # save
    torch.onnx.export(model,
                      example,
                      ONNX_PATH,
                      input_names=["input"],
                      output_names=["473"])
    # simplify
    model = onnx.load(ONNX_PATH)
    model_simp, check = simplify(model)
    assert check, "Simplified ONNX model could not be validated"
    onnx.save(model_simp, ONNX_PATH)
Example #24
def main(model_path, output_path, input_shape=(224, 224), batch_size=1):
    model = effnetv2_s(num_classes=2)
    checkpoint = torch.load(model_path)['state_dict']
    checkpoint = {k.replace('module.', ''): v for k, v in checkpoint.items()} 
    model.load_state_dict(checkpoint)
    dummy_input = torch.autograd.Variable(torch.randn(batch_size, 3, input_shape[0], input_shape[1]))
    torch.onnx.export(model, dummy_input, output_path, verbose=True, keep_initializers_as_inputs=True, opset_version=12)
    onnx_model = onnx.load(output_path)  # load onnx model
    model_simp, check = simplify(onnx_model)
    assert check, "Simplified ONNX model could not be validated"
    onnx.save(model_simp, output_path)
    print('finished exporting onnx ')
Example #25
def onnx_simplifier(checkpoint_path, export_model_name):
    import onnx
    from onnxsim import simplify

    # load your predefined ONNX model
    model = onnx.load(checkpoint_path)

    # convert model
    model_simp, check = simplify(model)

    assert check, "Simplified ONNX model could not be validated"
    onnx.save(model_simp, export_model_name)
Example #26
    def exportNCNN(self, img):
        if not self.conf.ncnn_param_output_path or not self.conf.ncnn_bin_output_path:
            return
        if not self.conf.onnx2ncnn:
            LOG.logE(
                "You must set the onnx2ncnn executable program path in config file. If you want to compile onnx2ncnn tools, reference https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-linux-x86 ",
                exit=True)

        import onnx
        import subprocess
        import tempfile
        from onnxsim import simplify

        if not self.conf.onnx_output_model_path:
            f = tempfile.NamedTemporaryFile()
            self.conf.onnx_output_model_path = f.name
        self.exportONNX(img)

        cmd = self.conf.onnx2ncnn + " " + self.conf.onnx_output_model_path + " " + self.conf.ncnn_param_output_path + " " + self.conf.ncnn_bin_output_path
        pd = subprocess.Popen(cmd,
                              shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        if pd.stderr.read() != b"":
            LOG.logE(
                pd.stderr.read() +
                b". Error occured when export ncnn model. We try to simplify the model first"
            )
            model_op, check_ok = simplify(self.conf.onnx_output_model_path,
                                          check_n=3,
                                          perform_optimization=True,
                                          skip_fuse_bn=True,
                                          skip_shape_inference=False)
            onnx.save(model_op, self.conf.onnx_output_model_path)
            if not check_ok:
                LOG.logE(
                    "Something may have gone wrong while simplifying the model; the generated model may not be correct."
                )
            else:
                LOG.logI("Model simplified successfully")
            pd = subprocess.Popen(cmd,
                                  shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            err = pd.stderr.read()
            if err != b"":
                LOG.logE(err +
                         b". We can't guarantee the generated model is correct.")

        LOG.logI(
            "Pytorch model converted to NCNN successfully; ncnn param file saved to {}, ncnn bin file saved to {}"
            .format(self.conf.ncnn_param_output_path,
                    self.conf.ncnn_bin_output_path))
Example #27
def _download(url, name, in_shapes):
    filename = os.path.join(MODEL_DIR, "source", name + ".onnx")
    if not os.path.exists(filename):
        req = requests.get(url)
        onnx_model, check = onnxsim.simplify(
            onnx.load_model(BytesIO(req.content)),
            check_n=3,
            input_shapes=in_shapes)
        assert check, "Simplified ONNX model could not be validated"
        onnx.save(onnx_model, filename)

    with open(filename, "rb") as file:
        return file.read()
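An illustrative call; the URL and shape are placeholders, not a real endpoint:

raw_bytes = _download('https://example.com/resnet18.onnx',  # placeholder URL
                      'resnet18',
                      {'input': [1, 3, 224, 224]})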
Example #28
def simp(in_file, out_file):
    model = onnx.load(in_file)

    input_shapes = {}
    # input_shapes = { None: [1, 3, 220, 220] }

    model_simp, check = simplify(model,
                                 check_n=1,
                                 skip_shape_inference=True,
                                 input_shapes=input_shapes)

    assert check, "Simplified ONNX model could not be validated"

    onnx.save(model_simp, out_file)
Example #29
def main():
    args = make_parser().parse_args()
    logger.info("args value: {}".format(args))
    exp = get_exp(args.exp_file, args.name)
    exp.merge(args.opts)

    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    model = exp.get_model()
    if args.ckpt is None:
        file_name = os.path.join(exp.output_dir, args.experiment_name)
        ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
    else:
        ckpt_file = args.ckpt

    # load the model state dict
    ckpt = torch.load(ckpt_file, map_location="cpu")

    model.eval()
    if "model" in ckpt:
        ckpt = ckpt["model"]
    model.load_state_dict(ckpt)
    model = replace_module(model, nn.SiLU, SiLU)
    model.head.decode_in_inference = False

    logger.info("loading checkpoint done.")
    dummy_input = torch.randn(1, 3, exp.test_size[0], exp.test_size[1])
    torch.onnx._export(
        model,
        dummy_input,
        args.output_name,
        input_names=[args.input],
        output_names=[args.output],
        opset_version=args.opset,
    )
    logger.info("generated onnx model named {}".format(args.output_name))

    if not args.no_onnxsim:
        import onnx

        from onnxsim import simplify

        # use onnx-simplifier to remove redundant nodes from the model.
        onnx_model = onnx.load(args.output_name)
        model_simp, check = simplify(onnx_model)
        assert check, "Simplified ONNX model could not be validated"
        onnx.save(model_simp, args.output_name)
        logger.info("generated simplified onnx model named {}".format(
            args.output_name))
Example #30
def torch2tvm_module(torch_module: torch.nn.Module,
                     torch_inputs: Tuple[torch.Tensor, ...], target):
    torch_module.eval()
    input_names = []
    input_shapes = {}
    with torch.no_grad():
        for index, torch_input in enumerate(torch_inputs):
            name = "i" + str(index)
            input_names.append(name)
            input_shapes[name] = torch_input.shape

        # buffer = io.BytesIO()
        # torch.onnx.export(torch_module, torch_inputs, buffer, input_names=input_names,
        #                   # output_names=["o" + str(i) for i in range(len(torch_inputs))], opset_version=10)
        #                   output_names=["o" + str(i) for i in range(len(torch_inputs))], opset_version=10)
        # outs = torch_module(*torch_inputs)
        # buffer.seek(0, 0)
        # onnx_model = onnx.load_model(buffer)

        # print('torch_inputs:', len(torch_inputs), type(torch_inputs), input_names)
        # print(["o" + str(i) for i in range(len(torch_inputs))])

        torch.onnx.export(
            torch_module,  # model being run
            torch_inputs[0],  # model input (or a tuple for multiple inputs)
            "super_resolution.onnx",  # where to save the model (file or file-like object)
            export_params=True,  # store the trained parameter weights inside the model file
            opset_version=11,  # the ONNX version to export the model to
            do_constant_folding=True,  # whether to execute constant folding for optimization
            input_names=['input'],  # the model's input names
            output_names=['output'])
        outs = torch_module(*torch_inputs)
        onnx_model = onnx.load_model("super_resolution.onnx")

        from onnxsim import simplify
        onnx_model, success = simplify(
            onnx_model)  # this simplifier removes conversion bugs.
        assert success
        print('assert success')
        relay_module, params = tvm.relay.frontend.from_onnx(onnx_model,
                                                            shape=input_shapes)
        print('tvm.relay.frontend.from_onnx')
    with tvm.relay.build_config(opt_level=3):
        graph, tvm_module, params = tvm.relay.build(relay_module,
                                                    target,
                                                    params=params)
        print('tvm.relay.build')
    return graph, tvm_module, params
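To actually run the compiled result, older TVM versions (matching the (graph, module, params) triple returned above) provide tvm.contrib.graph_runtime. A minimal sketch under that assumption, with net standing in for any torch.nn.Module; the input name 'input' matches the ONNX export above:

import numpy as np
import tvm
from tvm.contrib import graph_runtime

graph, lib, params = torch2tvm_module(net, (torch.randn(1, 3, 224, 224),), 'llvm')
m = graph_runtime.create(graph, lib, tvm.cpu(0))
m.set_input(**params)  # bind the compiled weights
m.set_input('input', np.random.randn(1, 3, 224, 224).astype('float32'))
m.run()
out = m.get_output(0).asnumpy()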