Code example #1 (score: 0)
#!/usr/bin/env python
# coding: utf-8

import nn_format

if __name__ == "__main__":
    # Command-line options for the extraction-stage inference server.
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument("--pt7_path", type=str, required=True,
                     help="Path of *.pt7")
    cli.add_argument("--batch_size", type=int, default=1,
                     help="Batch size")
    cli.add_argument("--ip", type=str, default='127.0.0.1',
                     help="IP addr")
    cli.add_argument("--port", type=int, default=60260,
                     help="Port number")
    opts = cli.parse_args()

    import torch
    import net

    # cuDNN is disabled before building the model (kept from the original;
    # presumably to force plain CPU kernels — confirm against net.WRN_extract).
    torch.backends.cudnn.enabled = False
    checkpoint = torch.load(opts.pt7_path, map_location='cpu')
    sub_model = net.WRN_extract(batch_size=opts.batch_size,
                                weights=checkpoint['params'],
                                stats=checkpoint['stats'],
                                reduce_port=5002,
                                segment=2,
                                id=1)

    # Freeze parameters and switch to eval mode for inference.
    sub_model.set_requires_grad(False)
    sub_model.eval()

    print(sub_model)

    # Serve requests without tracking gradients.
    with torch.no_grad():
        nn_format.nn_server(model=sub_model, host=opts.ip, port=opts.port,
                            i_shape=(1, 3, 32, 32))
Code example #2 (score: 0)
File: inference_tvm.py — Project: LiGuihong/nonn
    # BUGFIX: help text was a copy-paste of the transpose option's help.
    parser.add_argument("--img_dump_size", help="Image dump size", type=int)

    args = parser.parse_args()
    # Optional output-axis transpose, given as a comma-separated list.
    o_xpose = [int(x) for x in args.o_xpose.split(',')] if args.o_xpose else None
    img_size = [int(x) for x in args.i_size.split(',')]
    # BUGFIX: compare by value, not identity — "is 3" only works via CPython
    # small-int caching and raises SyntaxWarning on Python 3.8+.
    assert len(img_size) == 3
    i_shape = tuple([args.bsize] + img_size)

    if args.time_meas:
        import time
        start_time = time.time()
        temp_time = time.time()

    # Load cross-compiled model artifacts; use context managers so the
    # file handles are closed deterministically (originally leaked).
    with open(args.model + ".json") as json_file:
        loaded_json = json_file.read()
    loaded_lib = tvm.module.load("./" + args.model + ".tar")
    with open(args.model + ".params", "rb") as params_file:
        loaded_params = bytearray(params_file.read())

    if args.time_meas:
        # BUGFIX: "%1f" is a width-1 float format (typo); "%.1f" prints one
        # decimal place as clearly intended.
        print("Load module takes", "%.1f" % (time.time() - temp_time),
              " at ", "%.1f" % (time.time() - start_time))
        temp_time = time.time()

    from tvm.contrib import util, cc, graph_runtime
    # Build the graph runtime on CPU and install the trained weights.
    ctx = tvm.cpu()
    module = graph_runtime.create(loaded_json, loaded_lib, ctx)
    module.load_params(loaded_params)

    import nn_format

    # Serve the (non-torch) TVM module over the network.
    nn_format.nn_server(model=module, host=args.ip, port=args.port,
                        i_shape=i_shape, is_torch=False, o_xpose=o_xpose,
                        img_dump=args.img_dump, img_dump_size=args.img_dump_size)
Code example #3 (score: 0)
    if args.time_meas:
        # Lazy import: timing support is only needed when measuring.
        import time
        t_begin = time.time()
        t_mark = time.time()

    # Load the cross-compiled TVM artifacts (graph JSON, shared lib, params).
    model_stem = args.model
    loaded_json = open(model_stem + ".json").read()
    loaded_lib = tvm.module.load("./" + model_stem + ".tar")
    loaded_params = bytearray(open(model_stem + ".params", "rb").read())

    if args.time_meas:
        print("Load module takes", "%1f" % (time.time() - t_mark), " at ",
              "%1f" % (time.time() - t_begin))
        t_mark = time.time()

    from tvm.contrib import util, cc, graph_runtime

    # Instantiate the graph runtime on CPU and install the trained weights.
    ctx = tvm.cpu()
    module = graph_runtime.create(loaded_json, loaded_lib, ctx)
    module.load_params(loaded_params)

    import torch
    import nn_format

    torch.backends.cudnn.enabled = False

    # Serve the non-torch TVM module; o_xpose presumably reorders the
    # output axes — confirm against nn_format.nn_server.
    with torch.no_grad():
        nn_format.nn_server(model=module,
                            host=args.ip,
                            port=args.port,
                            i_shape=(1, 3, 32, 32),
                            is_torch=False,
                            o_xpose=(0, 1, 4, 2, 3))
Code example #4 (score: 0)
File: inference_fc.py — Project: LiGuihong/nonn
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--pt7_path",
                        help="Path of *.pt7",
                        type=str,
                        required=True)
    parser.add_argument("--batch_size", help="Batch size", type=int, default=1)
    parser.add_argument("--ip", help="IP addr", type=str, default='127.0.0.1')
    parser.add_argument("--port", help="Port number", type=int, default=60260)
    args = parser.parse_args()

    import torch
    import net
    # BUGFIX: nn_format was never imported in this script, so the
    # nn_server call below raised NameError; import it with the other
    # runtime dependencies (harmless if also imported elsewhere).
    import nn_format

    torch.backends.cudnn.enabled = False
    # Load the checkpoint onto CPU; 'params' carries the weights and
    # 'stats' presumably the normalization statistics — confirm in net.WRN_fc.
    param = torch.load(args.pt7_path, map_location='cpu')
    sub_model = net.WRN_fc(batch_size=args.batch_size,
                           weights=param['params'],
                           stats=param['stats'])

    # Freeze parameters and switch to eval mode for inference.
    sub_model.set_requires_grad(False)
    sub_model.eval()

    print(sub_model)

    # Serve the fully-connected sub-model without gradient tracking.
    with torch.no_grad():
        nn_format.nn_server(model=sub_model,
                            host=args.ip,
                            port=args.port,
                            i_shape=(1, 256, 8, 8))