Example #1: export a pretrainedmodels network to ONNX and rewrite its batch dimension
import os

import onnx
import pretrainedmodels
import torch


def get_model(opts):

    path = "{}/{}/".format(opts.model_path, opts.model_name)
    log_path = "{}/".format(opts.log_path)
    filename = "model.onnx"

    if not os.path.exists(path):
        print("Creating models directory")
        os.makedirs(path)

    if not os.path.exists(log_path):
        print("Creating logs directory")
        os.makedirs(log_path)

    # Fetch the pretrained weights and export to ONNX; pretrainedmodels
    # downloads (and caches) the weights on first use.
    if not os.path.isfile(path + filename):
        print(f"Downloading model to {path + filename}")

    # Create the right input shape
    dummy_input = torch.randn(1, 3, 224, 224)
    model = pretrainedmodels.__dict__[opts.model_name](num_classes=1000,
                                                       pretrained='imagenet')
    torch.onnx.export(model, dummy_input, path + filename)

    model_path = path + filename
    onnx_model = onnx.load(model_path)
    onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_value = \
        opts.micro_batch_size
    print(f"Converting model to batch size {opts.micro_batch_size} "
          f"and saving to {path}model_{opts.micro_batch_size}.onnx")
    onnx.save(onnx_model, path + f"model_{opts.micro_batch_size}.onnx")
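A quick way to confirm the rewrite took effect (a minimal sketch, assuming onnxruntime is installed and reusing the names from the example above) is to load the saved file and read back the input shape:

import onnxruntime

sess = onnxruntime.InferenceSession(
    path + f"model_{opts.micro_batch_size}.onnx",
    providers=["CPUExecutionProvider"])
# The first dimension should now equal opts.micro_batch_size
print(sess.get_inputs()[0].shape)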
Example #2: dynamic integer quantization of an ONNX model
    def _quantize_onnx(self, filename):
        onnx_model = onnx.load(filename)
        onnx.checker.check_model(onnx_model)
        quantized_model = quantize(
            onnx_model,
            quantization_mode=QuantizationMode.IntegerOps,
            static=False)
        onnx.save(quantized_model, filename)
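The quantize()/QuantizationMode names above appear to come from the legacy onnxruntime.quantization interface. In recent onnxruntime releases, the equivalent weights-only integer quantization is quantize_dynamic; a minimal sketch, with placeholder file paths:

from onnxruntime.quantization import QuantType, quantize_dynamic

# Quantize weights to int8 and rewrite supported ops to integer kernels
quantize_dynamic("model.onnx", "model.quant.onnx", weight_type=QuantType.QInt8)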
Example #3: ONNX export with an optional alternate download URL and pretrained-model path
import os

import onnx
import pretrainedmodels
import torch


def get_model(opts):

    path = "{}/{}/".format(opts.model_path, opts.model_name)
    log_path = "{}/".format(opts.log_path)
    filename = "model.onnx"

    if not os.path.exists(path):
        print("Creating models directory")
        os.makedirs(path)

    if not os.path.exists(log_path):
        print("Creating logs directory")
        os.makedirs(log_path)

    dataset = "imagenet"

    if opts.url:
        # Monkey patch an alternate URL into pretrained models package
        pretrainedmodels.models.resnext.pretrained_settings[
            opts.model_name
        ][dataset]["url"] = opts.url
        print(f"Download URL set to {opts.url}")

    pretrained_model_base_path = opts.pretrained_model_path

    if not pretrained_model_base_path:
        # Fetch the pretrained weights and export to ONNX; pretrainedmodels
        # downloads (and caches) the weights on first use.
        if not os.path.isfile(path + filename):
            print(f"Downloading model to {path + filename}")
        pretrained_model_path = path + filename

        # Create the right input shape
        dummy_input = torch.randn(1, 3, 224, 224)
        model = pretrainedmodels.__dict__[opts.model_name](
            num_classes=1000, pretrained=dataset)
        torch.onnx.export(model, dummy_input, pretrained_model_path)
    else:
        pretrained_model_path = os.path.join(
            pretrained_model_base_path,
            opts.model_name,
            filename
        )

    onnx_model = onnx.load(pretrained_model_path)
    onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_value = \
        opts.micro_batch_size
    print(f"Converting model to batch size {opts.micro_batch_size} "
          f"and saving to {path}model_{opts.micro_batch_size}.onnx")
    onnx.save(onnx_model, path + f"model_{opts.micro_batch_size}.onnx")
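Editing graph.input only changes the declared input shape; it does not update any shapes recorded elsewhere in the graph. A minimal sketch of a follow-up sanity check with ONNX shape inference and the checker:

import onnx

m = onnx.load(path + f"model_{opts.micro_batch_size}.onnx")
m = onnx.shape_inference.infer_shapes(m)  # propagate the new batch dimension
onnx.checker.check_model(m)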
Example #4: export a YOLO model to ONNX with an optional dynamic batch dimension
def main(args):
    import os
    from config import Config

    total_config = Config()
    if not args.dataset or args.dataset not in total_config.DATASETS.keys():
        raise Exception("specify one of the datasets to use in {}".format(
            list(total_config.DATASETS.keys())))
    if not args.snapshot or not os.path.isfile(args.snapshot):
        raise Exception("invalid snapshot")

    dataset = args.dataset
    dataset_class = total_config.DATASETS[dataset]
    dataset_params = total_config.DATASET_PARAMS[dataset]
    model = YoloNet(dataset_config=dataset_params)
    model.load_state_dict(torch.load(args.snapshot)["state_dict"])
    model.eval()

    if args.batch_size:
        batch_size = args.batch_size
    else:
        batch_size = 1

    x = torch.randn(batch_size, 3, dataset_params["img_h"],
                    dataset_params["img_w"])
    torch.onnx.export(
        model,
        x,
        args.onnx_weight_file,
        verbose=True,
        input_names=["input"],
        output_names=["output"],
        do_constant_folding=True,
        operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
        opset_version=11,
    )

    if args.batch_size:
        return

    import onnx

    mp = onnx.load(args.onnx_weight_file)
    # Make the batch dimension symbolic so the saved model accepts any batch size
    mp.graph.input[0].type.tensor_type.shape.dim[0].dim_param = "batch_size"
    mp.graph.output[0].type.tensor_type.shape.dim[0].dim_param = "batch_size"
    onnx.save(mp, "output.onnx")
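The post-hoc dim_param patch can also be expressed at export time with dynamic_axes, which marks the batch dimension symbolic in one step; a minimal sketch reusing the names from the example above:

torch.onnx.export(
    model,
    x,
    "output.onnx",
    input_names=["input"],
    output_names=["output"],
    opset_version=11,
    # Leave dimension 0 of the input and output symbolic
    dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}},
)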
Example #5: export to ONNX and simplify with onnxsim for ERAN verification
    def __init__(self, dataset, model):
        super(ERANBase, self).__init__(dataset, model)

        self.model.eval()

        # export the model to onnx file 'tmp/tmp.onnx'
        input_shape = get_input_shape(dataset)
        x = torch.randn(1,
                        input_shape[0],
                        input_shape[1],
                        input_shape[2],
                        requires_grad=True).cuda()
        torch_out = self.model(x)

        torch.onnx.export(self.model, x, 'tmp/tmp.onnx')
        # export_params=True,
        # opset_version=10,
        # do_constant_folding=True,
        # input_names=['input'],
        # output_names=['output'],
        # dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})

        model_opt, check_ok = onnxsim.simplify('tmp/tmp.onnx',
                                               check_n=3,
                                               perform_optimization=True,
                                               skip_fuse_bn=True,
                                               input_shapes={
                                                   None: (1, input_shape[0],
                                                          input_shape[1],
                                                          input_shape[2])
                                               })
        assert check_ok, "Simplify ONNX model failed"

        onnx.save(model_opt, 'tmp/tmp_simp.onnx')
        new_model, is_conv = read_onnx_net('tmp/tmp_simp.onnx')
        self.new_model, self.is_conv = new_model, is_conv

        self.config = {}

        self.eran = ERAN(self.new_model, is_onnx=True)
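torch_out is computed above but never compared against the exported graph. A minimal sketch of such a parity check, assuming onnxruntime and numpy are available and reusing x and torch_out from the example:

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession('tmp/tmp_simp.onnx',
                                    providers=['CPUExecutionProvider'])
ort_out = sess.run(None, {sess.get_inputs()[0].name:
                          x.detach().cpu().numpy()})[0]
np.testing.assert_allclose(torch_out.detach().cpu().numpy(), ort_out,
                           rtol=1e-3, atol=1e-5)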
Example #6: export a segmentation model and compare PyTorch, ONNX Runtime, and TensorRT outputs
def main():
    args = parse_arguments()
    with open(args.config) as f:
        config = json.load(f)

    # Dataset used for training the model
    dataset_type = config['train_loader']['type']
    loader = getattr(dataloaders, dataset_type)(**config['train_loader']['args'])
    to_tensor = transforms.ToTensor()
    #normalize = transforms.Normalize(loader.MEAN, loader.STD)
    num_classes = loader.dataset.num_classes
    palette = loader.dataset.palette
    base_size = loader.dataset.base_size

    # Model
    model = getattr(models, config['arch']['type'])(num_classes,
                                                    **config['arch']['args'])
    available_gpus = list(range(torch.cuda.device_count()))
    device = torch.device('cuda:0' if len(available_gpus) > 0 else 'cpu')

    checkpoint = torch.load(args.model)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint.keys():
        print("getting checkpoint")
        checkpoint = checkpoint['state_dict']

    if 'module' in list(checkpoint.keys())[0] and not isinstance(
            model, torch.nn.DataParallel):
        print('convert model to DataParallel')
        model = torch.nn.DataParallel(model)

    model.load_state_dict(checkpoint)
    model.to(device)
    '''
    print("saving model")
    torch.save(model.module.state_dict(), 'model.pkl') 
    print(model.module.state_dict())
    '''
    model.eval()

    ###########################################################################################################

    batch_size = 1

    image = Image.open(args.images).convert('RGB')
    original_size = image.size
    image_name = os.path.basename(args.images)
    target = Image.open("/home/ubuntu/TM2/mask/" + image_name)

    if base_size:
        image = image.resize(size=(base_size, base_size),
                             resample=Image.BILINEAR)
        target = target.resize(size=(base_size, base_size),
                               resample=Image.NEAREST)

    #dummy_input = torch.randn(batch_size, 3, base_size, base_size, device="cuda")
    dummy_input = to_tensor(image).unsqueeze(0).to(device)
    print("exporting model")
    # Export the model
    torch.onnx.export(
        model.module,  # model being run
        dummy_input,  # model input (or a tuple for multiple inputs)
        "model.onnx",  # where to save the model (can be a file or file-like object)
        export_params=True,  # store the trained parameter weights inside the model file
        do_constant_folding=True,  # execute constant folding for optimization
        input_names=['input'],  # the model's input names
        output_names=['output'],  # the model's output names
        # dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}},  # variable length axes
        verbose=True,
        opset_version=11,
        keep_initializers_as_inputs=True)

    # #####################################################################
    print("checking onnx model")
    onnx_model = onnx.load("model.onnx")
    onnx.checker.check_model(onnx_model)
    onnx.checker.check_graph(onnx_model.graph)
    print("onnx model is checked")

    # #####################################################################
    print("optimizing onnx model")
    optimized_onnx_model = optimize(onnx_model)
    onnx.checker.check_model(optimized_onnx_model)
    onnx.checker.check_graph(optimized_onnx_model.graph)
    print("optimization done")
    onnx.save(optimized_onnx_model, 'optimized_model.onnx')

    ####################################################################
    pytorch_input = to_tensor(image).unsqueeze(0)
    pytorch_time = time.time()
    with torch.no_grad():
        pytorch_prediction = model(pytorch_input.to(device))
        pytorch_prediction = to_numpy(pytorch_prediction)
        print("pytorch time used:{}".format(time.time() - pytorch_time))

    #######################################################################

    ort_session = onnxruntime.InferenceSession("optimized_model.onnx")
    ort_input = to_numpy(pytorch_input)
    ort_inputs = {ort_session.get_inputs()[0].name: ort_input}
    ort_time = time.time()
    ort_outs = ort_session.run(None, ort_inputs)
    ort_prediction = ort_outs[0]
    print("ort time used:{}".format(time.time() - ort_time))

    # compare ONNX Runtime and PyTorch results
    np.testing.assert_allclose(pytorch_prediction,
                               ort_prediction,
                               rtol=1e-03,
                               atol=1e-05)
    print(
        "Exported model has been tested with ONNXRuntime, and the result looks good!"
    )
    ###################################################################

    with open("model.engine", 'rb') as f, trt.Runtime(
            trt.Logger(trt.Logger.WARNING)) as runtime:
        print("Trying Tensorrt")
        shape_of_output = (batch_size, num_classes, 128, 128)
        engine = runtime.deserialize_cuda_engine(f.read())
        inputs, outputs, bindings, stream = allocate_buffers(
            engine)  # input, output: host # bindings
        with engine.create_execution_context() as context:
            inputs[0].host = ort_input.reshape(-1)

            t1 = time.time()
            trt_outputs = do_inference(context,
                                       bindings=bindings,
                                       inputs=inputs,
                                       outputs=outputs,
                                       stream=stream)  # numpy data
            t2 = time.time()
            feat = postprocess_the_outputs(trt_outputs[0], shape_of_output)

            print('TensorRT ok')
            print("Inference time with the TensorRT engine: {}".format(t2 -
                                                                       t1))
            np.testing.assert_allclose(pytorch_prediction,
                                       feat,
                                       rtol=1e-03,
                                       atol=1e-05)
            print('All completed!')
    print("DLLMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM")
Example #7: convert a CIFAR VGG16 checkpoint to ONNX and run optimizer passes
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)


# Update the input name and path for your PyTorch model
input_pytorch_model = '/home/pwq/pytorch-cifar/checkpoint/vggcifar16baselineckpt.pth'
model = VGG('VGG16')
#model = ResNet_small(Bottleneck, [3,4,6,3])
net = torch.load(input_pytorch_model)
model.load_state_dict(net['net'])
torch.save(model, './model.pth')

# Create input with the correct dimensions for your model (CIFAR images are 3x32x32)
dummy_model_input = torch.randn(1, 3, 32, 32)

# Change this path to the output name and path for the ONNX model
output_onnx_model = 'vggcifar16baselineckpt.onnx'

# load the PyTorch model
model = torch.load('./model.pth')
# export the PyTorch model as an ONNX protobuf
torch.onnx.export(model, dummy_model_input, output_onnx_model)

from onnx import optimizer

onnx_model = onnx.load(output_onnx_model)
passes = ["extract_constant_to_initializer", "eliminate_unused_initializer"]
optimized_model = optimizer.optimize(onnx_model, passes)

onnx.save(optimized_model, 'vggcifar16baselineckpt.onnx')
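Note that onnx.optimizer was removed from recent onnx releases; the same passes now live in the standalone onnxoptimizer package. A minimal sketch of the equivalent call:

import onnx
import onnxoptimizer

model = onnx.load(output_onnx_model)
passes = ["extract_constant_to_initializer", "eliminate_unused_initializer"]
optimized = onnxoptimizer.optimize(model, passes)
onnx.save(optimized, output_onnx_model)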
Example #8: export variants and offline ONNX graph optimization
        torch.onnx.export(model, dummy_input, sys.argv[5])
        torch.onnx.export(model, dummy_input,
                          sys.argv[5][:-5] + "_backup.onnx")
    else:
        torch.onnx.export(model,
                          dummy_input,
                          sys.argv[5],
                          keep_initializers_as_inputs=True)
        torch.onnx.export(model,
                          dummy_input,
                          sys.argv[5][:-5] + "_backup.onnx",
                          keep_initializers_as_inputs=True)
else:
    onnx_in = sys.argv[1]
    onnx_out = sys.argv[2]

######################################
#  Optimize onnx                     #
######################################

m = onnx.load(onnx_in)

other.pytorch_check_initializer_as_input(m.graph)
m = combo.preprocess(m)
m = combo.pytorch_constant_folding(m)

m = combo.common_optimization(m)

m = combo.postprocess(m)
onnx.save(m, onnx_out)
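combo and other come from an external optimizer package that is not shown here. pytorch_check_initializer_as_input presumably relates to graphs exported with keep_initializers_as_inputs=True, where every weight also appears as a graph input. A minimal hand-rolled sketch of stripping such inputs (an assumption about what the helper addresses, not its actual implementation):

# Drop graph inputs that are really initializers (weights), keeping only true inputs
init_names = {init.name for init in m.graph.initializer}
real_inputs = [i for i in m.graph.input if i.name not in init_names]
del m.graph.input[:]
m.graph.input.extend(real_inputs)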
Example #9: export with torch.onnx, check the graph, and simplify with onnxsim
# mymodel = create_model(
#         model_name='mobilenetv2_075',
#         pretrained=False,
#         num_classes=2,
#         in_chans=3,
#         global_pool='avg',
#         checkpoint_path='/home/night/PycharmProjects/Picture_Classification/pytorch-image-models/checkpoints/face_mask/mobilenetv2_075_no_prefetcher/mobilenetv2_075.pth.tar')  # '/home/night/PycharmProjects/Picture_Classification/pytorch-image-models/checkpoints/face_mask/checkpoint-36.pth.tar'  # './checkpoints/train/20200319-182337-mobilenetv2_100-224/checkpoint-14.pth.tar'

print("=====> convert pytorch model to onnx...")
# An example input you would normally provide to your model's forward() method
# x = torch.rand(1, 3, 224, 224)
x = torch.rand(1, args.input_channel, args.input_size,
               args.input_size)  # for bincamera classification assignment

# Export the model
# torch_out = torch.onnx._export(mymodel, x, "mobilenetv2.onnx", export_params=True)
torch.onnx.export(mymodel, x, args.onnx_model, export_params=True)

print("=====> check onnx model...")
model = onnx.load(args.onnx_model)
onnx.checker.check_model(model)
# Print a human-readable representation of the graph
print(onnx.helper.printable_graph(model.graph))

print("=====> Simplifying...")
model_opt, check_ok = onnxsim.simplify(args.onnx_model)
assert check_ok, "Simplified ONNX model could not be validated"

onnx.save(model_opt, args.onnx_model_sim)
print("onnx model simplify OK!")