Example 1
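Pruning a PyTorch model with LPOT: the training loop is wrapped in a function and handed to the Pruning object as q_func, with on_epoch_*/on_batch_* callbacks marking where the pruner may act.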
    def test_pruning(self):
        import torch
        import torch.nn as nn
        from lpot import Pruning
        # Import paths follow the pattern used for the ONNX Runtime
        # helpers in Example 2.
        from lpot.data.datasets.dummy_dataset import DummyDataset
        from lpot.data.dataloaders.pytorch_dataloader import PyTorchDataLoader

        # 'fake.yaml' holds the pruning configuration (sparsity target,
        # schedule, and so on).
        prune = Pruning('fake.yaml')

        # One shape spec: 100 random samples of 3x256x256.
        dummy_dataset = DummyDataset([tuple([100, 3, 256, 256])])
        dummy_dataloader = PyTorchDataLoader(dummy_dataset)

        def training_func_for_lpot(model):
            prune.model = model
            epochs = 16
            iters = 30
            criterion = nn.CrossEntropyLoss()
            optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
            for nepoch in range(epochs):
                model.train()
                cnt = 0
                # LPOT hook: give the pruner a chance to update its masks.
                prune.on_epoch_begin(nepoch)
                for image, target in dummy_dataloader:
                    prune.on_batch_begin(cnt)
                    print('.', end='')
                    cnt += 1
                    output = model(image)
                    loss = criterion(output, target)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    prune.on_batch_end()
                    if cnt >= iters:
                        break
                prune.on_epoch_end()

        # Rebuild the data with labels for evaluation. Note that
        # training_func_for_lpot captures dummy_dataloader by name, so it
        # also sees this labeled loader (Python closures late-bind).
        dummy_dataset = DummyDataset(tuple([100, 3, 256, 256]), label=True)
        dummy_dataloader = PyTorchDataLoader(dummy_dataset)
        _ = prune(self.model,
                  q_func=training_func_for_lpot,
                  eval_dataloader=dummy_dataloader)
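The snippet assumes a pre-existing self.model and a pruning configuration in 'fake.yaml'. For orientation, here is a minimal, hypothetical plain-PyTorch stand-in for the pieces it relies on; RandomImageDataset and toy_model are illustrative names, not part of lpot, and the dataset mimics what DummyDataset yields here (random 3x256x256 samples with labels):

    import torch
    import torch.nn as nn
    from torch.utils.data import Dataset

    class RandomImageDataset(Dataset):
        """Hypothetical stand-in for lpot's DummyDataset: random
        3x256x256 images paired with integer class labels."""
        def __init__(self, num_samples=100, shape=(3, 256, 256), num_classes=10):
            self.images = torch.randn(num_samples, *shape)
            self.labels = torch.randint(0, num_classes, (num_samples,))

        def __len__(self):
            return self.images.shape[0]

        def __getitem__(self, idx):
            return self.images[idx], self.labels[idx]

    # A toy model with enough weights for magnitude pruning to matter.
    toy_model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d(1),
        nn.Flatten(),
        nn.Linear(8, 10),
    )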
Example 2
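Benchmarking a BERT-style ONNX model: input shapes parsed from the model feed a DummyDataset/ONNXRTDataLoader pair, and either accuracy or raw latency is measured depending on the flags.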
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)

    def eval_func(model):
        return evaluate_onnxrt(args, model, tokenizer, eval_dataloader)

    if args.benchmark:
        model = onnx.load(args.model_path)

        from lpot.data.datasets.dummy_dataset import DummyDataset
        from lpot.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader
        shapes, lows, highs = parse_dummy_input(model, args.benchmark_nums,
                                                args.max_seq_length)
        dummy_dataset = DummyDataset(shapes,
                                     low=lows,
                                     high=highs,
                                     dtype="int64")
        dummy_dataloader = ONNXRTDataLoader(dummy_dataset)

        print('---------------------------------------------------------------')
        if args.accuracy_only:
            results = evaluate_onnxrt(args, model, tokenizer, eval_dataloader)
            print("Accuracy: %.5f" % results)
        else:
            results = evaluate_onnxrt(args,
                                      model,
                                      tokenizer,
                                      dummy_dataloader,
                                      benchmark=True)
            # Mean per-batch time divided by batch size gives per-sample latency.
            latency = np.array(results).mean() / args.eval_batch_size
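parse_dummy_input itself is not shown in this fragment. A rough reconstruction, assuming BERT-style integer inputs shaped (benchmark_nums, max_seq_length), might look like the sketch below; the real helper may instead read shapes from the ONNX graph and use per-input value ranges:

    def parse_dummy_input(model, benchmark_nums, max_seq_length):
        # One (shape, low, high) triple per graph input; a small [0, 1]
        # range is valid for input ids, attention masks and type ids alike.
        shapes, lows, highs = [], [], []
        for _ in model.graph.input:
            shapes.append((benchmark_nums, max_seq_length))
            lows.append(0)
            highs.append(1)
        return shapes, lows, highs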
Example 3
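Benchmarking an ONNX model through LPOT's Benchmark API, with input shapes taken either from the model via parse_dummy_input or from a user-supplied --input_shape string.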
                        help='Shapes of input to model, like 3x224x224 or 128x768, 128x256')
    args = parser.parse_args()
    model = onnx.load(args.model_path)

    shapes, lows, highs, dtypes = parse_dummy_input(model, args.benchmark_nums)

    if args.input_shape:
        # Parse '3x224x224' or '128x768,128x256' into shape tuples,
        # prepending benchmark_nums as the batch dimension. str.split
        # always returns a list, so no extra wrapping is needed.
        input_shapes = args.input_shape.replace(' ', '').split(',')
        input_shapes = [shape.split('x') for shape in input_shapes]
        shapes = [tuple([args.benchmark_nums] + [int(dim) for dim in shape])
                  for shape in input_shapes]

    from lpot.data.datasets.dummy_dataset import DummyDataset
    from lpot.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader
    dummy_dataset = DummyDataset(shapes, low=lows, high=highs, dtype=dtypes, label=True)
    dummy_dataloader = ONNXRTDataLoader(dummy_dataset, batch_size=args.eval_batch_size)

    def eval_func(model):
        # 'reference' is defined earlier in the script (not shown here).
        return evaluate_onnxrt(model, dummy_dataloader, reference)

    if args.benchmark:
        from lpot import Benchmark, common
        evaluator = Benchmark(args.config)
        evaluator.model = common.Model(model)
        evaluator.b_dataloader = dummy_dataloader
        results = evaluator()
        for mode, result in results.items():
            acc, batch_size, result_list = result
            latency = np.array(result_list).mean() / batch_size
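To see the --input_shape handling in isolation, here is the same parsing logic extracted into a standalone helper (parse_shapes is a hypothetical name), with the two formats the argparse help text mentions:

    def parse_shapes(input_shape, benchmark_nums):
        # '3x224x224' or '128x768,128x256' -> shape tuples with
        # benchmark_nums prepended as the batch dimension.
        groups = input_shape.replace(' ', '').split(',')
        return [tuple([benchmark_nums] + [int(dim) for dim in g.split('x')])
                for g in groups]

    assert parse_shapes('3x224x224', 100) == [(100, 3, 224, 224)]
    assert parse_shapes('128x768, 128x256', 100) == [(100, 128, 768), (100, 128, 256)]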