Example #1
def test_bert_full(gpu):
    data_directory = os.path.join(os.path.dirname(__file__), "data")

    bert_path = os.path.join(data_directory, "bert_infer.onnx")
    # Download onnx model from https://polybox.ethz.ch/index.php/s/tVng5qwlrukhZ5A
    if not os.path.exists(bert_path):
        subprocess.check_call([
            "wget",
            "https://polybox.ethz.ch/index.php/s/tVng5qwlrukhZ5A/download",
            "--output-document={}".format(bert_path)
        ])

    model = onnx.load(bert_path)

    dace_model = ONNXModel("bert", model, cuda=gpu, infer_shapes=False)
    feed = {
        "input_ids:0": np.load(os.path.join(data_directory, "input_ids.npy")),
        "input_mask:0": np.load(os.path.join(data_directory, "input_mask.npy")),
        "segment_ids:0": np.load(os.path.join(data_directory, "segment_ids.npy")),
        "ONNX_OneHot216_o0__d0": 2
    }
    outputs = dace_model(**feed)
    unstack_0 = np.load(os.path.join(data_directory, "unstack_0.npy"))
    unstack_1 = np.load(os.path.join(data_directory, "unstack_1.npy"))

    assert np.all(np.abs(outputs[1] - unstack_0) < 1e-4)
    assert np.all(np.abs(outputs[0] - unstack_1) < 1e-4)
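The test excerpts on this page rely on module-level imports (os, subprocess, numpy, onnx, and the daceml ONNXModel wrapper) that are not shown. A minimal, self-contained sketch of the same loading pattern; the import path, input shapes and dtypes below are assumptions for illustration, not values taken from the test data:

import numpy as np
import onnx
from daceml.onnx import ONNXModel  # assumed import path for the wrapper used in these tests

# Load a serialized ONNX graph and wrap it as a DaCe-backed callable.
model = onnx.load("bert_infer.onnx")
dace_model = ONNXModel("bert", model, cuda=False, infer_shapes=False)

# Inputs are passed as keyword arguments named after the ONNX graph inputs;
# the shapes and dtypes here are placeholders, not the real BERT feed.
outputs = dace_model(**{
    "input_ids:0": np.zeros((1, 256), dtype=np.int64),
    "input_mask:0": np.zeros((1, 256), dtype=np.int64),
    "segment_ids:0": np.zeros((1, 256), dtype=np.int64),
    "ONNX_OneHot216_o0__d0": 2
})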
Example #2
def test_slice(gpu, sdfg_name):
    # data_directory is a module-level constant in the original test file (see Example #1)
    model = onnx.load(os.path.join(data_directory, "slice.onnx"))
    dace_model = ONNXModel(sdfg_name, model, cuda=gpu)

    out = dace_model(data=np.ones((2, ), dtype=np.float32))
    assert out.shape == (1, )
    assert out[0] == 1.0
Example #3
def test_slice(gpu):
    model = onnx.load("tests/onnx_files/slice.onnx")
    dace_model = ONNXModel("slice", model, cuda=gpu)

    out = dace_model(data=np.ones((2, ), dtype=np.float32))
    assert out.shape == (1, )
    assert out[0] == 1.0
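Several of these tests take gpu, sdfg_name and default_implementation as pytest fixtures defined elsewhere in the test suite and not shown on this page. A hypothetical conftest.py providing the first two, purely to illustrate the mechanism:

import pytest

@pytest.fixture(params=[False, True], ids=["cpu", "gpu"])
def gpu(request):
    # Run each test once with CPU code generation and once with CUDA.
    return request.param

@pytest.fixture
def sdfg_name(request):
    # Derive a unique, filesystem-friendly SDFG name from the test id.
    return request.node.name.replace("[", "_").replace("]", "").replace("-", "_")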
Example #4
    def _initialize_sdfg(self, dummy_inputs):
        # TODO change to StringIO if not too big
        with tempfile.TemporaryDirectory() as dir_name:
            export_name = os.path.join(dir_name, "export.onnx")

            torch.onnx.export(
                self.model,
                dummy_inputs,
                export_name,
                verbose=logging.root.level <= logging.DEBUG,
                training=(TrainingMode.TRAINING
                          if self.train else TrainingMode.EVAL),
                opset_version=12,
                strip_doc_string=False,
                export_params=not self.backward,
                # PyTorch's constant folding adds new unnamed inputs to the graph and removes
                # some of the model's named parameters, which breaks matching against the
                # state dict, so it is disabled here; DaCeML's own constant folding is more flexible.
                do_constant_folding=False)

            onnx_model = infer_shapes(onnx.load(export_name))
            self.onnx_model = onnx_model

            dace_model = ONNXModel(self.sdfg_name,
                                   onnx_model,
                                   infer_shapes=False,
                                   cuda=self.cuda,
                                   parent_pytorch_module=self.model)
            self.sdfg = dace_model.sdfg
            self.dace_model = dace_model

            self.sdfg.validate()

            for _, hook in self.post_onnx_hooks.items():
                hook(self.dace_model)

            if self.backward:
                function = make_backward_function(dace_model)

                for _, hook in self.post_autodiff_hooks.items():
                    hook(function._forward_model.sdfg, function._backward_sdfg)

                def forward(*args):
                    args_and_params = list(args)
                    args_and_params.extend(self.parameters())
                    return function.apply(*args_and_params)

                return forward
            else:
                return dace_model
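The _initialize_sdfg method above comes from daceml's PyTorch module wrapper. A short usage sketch, assuming the public daceml.pytorch.DaceModule class is the entry point that eventually calls this method on the first forward pass:

import torch
import torch.nn as nn
from daceml.pytorch import DaceModule  # assumed public wrapper class

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 4)

    def forward(self, x):
        return torch.relu(self.fc(x))

# Wrapping the module exports it to ONNX and builds the SDFG lazily;
# the first call triggers the initialization shown above.
dace_net = DaceModule(TinyNet())
out = dace_net(torch.rand(2, 8))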
Example #5
def test_efficientnet(gpu, default_implementation, sdfg_name):
    if gpu:
        pytest.skip("GPU EfficientNet is currently broken due to Gemv")
    data_directory = os.path.join(os.path.dirname(__file__), "data")

    path = os.path.join(data_directory, "efficientnet.onnx")
    # Download model
    if not os.path.exists(path):
        subprocess.check_call([
            "wget",
            "http://spclstorage.inf.ethz.ch/~rauscho/efficientnet-lite4-11.onnx",
            "--output-document={}".format(path), "--no-verbose"
        ])

    model = onnx.load(path)

    dace_model = ONNXModel(sdfg_name, model, cuda=gpu)
    test_input = np.random.rand(1, 3, 224, 224).astype(np.float32)
    dace_model(test_input)
Example #6
    def _initialize_sdfg(self, dummy_inputs) -> ONNXModel:
        with tempfile.TemporaryDirectory() as dir_name:
            export_name = os.path.join(dir_name, "export.onnx")

            torch.onnx.export(self.model,
                              dummy_inputs,
                              export_name,
                              verbose=logging.root.level <= logging.DEBUG,
                              training=(TrainingMode.TRAINING
                                        if self.train else TrainingMode.EVAL),
                              opset_version=12)

            onnx_model = infer_shapes(onnx.load(export_name))
            self.onnx_model = onnx_model

            dace_model = ONNXModel("dace_model",
                                   onnx_model,
                                   infer_shapes=False,
                                   cuda=self.cuda,
                                   apply_strict=self.apply_strict)
            self.sdfg = dace_model.sdfg
            self.sdfg.validate()

            return dace_model
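Both initializers store the resulting graph as self.sdfg, so the generated SDFG can be validated, inspected, or saved after construction. A small sketch of the same pattern using the ONNXModel wrapper directly (file names here are placeholders):

import onnx
from daceml.onnx import ONNXModel  # assumed import path, as in the test examples above

model = onnx.load("export.onnx")            # placeholder path to an exported model
dace_model = ONNXModel("dace_model", model, cuda=False)

dace_model.sdfg.validate()                  # the same check both initializers perform
dace_model.sdfg.save("dace_model.sdfg")     # serialize the SDFG for offline inspection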
Example #7
def test_reshape(gpu, default_implementation, sdfg_name):
    # data_directory is a module-level constant in the original test file (see Example #1)
    model = onnx.load(os.path.join(data_directory, "reshape.onnx"))
    dace_model = ONNXModel(sdfg_name, model, cuda=gpu)
    dace_model()
Example #8
def test_reshape(gpu):
    model = onnx.load("tests/onnx_files/reshape.onnx")
    dace_model = ONNXModel("reshape", model, cuda=gpu)
    dace_model()
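The small ONNX files referenced by these tests (slice.onnx, reshape.onnx) are not shown on this page. A hypothetical sketch of how a comparable input-free Reshape test model could be generated with onnx.helper; the actual files in the repository may differ:

import onnx
from onnx import TensorProto, helper

# Both the data tensor and the target shape are initializers, so the model
# takes no runtime inputs, matching the argument-free call in test_reshape.
data = helper.make_tensor("data", TensorProto.FLOAT, dims=[2, 3],
                          vals=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
shape = helper.make_tensor("shape", TensorProto.INT64, dims=[2], vals=[3, 2])

node = helper.make_node("Reshape", inputs=["data", "shape"], outputs=["reshaped"])
graph = helper.make_graph(
    [node], "reshape_test",
    inputs=[],
    outputs=[helper.make_tensor_value_info("reshaped", TensorProto.FLOAT, [3, 2])],
    initializer=[data, shape])

model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 12)])
onnx.checker.check_model(model)
onnx.save(model, "reshape.onnx")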