Example #1
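Loads an MLP model from mlp.onnx with sonnx.load, wraps it in a SINGA backend via sonnx.prepare, and trains it with SGD and softmax cross-entropy; the data and label arrays are assumed to be loaded elsewhere in the script.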
def onnx_to_singa(niter, use_cpu=False):
    if use_cpu:
        print("Using CPU")
        dev = device.get_default_device()
    else:
        print("Using GPU")
        dev = device.create_cuda_gpu()
    model = sonnx.load("mlp.onnx")
    backend = sonnx.prepare(model, device=dev)
    sgd = opt.SGD(0.1)
    inputs = Tensor(
        data=data,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="input",
    )
    target = Tensor(
        data=label,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="target",
    )

    for i in range(niter):
        y = backend.run([inputs])[0]
        loss = autograd.softmax_cross_entropy(y, target)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)
        loss_rate = tensor.to_numpy(loss)[0]
        accuracy_rate = accuracy(tensor.to_numpy(y), label)

        print("Iter {}, accurate={}, loss={}".format(i, accuracy_rate, loss_rate))
Example #2
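Exports a Sigmoid operation to ONNX with sonnx.to_onnx and checks that the graph imported through sonnx.prepare reproduces the SINGA output.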
    def test_sigmoid(self):
        X = np.array([[-1, 0, 1]]).astype(np.float32)
        x = tensor.from_numpy(X)
        x.to_device(gpu_dev)
        y = autograd.Sigmoid()(x)[0]

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])
        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #3
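Round-trips a MaxPool2d operation through the ONNX frontend and backend and compares the two outputs.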
    def test_max_pool(self):
        x = tensor.Tensor(shape=(2, 3, 4, 4), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        y = autograd.MaxPool2d(2, 2, 0)(x)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #4
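Round-trips a bias-free Linear layer; a clone of the input is passed to the backend so both paths see the same data.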
    def test_linear(self):
        x = tensor.Tensor(shape=(2, 20), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        x1 = x.clone()
        y = autograd.Linear(20, 1, bias=False)(x)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x1])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #5
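Exports a graph of two Conv2d layers and runs the backend with last_layers=-1, so its output should match the intermediate activation x1.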
    def test_inference(self):
        x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        x1 = autograd.Conv2d(3, 1, 2)(x)
        y = autograd.Conv2d(1, 1, 2)(x1)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x], last_layers=-1)

        np.testing.assert_array_almost_equal(tensor.to_numpy(x1),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #6
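Checks the element-wise sqrt operator through the ONNX round trip.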
    def test_Sqrt(self):
        x = np.array([0.1, 1.0, 0.4, 4.0, 0.9,
                      9.0]).reshape(3, 2).astype(np.float32)
        x = tensor.from_numpy(x)
        y = autograd.sqrt(x)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #7
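Checks the ReLU operator; XT holds the expected element-wise result, while the assertion compares the SINGA output with the ONNX-backend output.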
    def test_relu(self):
        X = np.array([0.8, -1.2, 3.3, -3.6, -0.5,
                      0.5]).reshape(3, 2).astype(np.float32)
        XT = np.array([0.8, 0, 3.3, 0, 0, 0.5]).reshape(3,
                                                        2).astype(np.float32)
        x = tensor.from_numpy(X)
        x.to_device(gpu_dev)
        y = autograd.ReLU()(x)[0]

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])
        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #8
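Checks the transpose operator with permutation (1, 2, 0); the NumPy reference transpose is overwritten by the SINGA result, and the input stays on the CPU device while the backend is prepared on the GPU.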
    def test_transpose(self):
        x = np.random.randn(3, 2, 1)
        y = x.transpose(1, 2, 0)

        x = tensor.from_numpy(x)
        x.to_device(cpu_dev)

        y = autograd.transpose(x, (1, 2, 0))

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #9
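Checks Concat with two inputs, passing both tensors to sonnx.to_onnx and sg_ir.run.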
    def test_concat(self):
        X1 = np.random.randn(3, 4, 5).astype(np.float32)
        X2 = np.random.randn(3, 4, 5).astype(np.float32)

        x1 = tensor.from_numpy(X1)
        x2 = tensor.from_numpy(X2)
        x1.to_device(gpu_dev)
        x2.to_device(gpu_dev)
        y = autograd.Concat()(x1, x2)[0]

        # frontend
        model = sonnx.to_onnx([x1, x2], [y])

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x1, x2])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #10
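Checks the hardsigmoid operator with a = 0.2 and g = 0.5.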
    def test_HardSigmoid(self):
        x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
                      0.9]).reshape(3, 2).astype(np.float32)
        a = 0.2
        g = 0.5

        x = tensor.from_numpy(x)
        x.to_device(gpu_dev)
        y = autograd.hardsigmoid(x, a, g)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #11
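Checks the elu operator with alpha = 1.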
    def test_ELu(self):
        x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
                      0.9]).reshape(3, 2).astype(np.float32)
        # ELU: y = alpha * (e^x - 1) for x <= 0, y = x for x > 0
        a = 1.
        x = tensor.from_numpy(x)
        x.to_device(gpu_dev)

        y = autograd.elu(x, a)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #12
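Checks Matmul on a 4x5 by 5x4 product.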
    def test_matmul(self):
        X1 = np.random.randn(4, 5).astype(np.float32)
        X2 = np.random.randn(5, 4).astype(np.float32)

        x1 = tensor.from_numpy(X1)
        x2 = tensor.from_numpy(X2)
        x1.to_device(gpu_dev)
        x2.to_device(gpu_dev)

        y = autograd.Matmul()(x1, x2)[0]

        # frontend
        model = sonnx.to_onnx([x1, x2], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x1, x2])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #13
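Checks the element-wise min of two tensors.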
    def test_min(self):
        X0 = np.array([0.1, 0.2, 2.0, 0.0, 0.1,
                       0.2]).reshape(3, 2).astype(np.float32)
        X1 = np.array([1.0, 2.0, 1.0, 2.1, 0.0,
                       2.0]).reshape(3, 2).astype(np.float32)
        x0 = tensor.from_numpy(X0)
        x1 = tensor.from_numpy(X1)
        x0.to_device(gpu_dev)
        x1.to_device(gpu_dev)

        y = autograd.min(x0, x1)

        # frontend
        model = sonnx.to_onnx([x0, x1], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x0, x1])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #14
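Checks the element-wise greater comparison; the inputs are placed on the CPU device while the backend is prepared on the GPU.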
    def test_Greater(self):
        x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
                       0.9]).reshape(3, 2).astype(np.float32)
        x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3,
                                                         2).astype(np.float32)
        x0 = tensor.from_numpy(x0)
        x1 = tensor.from_numpy(x1)
        x0.to_device(cpu_dev)
        x1.to_device(cpu_dev)

        y = autograd.greater(x0, x1)

        # frontend
        model = sonnx.to_onnx([x0, x1], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x0, x1])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #15
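A full training loop over an imported cnn.onnx model: each mini-batch is wrapped in SINGA tensors, run through the ONNX backend, and updated with SGD, with accuracy and loss averaged per epoch.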
def onnx_to_singa(epochs, use_cpu=False, batchsize=32):
    (x_train, y_train), (x_test, y_test), dev = common(use_cpu)
    model = sonnx.load("cnn.onnx")
    backend = sonnx.prepare(model, dev)
    autograd.training = True
    sgd = opt.SGD(lr=0.01)
    niter = x_train.shape[0] // batchsize
    for epoch in range(epochs):
        accuracy_rate = 0.0
        loss_rate = 0.0
        for i in range(niter):
            inputs = tensor.Tensor(
                device=dev,
                data=x_train[i * batchsize : (i + 1) * batchsize],
                stores_grad=False,
                name="input",
            )
            targets = tensor.Tensor(
                device=dev,
                data=y_train[i * batchsize : (i + 1) * batchsize],
                requires_grad=False,
                stores_grad=False,
                name="target",
            )
            y = backend.run([inputs])[0]
            loss = autograd.softmax_cross_entropy(y, targets)

            accuracy_rate += accuracy(
                tensor.to_numpy(y), y_train[i * batchsize : (i + 1) * batchsize]
            )
            loss_rate += tensor.to_numpy(loss)[0]

            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)

        print("accuracy is {}, loss is {}".format(accuracy_rate / niter, loss_rate / niter))
Example #16
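Loads cnn.onnx with the plain onnx package, prepares it with sonnx.prepare, and runs a single forward and loss computation on the first 100 training samples; the dataset, preprocessing helpers, and dev come from the surrounding script.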
    epochs = 1

    sgd = opt.SGD(lr=0.00)

    x_train = preprocess(train[0])
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print('the shape of training data is', x_train.shape)
    print('the shape of training label is', y_train.shape)
    print('the shape of testing data is', x_test.shape)
    print('the shape of testing label is', y_test.shape)

    model = onnx.load('cnn.onnx')
    rep = sonnx.prepare(model, dev)
    print('finish init')
    autograd.training = True
    # training process
    for epoch in range(epochs):
        inputs = tensor.Tensor(device=dev,
                               data=x_train[0:100],
                               stores_grad=False)
        targets = tensor.Tensor(device=dev,
                                data=y_train[0:100],
                                requires_grad=False,
                                stores_grad=False)
        y0 = rep.run([inputs])[0]
        loss = autograd.softmax_cross_entropy(y0, targets)
        print('outputs', tensor.to_numpy(loss)[0])
Example #17
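Inference loop that appears to serve a BERT-style question-answering model: the first batch is used as init_inputs when preparing the ONNX model, and each sample is converted to SINGA tensors before model.forward; the excerpt is cut off inside the result-handling loop.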
    tmp_dict = {}
    for idx in range(0, n):
        logging.info("starting infer sample {}...".format(idx))
        item = eval_examples[idx]
        inputs = [
            np.array([item.qas_id], dtype=np.int32),
            segment_ids[idx:idx + bs].astype(np.int32),
            input_mask[idx:idx + bs].astype(np.int32),
            input_ids[idx:idx + bs].astype(np.int32),
        ]

        if sg_ir is None:
            # prepare the model
            logging.info("model is none, prepare model...")
            sg_ir = sonnx.prepare(onnx_model,
                                  device=dev,
                                  init_inputs=inputs,
                                  keep_initializers_as_inputs=False)
            model = Infer(sg_ir)

        x_batch = []
        for inp in inputs:
            tmp_tensor = tensor.from_numpy(inp)
            tmp_tensor.to_device(dev)
            x_batch.append(tmp_tensor)

        logging.info("model running for sample {}...".format(idx))
        outputs = model.forward(x_batch)

        logging.info("hanlde the result of sample {}...".format(idx))
        result = []
        for outp in outputs:
Example #18
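Downloads the ResNet18 v1 model from the ONNX model zoo, fixes its batch size to 1, prepares it on a CUDA device, and sets up inference; the commented-out block shows how the bundled test data could be used for verification.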
    url = 'https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v1/resnet18v1.tar.gz'
    download_dir = '/tmp/'
    model_path = os.path.join(download_dir, 'resnet18v1', 'resnet18v1.onnx')

    logging.info("onnx load model...")
    download_model(url)
    onnx_model = onnx.load(model_path)

    # set batch size
    onnx_model = update_batch_size(onnx_model, 1)

    # prepare the model
    logging.info("prepare model...")
    dev = device.create_cuda_gpu()
    sg_ir = sonnx.prepare(onnx_model, device=dev)
    autograd.training = False
    model = Infer(sg_ir)

    # verify the test
    # from utils import load_dataset
    # inputs, ref_outputs = load_dataset(os.path.join('/tmp', 'resnet18v1', 'test_data_set_0'))
    # x_batch = tensor.Tensor(device=dev, data=inputs[0])
    # outputs = model.forward(x_batch)
    # for ref_o, o in zip(ref_outputs, outputs):
    #     np.testing.assert_almost_equal(ref_o, tensor.to_numpy(o), 4)

    # inference
    logging.info("preprocessing...")
    img, labels = get_image_labe()
    img = preprocess(img)
Example #19
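A standalone script that loads alexnet.onnx (exported from PyTorch), prepares it on CPU or GPU depending on --use_cpu, and runs a random Gaussian batch through the backend.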
# load and run the onnx model exported from pytorch
# https://github.com/onnx/tutorials/blob/master/tutorials/PytorchOnnxExport.ipynb


import argparse
from singa import device
from singa import sonnx
from singa import tensor


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Load model from pytorch")
    parser.add_argument("--use_cpu", action="store_true")
    args = parser.parse_args()
    if args.use_cpu:
        print("Using CPU")
        dev = device.get_default_device()
    else:
        print("Using GPU")
        dev = device.create_cuda_gpu()
    model = sonnx.load("alexnet.onnx")
    backend = sonnx.prepare(model, dev)
    input_name = model.graph.input[0].name
    inputs = tensor.Tensor(shape=(2, 3, 224, 224), device=dev, name=input_name)
    inputs.gaussian(0, 0.01)
    y = backend.run([inputs])[0]