Example #1
    def test_rnn_with_seq_lengths(self, dev=gpu_dev):
        bs = 2
        seq_length = 3
        hidden_size = 2
        em_size = 2
        # Batch of two sequences with three steps each; the second sequence
        # is only two steps long, so its third step is zero padding.
        x_np = np.array([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
                         [[0.3, 0.3], [0.4, 0.4], [0.0, 0.0]]]).astype(np.float32)
        y_np = np.array([[0.4, 0.4], [0.5, 0.5]]).astype(np.float32)
        seq_lengths_np = np.array([3, 2]).astype(np.int32)

        x = tensor.from_numpy(x_np)
        x.to_device(dev)
        y = tensor.from_numpy(y_np)
        y.to_device(dev)
        seq_lengths = tensor.from_numpy(seq_lengths_np)

        # LSTMModel3 is defined elsewhere in the test file; its forward pass
        # takes the padded batch together with the per-sequence lengths.
        m = LSTMModel3(hidden_size)
        m.compile([x, seq_lengths],
                  is_train=True,
                  use_graph=False,
                  sequential=False)
        m.train()
        # A few training steps: forward pass, MSE loss, one optimizer update.
        for i in range(10):
            out = m.forward(x, seq_lengths)
            loss = autograd.mse_loss(out, y)
            print("train l:", tensor.to_numpy(loss))
            m.optimizer(loss)
        m.eval()
        out = m.forward(x, seq_lengths)
        loss = autograd.mse_loss(out, y)
        print(" eval l:", tensor.to_numpy(loss))
Example #2
    def test_MeanSquareError(self):
        X = np.array([4.3, 5.4, 3.3, 3.6, 5.7, 6.0]).reshape(3, 2).astype(np.float32)
        T = np.array([4.4, 5.3, 3.2, 3.7, 5.4, 6.3]).reshape(3, 2).astype(np.float32)
        x = tensor.from_numpy(X)
        t = tensor.from_numpy(T)
        x.to_device(gpu_dev)
        t.to_device(gpu_dev)

        loss = autograd.mse_loss(x, t)
        dx = loss.creator.backward()[0]

        loss_np = tensor.to_numpy(loss)[0]
        self.assertAlmostEqual(loss_np, 0.0366666, places=4)
        self.check_shape(dx.shape(), (3, 2))
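
The expected value asserted above can be checked by hand: the assertion
implies mse_loss is the mean of squared differences over all six elements.
A plain-numpy verification, independent of SINGA:

import numpy as np

X = np.array([4.3, 5.4, 3.3, 3.6, 5.7, 6.0]).reshape(3, 2)
T = np.array([4.4, 5.3, 3.2, 3.7, 5.4, 6.3]).reshape(3, 2)
# Four differences of +/-0.1 and two of +/-0.3:
# (4 * 0.01 + 2 * 0.09) / 6 = 0.22 / 6 = 0.036666...
print(np.mean((X - T) ** 2))

The backward() call returns the gradient with respect to x, which is why
dx has the same (3, 2) shape as the input.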
Example #4
File: rnn.py  Project: joddiy/singa-onnx
# Imports assumed for this standalone snippet (sonnx is the project's ONNX
# export module; adjust the import path to match your setup).
import numpy as np
import onnx
from singa import autograd, device, optimizer, sonnx, tensor

np.random.seed(0)
data = np.random.randn(4, 2).astype(np.float32)
h = np.random.randn(4, 2).astype(np.float32)
label = np.random.randn(4, 2).astype(np.float32)
print('train_data_shape:', data.shape)
print('train_label_shape:', label.shape)

rnn = autograd.RNN(2, 2)

sgd = optimizer.SGD(0.00)
dev = device.get_default_device()
# Training process: a single iteration (note the 0.0 learning rate), mainly
# to build the computation graph that is exported to ONNX below.
for i in range(1):
    inputs = tensor.Tensor(device=dev, data=data, stores_grad=False)
    h0 = tensor.Tensor(device=dev, data=h, stores_grad=False)
    targets = tensor.Tensor(device=dev,
                            data=label,
                            requires_grad=False,
                            stores_grad=False)
    x = rnn(inputs, h0)
    loss = autograd.mse_loss(x, targets)
    gradient = autograd.backward(loss)
    for p, gp in gradient:
        sgd.apply(0, gp, p, '')
    if i % 100 == 0:
        print('training loss = ', tensor.to_numpy(loss)[0])

model = sonnx.get_onnx_model(loss, inputs, targets)

onnx.save(model, 'linear.onnx')
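
After export, the saved file can be sanity-checked with the standard onnx
tooling:

import onnx

m = onnx.load('linear.onnx')
onnx.checker.check_model(m)  # validates the graph structure
print(onnx.helper.printable_graph(m.graph))  # human-readable graph dump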