def test_retraining(self):
    """Export a small conv net to ONNX, then resume training on the restored graph."""
    # ---- build and train the original graph: conv -> conv -> flatten ----
    x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
    x.gaussian(0.0, 1.0)
    conv1_out = autograd.Conv2d(3, 1, 2)(x)
    conv2_out = autograd.Conv2d(1, 1, 2)(conv1_out)
    y = autograd.Flatten()(conv2_out)[0]

    target = tensor.Tensor(shape=(2, 1), device=gpu_dev)
    target.gaussian(0.0, 1.0)
    loss = autograd.MeanSquareError()(y, target)[0]

    optimizer = opt.SGD(lr=0.01)
    for param, grad in autograd.backward(loss):
        optimizer.update(param, grad)
    optimizer.step()

    # ---- frontend: serialize the trained graph to an ONNX model ----
    model = sonnx.to_onnx([x], [y])

    # ---- backend: rebuild the graph and mark every tensor trainable ----
    sg_ir = sonnx.prepare(model, device=gpu_dev)
    for key, restored in sg_ir.tensor_map.items():
        restored.requires_grad = True
        restored.stores_grad = True
        sg_ir.tensor_map[key] = restored

    # forward through the restored graph, then one more training step
    y_restored = sg_ir.run([x])[0]
    loss = autograd.MeanSquareError()(y_restored, target)[0]
    optimizer = opt.SGD(lr=0.01)
    for param, grad in autograd.backward(loss):
        optimizer.update(param, grad)
    optimizer.step()
def test_transfer_learning(self):
    """Export a trained conv layer to ONNX, then stack and train a new head on it."""
    # ---- train the base model: one conv + flatten ----
    x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
    x.gaussian(0.0, 1.0)
    base_out = autograd.Conv2d(3, 1, 2)(x)
    y = autograd.Flatten()(base_out)[0]

    target = tensor.Tensor(shape=(2, 4), device=gpu_dev)
    target.gaussian(0.0, 1.0)
    loss = autograd.MeanSquareError()(y, target)[0]
    optimizer = opt.SGD(lr=0.01)
    for param, grad in autograd.backward(loss):
        optimizer.update(param, grad)
    optimizer.step()

    # ---- frontend: serialize the base model ----
    model = sonnx.to_onnx([x], [y])

    # ---- backend: run the restored graph up to the second-to-last layer,
    # then attach a fresh conv head and train it ----
    sg_ir = sonnx.prepare(model, device=gpu_dev)
    features = sg_ir.run([x], last_layers=-1)[0]
    head_out = autograd.Conv2d(1, 1, 2)(features)
    y_head = autograd.Flatten()(head_out)[0]

    head_target = tensor.Tensor(shape=(2, 1), device=gpu_dev)
    head_target.gaussian(0.0, 1.0)
    loss = autograd.MeanSquareError()(y_head, head_target)[0]
    optimizer = opt.SGD(lr=0.01)
    for param, grad in autograd.backward(loss):
        optimizer.update(param, grad)
    optimizer.step()
def test_batch_norm(self):
    """batchnorm_2d output must match between the native graph and the ONNX backend."""
    x = tensor.from_numpy(
        np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32))
    scale = tensor.from_numpy(np.array([1.0, 1.5]).astype(np.float32))
    bias = tensor.from_numpy(np.array([0, 1]).astype(np.float32))
    mean = tensor.from_numpy(np.array([0, 3]).astype(np.float32))
    var = tensor.from_numpy(np.array([1, 1.5]).astype(np.float32))
    for t in (x, scale, bias, mean, var):
        t.to_device(gpu_dev)

    handle = singa.CudnnBatchNormHandle(0.9, x.data)
    y = autograd.batchnorm_2d(handle, x, scale, bias, mean, var)

    # frontend
    model = sonnx.to_onnx([x, scale, bias, mean, var], [y])
    # backend
    sg_ir = sonnx.prepare(model, device=gpu_dev)
    y_t = sg_ir.run([x, scale, bias, mean, var])
    np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                         tensor.to_numpy(y_t[0]),
                                         decimal=5)
def test_conv2d(self):
    """A single Conv2d layer must produce identical output after an ONNX round trip."""
    inp = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
    inp.gaussian(0.0, 1.0)
    expected = autograd.Conv2d(3, 1, 2)(inp)

    model = sonnx.to_onnx([inp], [expected])      # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([inp])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_softmax(self):
    """SoftMax output must match between the native graph and the ONNX backend."""
    raw = np.array([[-1, 0, 1]]).astype(np.float32)
    inp = tensor.from_numpy(raw)
    inp.to_device(gpu_dev)
    expected = autograd.SoftMax()(inp)[0]

    model = sonnx.to_onnx([inp], [expected])      # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([inp])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_max_pool(self):
    """MaxPool2d output must match between the native graph and the ONNX backend."""
    inp = tensor.Tensor(shape=(2, 3, 4, 4), device=gpu_dev)
    inp.gaussian(0.0, 1.0)
    expected = autograd.MaxPool2d(2, 2, 0)(inp)

    model = sonnx.to_onnx([inp], [expected])      # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([inp])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_linear(self):
    """A bias-free Linear layer must round-trip through ONNX unchanged."""
    inp = tensor.Tensor(shape=(2, 20), device=gpu_dev)
    inp.gaussian(0.0, 1.0)
    # Feed the backend a copy of the input, taken before the forward pass.
    inp_copy = inp.clone()
    expected = autograd.Linear(20, 1, bias=False)(inp)

    model = sonnx.to_onnx([inp], [expected])      # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([inp_copy])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_Sqrt(self):
    """sqrt output must match between the native graph and the ONNX backend."""
    x = np.array([0.1, 1.0, 0.4, 4.0, 0.9,
                  9.0]).reshape(3, 2).astype(np.float32)
    x = tensor.from_numpy(x)
    # Fix: move the input to the test device. Every sibling test does this,
    # and sg_ir below is prepared on gpu_dev, so its input should live there too.
    x.to_device(gpu_dev)
    y = autograd.sqrt(x)
    # frontend
    model = sonnx.to_onnx([x], [y])
    # backend
    sg_ir = sonnx.prepare(model, device=gpu_dev)
    y_t = sg_ir.run([x])
    np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                         tensor.to_numpy(y_t[0]),
                                         decimal=5)
def test_inference(self):
    """Running the ONNX backend with last_layers=-1 must yield the first conv's output."""
    inp = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
    inp.gaussian(0.0, 1.0)
    mid = autograd.Conv2d(3, 1, 2)(inp)
    out = autograd.Conv2d(1, 1, 2)(mid)

    model = sonnx.to_onnx([inp], [out])           # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    # Stop one layer early: the result should equal the intermediate tensor.
    partial = sg_ir.run([inp], last_layers=-1)
    np.testing.assert_array_almost_equal(tensor.to_numpy(mid),
                                         tensor.to_numpy(partial[0]),
                                         decimal=5)
def test_relu(self):
    """ReLU: check the forward pass against a hand-computed expectation,
    then check the output survives an ONNX round trip."""
    X = np.array([0.8, -1.2, 3.3, -3.6, -0.5,
                  0.5]).reshape(3, 2).astype(np.float32)
    XT = np.array([0.8, 0, 3.3, 0, 0, 0.5]).reshape(3, 2).astype(np.float32)
    x = tensor.from_numpy(X)
    x.to_device(gpu_dev)
    y = autograd.ReLU()(x)[0]
    # Fix: XT was computed but never used — assert the forward pass itself.
    np.testing.assert_array_almost_equal(tensor.to_numpy(y), XT, decimal=5)
    # frontend
    model = sonnx.to_onnx([x], [y])
    # backend
    sg_ir = sonnx.prepare(model, device=gpu_dev)
    y_t = sg_ir.run([x])
    np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                         tensor.to_numpy(y_t[0]),
                                         decimal=5)
def test_transpose(self):
    """transpose output must match between the native graph and the ONNX backend."""
    # Fix: use float32 like the sibling tests (np.random.randn yields float64),
    # and drop the dead `y = x.transpose(1, 2, 0)` numpy call whose result was
    # immediately overwritten by the autograd call below.
    X = np.random.randn(3, 2, 1).astype(np.float32)
    x = tensor.from_numpy(X)
    # NOTE(review): the input is placed on cpu_dev while sg_ir runs on
    # gpu_dev — presumably intentional mixed-device coverage; confirm.
    x.to_device(cpu_dev)
    y = autograd.transpose(x, (1, 2, 0))
    # frontend
    model = sonnx.to_onnx([x], [y])
    # backend
    sg_ir = sonnx.prepare(model, device=gpu_dev)
    y_t = sg_ir.run([x])
    np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                         tensor.to_numpy(y_t[0]),
                                         decimal=5)
def test_concat(self):
    """Concat of two tensors must round-trip through ONNX unchanged."""
    A = np.random.randn(3, 4, 5).astype(np.float32)
    B = np.random.randn(3, 4, 5).astype(np.float32)
    ta, tb = tensor.from_numpy(A), tensor.from_numpy(B)
    ta.to_device(gpu_dev)
    tb.to_device(gpu_dev)
    expected = autograd.Concat()(ta, tb)[0]

    model = sonnx.to_onnx([ta, tb], [expected])   # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([ta, tb])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_HardSigmoid(self):
    """hardsigmoid(alpha=0.2, gamma=0.5) must round-trip through ONNX unchanged."""
    raw = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
                    0.9]).reshape(3, 2).astype(np.float32)
    alpha, gamma = 0.2, 0.5
    inp = tensor.from_numpy(raw)
    inp.to_device(gpu_dev)
    expected = autograd.hardsigmoid(inp, alpha, gamma)

    model = sonnx.to_onnx([inp], [expected])      # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([inp])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_ELu(self):
    """elu (alpha=1.0) must round-trip through ONNX unchanged.

    elu: y = alpha * (e^x - 1) for x <= 0, y = x for x > 0.
    """
    raw = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
                    0.9]).reshape(3, 2).astype(np.float32)
    alpha = 1.
    inp = tensor.from_numpy(raw)
    inp.to_device(gpu_dev)
    expected = autograd.elu(inp, alpha)

    model = sonnx.to_onnx([inp], [expected])      # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([inp])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_matmul(self):
    """Matmul of two matrices must round-trip through ONNX unchanged."""
    A = np.random.randn(4, 5).astype(np.float32)
    B = np.random.randn(5, 4).astype(np.float32)
    ta, tb = tensor.from_numpy(A), tensor.from_numpy(B)
    ta.to_device(gpu_dev)
    tb.to_device(gpu_dev)
    expected = autograd.Matmul()(ta, tb)[0]

    model = sonnx.to_onnx([ta, tb], [expected])   # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)  # backend
    actual = sg_ir.run([ta, tb])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_Greater(self):
    """greater (element-wise comparison) must round-trip through ONNX unchanged."""
    A = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
                  0.9]).reshape(3, 2).astype(np.float32)
    B = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32)
    lhs, rhs = tensor.from_numpy(A), tensor.from_numpy(B)
    # Inputs live on the CPU while the backend runs on gpu_dev.
    lhs.to_device(cpu_dev)
    rhs.to_device(cpu_dev)
    expected = autograd.greater(lhs, rhs)

    model = sonnx.to_onnx([lhs, rhs], [expected])  # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)   # backend
    actual = sg_ir.run([lhs, rhs])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def dump_parameters(self):
    """Serialize the model (base64-encoded ONNX bytes) and its pre-processing
    settings into a plain dict."""
    params = {}
    with tempfile.NamedTemporaryFile() as tmp:
        # Export the whole graph to an ONNX model and write it to disk.
        model = sonnx.to_onnx([self._x], [self._y])
        onnx.save(model, tmp.name)
        # Read the serialized ONNX file back and encode it as a base64 string.
        with open(tmp.name, 'rb') as f:
            model_bytes = f.read()
        params['model_bytes'] = base64.b64encode(model_bytes).decode(
            'utf-8')
    # Pre-processing settings needed to reproduce inference later.
    params['image_size'] = self._image_size
    params['num_classes'] = self._num_classes
    params['normalize_mean'] = json.dumps(self._normalize_mean)
    params['normalize_std'] = json.dumps(self._normalize_std)
    return params
def test_min(self):
    """Element-wise min of two tensors must round-trip through ONNX unchanged."""
    A = np.array([0.1, 0.2, 2.0, 0.0, 0.1,
                  0.2]).reshape(3, 2).astype(np.float32)
    B = np.array([1.0, 2.0, 1.0, 2.1, 0.0,
                  2.0]).reshape(3, 2).astype(np.float32)
    lhs, rhs = tensor.from_numpy(A), tensor.from_numpy(B)
    lhs.to_device(gpu_dev)
    rhs.to_device(gpu_dev)
    expected = autograd.min(lhs, rhs)

    model = sonnx.to_onnx([lhs, rhs], [expected])  # frontend
    sg_ir = sonnx.prepare(model, device=gpu_dev)   # backend
    actual = sg_ir.run([lhs, rhs])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def make_onnx(x, y):
    """Build an ONNX model with *x* as the sole input and *y* as the sole output."""
    inputs = [x]
    outputs = [y]
    return sonnx.to_onnx(inputs, outputs)