def _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
    """Run batch-norm forward training through both the numpy reference
    implementation and the singa CPU kernel, and assert the outputs agree.

    Args mirror the BN forward signature: input, scale, bias, running
    mean, running variance, and momentum.
    """

    def _c(arr):
        # Convert a numpy array into the underlying C tensor on the test device.
        return tensor.Tensor(device=dev, data=arr).data

    # Reference result from the numpy implementation.
    (y_1, rm_1, rv_1, bm_1, bv_1) = _np_bn_training(
        x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)

    # Same computation through the singa C++ API.
    hndl = singa_api.BatchNormHandle(m_0, _c(x_0))
    (y_2_c, bm_2_c, bv_2_c) = singa_api.CpuBatchNormForwardTraining(
        hndl, _c(x_0), _c(s_0), _c(b_0), _c(rm_0), _c(rv_0))

    np.testing.assert_array_almost_equal(
        y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
    np.testing.assert_array_almost_equal(
        bm_1, tensor.to_numpy(_cTensor_to_pyTensor(bm_2_c)), decimal=5)
    # The batch-variance comparison is intentionally disabled in the
    # original test; kept here (commented) for reference.
    #print(bv_1)
    #print(tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)))
    #np.testing.assert_array_almost_equal(
    #    bv_1, tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)), decimal=3)
    return
def test_batchnorm_backward_dnnl(self):
    """Check CpuBatchNormBackwardx's dx output against precomputed values
    for a tiny 1x3x2x2 input."""
    dev = cpu_dev
    n, c, h, w = 1, 3, 2, 2
    data_shape = [n, c, h, w]
    param_shape = [1, c, 1, 1]
    values = list(range(1, 13))

    x_0 = np.array(values, dtype=np.float32).reshape(data_shape)
    y_0 = np.array(values, dtype=np.float32).reshape(data_shape)
    dy_0 = np.array(values, dtype=np.float32).reshape(data_shape)
    scale_0 = np.ones(param_shape, dtype=np.float32)
    bias_0 = np.zeros(param_shape, dtype=np.float32)
    # Per-channel statistics computed over batch and spatial axes.
    mean_0 = x_0.mean(axis=(0, 2, 3), keepdims=True)
    var_0 = x_0.var(axis=(0, 2, 3), keepdims=True)

    def _c(arr):
        # Convert a numpy array into the underlying C tensor on the device.
        return tensor.Tensor(device=dev, data=arr).data

    hndl = singa_api.BatchNormHandle(0.1, _c(x_0))
    (dx_2_c, _, _) = singa_api.CpuBatchNormBackwardx(
        hndl, _c(y_0), _c(dy_0), _c(x_0), _c(scale_0), _c(bias_0),
        _c(mean_0), _c(var_0))

    # Expected dx: the same 2x2 gradient pattern repeated for each channel.
    channel_dx = [[-1.0769e-05, -3.5985e-06], [3.5985e-06, 1.0769e-05]]
    dx_truth = np.array([[channel_dx] * c])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(_cTensor_to_pyTensor(dx_2_c)), dx_truth)
    return
def _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
    """Run batch-norm forward inference through both the numpy reference
    implementation and the singa CPU kernel, and assert the outputs agree.

    Args mirror the BN forward signature: input, scale, bias, running
    mean, running variance, and momentum.
    """

    def _c(arr):
        # Convert a numpy array into the underlying C tensor on the test device.
        return tensor.Tensor(device=dev, data=arr).data

    # Reference result from the numpy implementation.
    y_1 = _np_bn_testing(x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)

    # Same computation through the singa C++ API.
    hndl = singa_api.BatchNormHandle(m_0, _c(x_0))
    y_2_c = singa_api.CpuBatchNormForwardInference(
        hndl, _c(x_0), _c(s_0), _c(b_0), _c(rm_0), _c(rv_0))

    #print(y_1)
    #print(tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))
    np.testing.assert_array_almost_equal(
        y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
    return
def test_batch_norm(self):
    """Smoke-test the singa_wrap batch-norm entry points.

    Only output *shapes* are asserted here (no numerical checks):
    2D forward inference, 2D forward training, 2D backward, and a
    4D forward inference call.
    """
    # 2x2 input and its upstream gradient.
    x_shape = [2, 2]
    x = singa_wrap.Tensor(x_shape)
    x.CopyFloatDataFromHostPtr([1, 2, 3, 4])
    dy_shape = [2, 2]
    dy = singa_wrap.Tensor(dy_shape)
    dy.CopyFloatDataFromHostPtr([4, 3, 2, 1])
    # Per-channel parameters: identity scale, zero bias.
    scale_shape = [2]
    scale = singa_wrap.Tensor(scale_shape)
    scale.CopyFloatDataFromHostPtr([1, 1])
    bias_shape = [2]
    bias = singa_wrap.Tensor(bias_shape)
    bias.CopyFloatDataFromHostPtr([0, 0])
    # Running mean/variance fed to the forward calls.
    mean_shape = [2]
    mean = singa_wrap.Tensor(mean_shape)
    mean.CopyFloatDataFromHostPtr([1, 2])
    var = singa_wrap.Tensor(mean_shape)
    var.CopyFloatDataFromHostPtr([1, 2])
    handle = singa_wrap.BatchNormHandle(0.9, x)

    # 2D Forward Inference
    y = singa_wrap.CpuBatchNormForwardInference(handle, x, scale, bias,
                                                mean, var)
    self.assertListEqual([2, 2], list(y.shape()))

    # 2D Forward Training: also returns updated running statistics.
    (y, mean_updated, var_updated) = singa_wrap.CpuBatchNormForwardTraining(
        handle, x, scale, bias, mean, var)
    self.assertListEqual([2, 2], list(y.shape()))
    self.assertListEqual([2], list(mean_updated.shape()))
    self.assertListEqual([2], list(var_updated.shape()))

    # 2D Backward dx: uses the training outputs as forward context.
    (dx, dscale, dbias) = singa_wrap.CpuBatchNormBackwardx(
        handle, y, dy, x, scale, bias, mean_updated, var_updated)
    self.assertListEqual([2, 2], list(dx.shape()))
    self.assertListEqual([2], list(dscale.shape()))
    self.assertListEqual([2], list(dbias.shape()))

    # 4D Forward Inference on a 1x2x4x4 input (same channel count, so the
    # 2-element scale/bias/mean/var tensors are reused).
    x2_shape = [1, 2, 4, 4]
    x2 = singa_wrap.Tensor(x2_shape)
    x2.CopyFloatDataFromHostPtr(
        [0.0736655, 0.0459045, 0.0779517, 0.0771059, 0.0586862, 0.0561263,
         0.0708457, 0.0977273, 0.0405025, -0.170897, 0.0208982, 0.136865,
         -0.0367905, -0.0618205, -0.0103908, -0.0522777, -0.122161,
         -0.025427, -0.0718576, -0.185941, 0.0166533, 0.178679, -0.0576606,
         -0.137817, 0.150676, 0.153442, -0.0929899, -0.148675, -0.112459,
         -0.106284, -0.103074, -0.0668811])
    # NOTE(review): the handle is rebuilt from the 2-D tensor `x`, not the
    # 4-D `x2` — this looks like a copy-paste slip; confirm whether
    # BatchNormHandle should be constructed from `x2` here.
    handle = singa_wrap.BatchNormHandle(0.9, x)
    y2 = singa_wrap.CpuBatchNormForwardInference(handle, x2, scale, bias,
                                                 mean, var)
    self.assertListEqual([1, 2, 4, 4], list(y2.shape()))