def run_syncbn(trace_mode):
    x = F.ones([2, 16, 4, 4], dtype="float32")

    net = Sequential(
        Conv2d(16, 16, 1), SyncBatchNorm(16), Conv2d(16, 16, 1), SyncBatchNorm(16)
    )
    gm = ad.GradManager().attach(
        net.parameters(), callbacks=dist.make_allreduce_cb("MEAN")
    )
    opt = optimizer.SGD(net.parameters(), 1e-3)

    def train_func(x):
        with gm:
            y = net(x)
            loss = y.mean()
            gm.backward(loss)
            opt.step().clear_grad()
        return loss

    if trace_mode is not None:
        train_func = trace(train_func, symbolic=trace_mode)

    for _ in range(3):
        loss = train_func(x)
        loss.numpy()
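# A sketch, not part of the original tests, of how run_syncbn above might be driven:
# dist.launcher spawns one process per GPU and pytest.mark.parametrize covers eager,
# non-symbolic and symbolic trace modes. The test name, the n_gpus value and the exact
# launcher signature are assumptions about the surrounding harness.
import pytest
import megengine.distributed as dist


@pytest.mark.parametrize("trace_mode", [None, False, True])
def test_run_syncbn(trace_mode):
    @dist.launcher(n_gpus=2)
    def worker():
        run_syncbn(trace_mode)

    worker()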
def test_syncbn2d_grad():
    nr_chan = 8
    data_shape = (3, nr_chan, 16, 16)
    syncbn = SyncBatchNorm(nr_chan, track_running_stats=False)
    bn = BatchNorm2d(nr_chan, track_running_stats=False)
    for i in range(4):
        if i == 2:
            # switch both modules to inference mode halfway through
            syncbn.training = False
            bn.training = False

        inp = Tensor(np.random.normal(loc=2.3, size=data_shape).astype(np.float32))
        diff = Tensor(np.random.normal(size=data_shape).astype(np.float32))

        with GradManager().attach(inp) as gm:
            oup = syncbn(inp)
            gm.backward(oup, diff)

        grad = inp.grad
        inp.grad = None

        with GradManager().attach(inp) as gm:
            oup_expect = bn(inp)
            gm.backward(oup_expect, diff)

        grad_expect = inp.grad
        inp.grad = None

        # on a single process SyncBatchNorm must match BatchNorm2d exactly,
        # both forward and backward
        _assert_allclose(oup.numpy(), oup_expect.numpy())
        _assert_allclose(grad.numpy(), grad_expect.numpy())
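# The tests in this file rely on an _assert_allclose helper that is not shown here.
# A minimal sketch of what it is assumed to look like; the tolerances are a guess,
# chosen to mirror the max_err=5e-6 used by the older assertTensorClose calls below.
import numpy as np


def _assert_allclose(actual, desired):
    np.testing.assert_allclose(actual, desired, rtol=1e-5, atol=5e-6)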
def worker(data, yv_expect, running_mean, running_var):
    rank = dist.get_rank()
    bn = SyncBatchNorm(nr_chan, momentum=momentum, eps=eps)
    for i in range(steps):
        yv = bn(Tensor(data[rank][i]))

    _assert_allclose(yv.numpy(), yv_expect[rank])
    _assert_allclose(bn.running_mean.numpy(), running_mean)
    _assert_allclose(bn.running_var.numpy(), running_var)
def worker(rank, data, yv_expect, running_mean, running_var):
    if mge.get_device_count("gpu") < nr_ranks:
        return
    dist.init_process_group("localhost", port, nr_ranks, rank, rank)
    bn = SyncBatchNorm(nr_chan, momentum=momentum, eps=eps)
    for i in range(steps):
        yv = bn(Tensor(data[i]))

    _assert_allclose(yv.numpy(), yv_expect)
    _assert_allclose(bn.running_mean.numpy(), running_mean)
    _assert_allclose(bn.running_var.numpy(), running_var)
def worker(rank, data, yv_expect, running_mean, running_var):
    if mge.get_device_count("gpu") < nr_ranks:
        return
    dist.init_process_group("localhost", 2333, nr_ranks, rank, rank)
    bn = SyncBatchNorm(nr_chan, momentum=momentum, eps=eps)
    data_tensor = tensor()
    for i in range(steps):
        data_tensor.set_value(data[i])
        yv = bn(data_tensor)

    assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
    assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
    assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
def worker(rank, data, yv_expect, running_mean, running_var):
    if not mge.is_cuda_available():
        return
    dist.init_process_group("localhost", 2333, 4, rank, rank)
    bn = SyncBatchNorm(nr_chan, momentum=momentum, eps=eps)
    data_tensor = tensor()
    for i in range(steps):
        data_tensor.set_value(data[i])
        yv = bn(data_tensor)

    assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
    assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
    assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
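# Sketch (an assumption about the enclosing test, which is not shown here) of how the
# rank-parametrized workers above are typically launched: one multiprocessing.Process
# per rank, each initializing its own entry in the process group it joins.
import multiprocessing as mp


def launch_workers(nr_ranks, data, yv_expect, running_mean, running_var):
    procs = []
    for rank in range(nr_ranks):
        p = mp.Process(
            target=worker,
            args=(rank, data[rank], yv_expect[rank], running_mean, running_var),
        )
        p.start()
        procs.append(p)
    for p in procs:
        p.join(10)
        assert p.exitcode == 0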
def worker(data, yv_expect, running_mean, running_var):
    with amp.autocast(enabled=enable_amp):
        rank = dist.get_rank()
        bn = SyncBatchNorm(nr_chan, momentum=momentum, eps=eps)
        for i in range(steps):
            yv = bn(Tensor(data[rank][i]))
        if enable_amp:
            # fp16 execution warrants a looser tolerance
            np.testing.assert_allclose(
                yv.numpy(), yv_expect[rank], atol=5e-4, rtol=5e-4
            )
        else:
            _assert_allclose(yv.numpy(), yv_expect[rank])
        _assert_allclose(bn.running_mean.numpy(), running_mean)
        _assert_allclose(bn.running_var.numpy(), running_var)
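# A sketch, not taken from the original tests, of how the per-rank expectations fed to
# the workers above are typically prepared: SyncBatchNorm must behave as if the batch
# were the concatenation of every rank's data, so the reference statistics are computed
# over that concatenation and the normalized result is split back per rank. The helper
# name and its eps default are assumptions.
import numpy as np


def make_expectations(data, eps=1e-5):
    # data: list of per-rank arrays, each of shape (N, C, H, W)
    full = np.concatenate(data, axis=0)
    mean = full.mean(axis=(0, 2, 3), keepdims=True)
    var = full.var(axis=(0, 2, 3), keepdims=True)  # biased (population) variance
    sd = np.sqrt(var + eps)
    return [(d - mean) / sd for d in data]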
def test_syncbn1d():
    nr_chan = 8
    data_shape = (3, nr_chan, 4)
    momentum = 0.9
    bn = SyncBatchNorm(nr_chan, momentum=momentum)
    running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
    running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
    data = tensor()
    for i in range(3):
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
        xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
            (data_shape[0] * data_shape[2], nr_chan)
        )

        # the output is normalized with the biased variance ...
        var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
        sd = np.sqrt(var_biased + bn.eps)

        # ... while the running statistics are updated with the unbiased estimate
        var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))

        running_mean = running_mean * momentum + mean * (1 - momentum)
        running_var = running_var * momentum + var_unbiased * (1 - momentum)

        data.set_value(xv)
        yv = bn(data)
        yv_expect = (xv - mean) / sd

        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
        assertTensorClose(
            running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
        )
        assertTensorClose(
            running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
        )

    # test set 'training' flag to False
    mean_backup = bn.running_mean.numpy()
    var_backup = bn.running_var.numpy()
    bn.training = False
    xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
    data.set_value(xv)
    yv1 = bn(data)
    yv2 = bn(data)
    # inference mode must be deterministic and must not touch the running statistics
    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
    yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_syncbn2d_no_stats():
    nr_chan = 8
    data_shape = (3, nr_chan, 16, 16)
    bn = SyncBatchNorm(nr_chan, track_running_stats=False)
    for i in range(4):
        if i == 2:
            bn.training = False
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
            (data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
        )

        mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
        var = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
        sd = np.sqrt(var + bn.eps)

        yv = bn(Tensor(xv))
        yv_expect = (xv - mean) / sd

        _assert_allclose(yv.numpy(), yv_expect)
def test_syncbn2d():
    nr_chan = 8
    data_shape = (3, nr_chan, 16, 16)
    momentum = 0.9
    bn = SyncBatchNorm(nr_chan, momentum=momentum)
    running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
    running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)

    for i in range(3):
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
            (data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
        )

        mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)

        var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
        sd = np.sqrt(var_biased + bn.eps)

        var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))

        running_mean = running_mean * momentum + mean * (1 - momentum)
        running_var = running_var * momentum + var_unbiased * (1 - momentum)

        yv = bn(Tensor(xv))
        yv_expect = (xv - mean) / sd

        _assert_allclose(yv.numpy(), yv_expect)
        _assert_allclose(bn.running_mean.numpy(), running_mean)
        _assert_allclose(bn.running_var.numpy(), running_var)

    # test set 'training' flag to False
    mean_backup = bn.running_mean.numpy()
    var_backup = bn.running_var.numpy()
    bn.training = False
    xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
    data = Tensor(xv)
    yv1 = bn(data)
    yv2 = bn(data)
    np.testing.assert_equal(yv1.numpy(), yv2.numpy())
    np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
    np.testing.assert_equal(var_backup, bn.running_var.numpy())
    yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
    _assert_allclose(yv1.numpy(), yv_expect)
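# Illustrative check, not part of the original test: batch norm normalizes with the
# biased (population) variance but updates running_var with the unbiased estimate,
# which is why both var_biased and var_unbiased appear above. The two differ exactly
# by the factor n / (n - 1), where n is the number of elements reduced per channel.
import numpy as np

x = np.random.normal(size=(3, 8, 16, 16)).astype(np.float32)
n = x.shape[0] * x.shape[2] * x.shape[3]  # elements reduced per channel
var_biased = x.var(axis=(0, 2, 3))
var_unbiased = x.var(axis=(0, 2, 3), ddof=1)
np.testing.assert_allclose(var_unbiased, var_biased * n / (n - 1), rtol=1e-5)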
def test_syncbn_no_stats():
    nr_chan = 8
    data_shape = (3, nr_chan, 4)
    bn = SyncBatchNorm(nr_chan, track_running_stats=False)
    data = tensor()
    for i in range(4):
        if i == 2:
            bn.training = False
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
        var = np.var(
            np.transpose(xv, [0, 2, 1]).reshape(
                (data_shape[0] * data_shape[2], nr_chan)
            ),
            axis=0,
        ).reshape((1, nr_chan, 1))
        sd = np.sqrt(var + bn.eps)

        data.set_value(xv)
        yv = bn(data)
        yv_expect = (xv - mean) / sd

        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)