def test_compute_strided_conv():
    # Strided convolution: a 2x2 filter with stride 2 over a 4x4 single-channel
    # NHWC input gives a 2x2 output; partials are checked against an upstream
    # gradient of ones.
    conv = Conv2D(conv_format=TensorFlowNHWC, stride=2)
    features = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
    filters = np.arange(4, dtype=np.float32).reshape(2, 2, 1, 1)
    biases = np.zeros((1, 1))

    conv.check_incoming_shapes(StaticShape.of_tensor(features),
                               StaticShape.of_tensor(filters),
                               StaticShape.of_tensor(biases))
    conv.compute(features, filters, biases)
    actual = conv.output

    assert actual.shape == (1, 2, 2, 1)
    np.testing.assert_allclose(
        actual,
        np.array([[[[24.], [36.]],
                   [[72.], [84.]]]], dtype=np.float32))

    dx, df, db = conv.partials(np.ones(actual.shape))

    assert dx.shape == features.shape
    assert df.shape == filters.shape
    assert db.shape == biases.shape

    np.testing.assert_allclose(
        dx,
        np.array([[[[0.], [1.], [0.], [1.]],
                   [[2.], [3.], [2.], [3.]],
                   [[0.], [1.], [0.], [1.]],
                   [[2.], [3.], [2.], [3.]]]], dtype=np.float32))
    np.testing.assert_allclose(
        df,
        np.array([[[[20.]], [[24.]]],
                  [[[36.]], [[40.]]]], dtype=np.float32))

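# A minimal cross-check of the output-size arithmetic the conv tests rely on,
# assuming the usual formula out = (in + 2 * padding - kernel) // stride + 1
# (an illustration only, not part of the library under test).
def test_conv_out_size_formula_sketch():
    def out_size(in_size, kernel, padding=0, stride=1):
        return (in_size + 2 * padding - kernel) // stride + 1

    assert out_size(4, 2, stride=2) == 2  # strided conv above
    assert out_size(40, 9) == 32 and out_size(30, 9) == 22  # 9x9 filters below
    assert out_size(4, 3, padding=1) == 4  # 'same' conv below
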
def test_batch_norm_incorrect_shape():
    beta_shape = StaticShape(2, 1)
    gamma_shape = StaticShape(1, 2)
    x_shape = StaticShape(2, Dim.unknown())
    bn = BatchNormTraining(momentum=0.99, epsilon=1e-3)

    with pytest.raises(ValueError):
        bn.check_incoming_shapes(beta_shape, gamma_shape, x_shape)

def test_valid_shape_and_stride():
    pool = MaxPool(stride=2)
    m = Dim.unknown()
    features = StaticShape(m, 3, 40, 30)

    pool.check_incoming_shapes(features)
    out_shape = pool.compute_out_shape(features)

    assert out_shape == StaticShape(m, 3, 20, 15)

def test_valid_shape_without_padding_and_stride():
    conv = Conv2D(padding=0, stride=1)
    m = Dim.unknown()
    features = StaticShape(m, 3, 40, 30)
    filters = StaticShape(16, 3, 9, 9)
    biases = StaticShape(16, 1)

    conv.check_incoming_shapes(features, filters, biases)
    out_shape = conv.compute_out_shape(features, filters, biases)

    assert out_shape == StaticShape(m, 16, 32, 22)

def test_invalid_bias():
    conv = Conv2D()
    m = Dim.unknown()
    features = StaticShape(m, 3, 40, 30)
    filters = StaticShape(16, 3, 2, 2)
    biases = StaticShape(17, 1)

    with pytest.raises(ValueError) as ex:
        conv.check_incoming_shapes(features, filters, biases)

    assert str(ex.value) == "Number of bias should match number of filters " \
                            "but got 17 and 16"

def test_invalid_number_of_channels():
    conv = Conv2D()
    m = Dim.unknown()
    features = StaticShape(m, 3, 40, 30)
    filters = StaticShape(16, 4, 2, 2)
    biases = StaticShape(16, 1)

    with pytest.raises(ValueError) as ex:
        conv.check_incoming_shapes(features, filters, biases)

    assert str(ex.value) == "Number of channels should be the same " \
                            "in features(3) and filters(4)"

def test_compute_valid_conv():
    # Unpadded, stride-1 ("valid") convolution in NHWC format over a batch of
    # two 3x4 images with 3 channels and two 2x2x3 filters; the forward output
    # and the partials w.r.t. features, filters and bias are checked.
    conv = Conv2D(conv_format=TensorFlowNHWC)
    features = np.arange(72, dtype=np.float32).reshape(2, 3, 4, 3)
    filters = np.arange(24, dtype=np.float32).reshape(2, 2, 3, 2)
    biases = np.zeros((2, 1))

    conv.check_incoming_shapes(StaticShape.of_tensor(features),
                               StaticShape.of_tensor(filters),
                               StaticShape.of_tensor(biases))
    conv.compute(features, filters, biases)
    actual = conv.output

    assert actual.shape == (2, 2, 3, 2)
    np.testing.assert_allclose(
        actual,
        np.array([[[[1624., 1726.], [2020., 2158.], [2416., 2590.]],
                   [[3208., 3454.], [3604., 3886.], [4000., 4318.]]],
                  [[[6376., 6910.], [6772., 7342.], [7168., 7774.]],
                   [[7960., 8638.], [8356., 9070.], [8752., 9502.]]]],
                 dtype=np.float32))

    dfeatures, dfilters, dbias = conv.partials(np.ones(actual.shape))

    assert dfeatures.shape == features.shape
    assert dfilters.shape == filters.shape
    assert dbias.shape == biases.shape

    np.testing.assert_allclose(
        dfeatures,
        np.array([[[[1., 5., 9.], [14., 22., 30.], [14., 22., 30.], [13., 17., 21.]],
                   [[26., 34., 42.], [76., 92., 108.], [76., 92., 108.], [50., 58., 66.]],
                   [[25., 29., 33.], [62., 70., 78.], [62., 70., 78.], [37., 41., 45.]]],
                  [[[1., 5., 9.], [14., 22., 30.], [14., 22., 30.], [13., 17., 21.]],
                   [[26., 34., 42.], [76., 92., 108.], [76., 92., 108.], [50., 58., 66.]],
                   [[25., 29., 33.], [62., 70., 78.], [62., 70., 78.], [37., 41., 45.]]]],
                 dtype=np.float32))
    np.testing.assert_allclose(
        dfilters,
        np.array([[[[324., 324.], [336., 336.], [348., 348.]],
                   [[360., 360.], [372., 372.], [384., 384.]]],
                  [[[468., 468.], [480., 480.], [492., 492.]],
                   [[504., 504.], [516., 516.], [528., 528.]]]],
                 dtype=np.float32))

def test_compute_max_pool():
    # 2x2, stride-2 max pooling in the default NCHW layout; the backward pass
    # routes the upstream gradient to the max position of each window.
    pool = MaxPool(stride=2)
    features = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)

    pool.check_incoming_shapes(StaticShape.of_tensor(features))
    pool.compute(features)
    actual = pool.output

    assert actual.shape == (2, 3, 2, 2)
    e1 = np.array([5, 7, 13, 15, 21, 23, 29, 31, 37, 39, 45, 47]) \
        .reshape(3, 2, 2).astype(np.float32)
    expected = np.stack([e1, e1 + 48], axis=0)
    assert expected.shape == (2, 3, 2, 2)
    np.testing.assert_allclose(actual, expected)

    d_features, = pool.partials(np.ones(actual.shape))

    assert d_features.shape == features.shape
    row1 = [0, 0, 0, 0]
    row2 = [0, 1, 0, 1]
    channel = np.stack([row1, row2, row1, row2], axis=0)
    sample = np.stack([channel, channel, channel], axis=0)
    expected_partial = np.stack([sample, sample], axis=0)
    np.testing.assert_allclose(d_features, expected_partial)

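# Pure-numpy reference for the expected pooling values above (a sketch under
# the assumption that 2x2, stride-2 max pooling over NCHW input is equivalent
# to a reshape into window blocks followed by a max over the window axes; it
# does not touch the library under test).
def test_max_pool_naive_numpy_sketch():
    features = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)
    n, c, h, w = features.shape
    naive = features.reshape(n, c, h // 2, 2, w // 2, 2).max(axis=(3, 5))

    e1 = np.array([5, 7, 13, 15, 21, 23, 29, 31, 37, 39, 45, 47]) \
        .reshape(3, 2, 2).astype(np.float32)
    np.testing.assert_allclose(naive, np.stack([e1, e1 + 48], axis=0))
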
def test_argmax_invalid_arg():
    a = np.arange(6).reshape(2, 3)
    op = ArgMax(axis=4)

    with pytest.raises(ValueError) as ex:
        op.check_incoming_shapes(StaticShape.from_tuple(a.shape))

    assert str(ex.value) == "axis is out of bounds of shape " \
                            "(Dim.of(2), Dim.of(3)) got 4"

def test_compute_max_pool_with_other_format():
    # 2x2, stride-2 max pooling with the TensorFlow NHWC layout.
    pool = MaxPool(stride=2, conv_format=TensorFlowNHWC)
    features = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)

    pool.check_incoming_shapes(StaticShape.of_tensor(features))
    pool.compute(features)
    actual = pool.output

    np.testing.assert_allclose(
        actual,
        np.array([[[[15., 16., 17.], [21., 22., 23.]],
                   [[39., 40., 41.], [45., 46., 47.]]],
                  [[[63., 64., 65.], [69., 70., 71.]],
                   [[87., 88., 89.], [93., 94., 95.]]]], dtype=np.float32))

    d_features, = pool.partials(np.ones(actual.shape))

    np.testing.assert_allclose(
        d_features,
        np.array([[[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                   [[0., 0., 0.], [1., 1., 1.], [0., 0., 0.], [1., 1., 1.]],
                   [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                   [[0., 0., 0.], [1., 1., 1.], [0., 0., 0.], [1., 1., 1.]]],
                  [[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                   [[0., 0., 0.], [1., 1., 1.], [0., 0., 0.], [1., 1., 1.]],
                   [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                   [[0., 0., 0.], [1., 1., 1.], [0., 0., 0.], [1., 1., 1.]]]],
                 dtype=np.float32))

def test_argmax():
    a = np.arange(6).reshape(2, 3)
    op1 = ArgMax(axis=0)
    op1.compute(a)
    op2 = ArgMax(axis=1)
    op2.compute(a)

    np.testing.assert_equal(op1.output, np.array([1, 1, 1]))
    np.testing.assert_equal(op2.output, np.array([2, 2]))

    op1.check_incoming_shapes(StaticShape.from_tuple(a.shape))
    op2.check_incoming_shapes(StaticShape.from_tuple(a.shape))

    out_shape1 = op1.compute_out_shape(StaticShape.from_tuple(a.shape))
    out_shape2 = op2.compute_out_shape(StaticShape.from_tuple(a.shape))

    assert out_shape1.to_numpy() == (3,)
    assert out_shape2.to_numpy() == (2,)

def test_transpose():
    m = Dim.unknown()
    shape = StaticShape(2, 3, m)

    assert shape.transpose((1, 0, 2)) == StaticShape(3, 2, m)
    assert shape.transpose(1, 0, 2) == StaticShape(3, 2, m)
    assert shape.transpose(2, 1, 0) == StaticShape(m, 3, 2)

def test_l2_regularization():
    # TODO Auto adjustment: lamda = wanted_decay_rate_percent * m / learning_rate,
    #      with wanted_decay_rate_percent = 0.1 (10%).
    w1 = np.array([[1, 2, 3], [1, 2, 3]])
    w2 = np.array([[1, 2], [3, 4]])
    lamda = 10.0
    batch_size = 32
    r = lamda / batch_size

    norm = reg.L2NormRegularization(lamda)
    norm.check_incoming_shapes(StaticShape.scalar(),
                               StaticShape.from_tuple((1, 2)))
    norm.compute(batch_size, w1, w2)
    grad = norm.partials(1)

    np.testing.assert_equal(norm.output, 9.0625)
    np.testing.assert_allclose(grad[0], -norm.output / batch_size)
    np.testing.assert_allclose(grad[1], r * w1)
    np.testing.assert_allclose(grad[2], r * w2)

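# Worked check of the constants used in test_l2_regularization, assuming the
# cost is the usual penalty lamda / (2 * m) * sum of squared weights (an
# assumption about the formula, not taken from the library).
def test_l2_regularization_expected_value_sketch():
    w1 = np.array([[1, 2, 3], [1, 2, 3]])
    w2 = np.array([[1, 2], [3, 4]])
    lamda, m = 10.0, 32

    # sum(w1 ** 2) = 28 and sum(w2 ** 2) = 30, so the cost is 10 * 58 / 64.
    cost = lamda / (2 * m) * (np.sum(w1 ** 2) + np.sum(w2 ** 2))
    assert cost == 9.0625
    # The corresponding partials are d(cost)/d(w_i) = (lamda / m) * w_i and
    # d(cost)/d(m) = -cost / m, matching the assertions above.
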
def test_invalid_shape_and_stride():
    pool = MaxPool(stride=3)
    m = Dim.unknown()
    features = StaticShape(m, 3, 40, 30)

    with pytest.raises(ValueError) as ex:
        pool.check_incoming_shapes(features)

    assert str(ex.value) == \
        "Height (40) should be a multiple of stride 3 but is not"

def test_save_var_back_and_forth():
    # A variable's value survives a save, overwrite and restore round trip.
    to_save = np.array(50.0)
    var = Var(initializer=ConstantInitializer(to_save),
              shape=StaticShape.from_tuple(to_save.shape),
              dtype=to_save.dtype)
    var.initialize_if_needed()

    bak = saver.save(var)
    var.output = np.array(0.0)
    assert var.output != to_save

    saver.restore(var, bak)
    assert var.output == to_save

def test_compute_same_conv():
    # 'Same'-style convolution: padding 1 with a 3x3 filter and stride 1
    # preserves the 4x4 spatial size of the input.
    conv = Conv2D(conv_format=TensorFlowNHWC, padding=1)
    features = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
    filters = np.arange(9, dtype=np.float32).reshape(3, 3, 1, 1)
    biases = np.zeros((1, 1))

    conv.check_incoming_shapes(StaticShape.of_tensor(features),
                               StaticShape.of_tensor(filters),
                               StaticShape.of_tensor(biases))
    conv.compute(features, filters, biases)
    actual = conv.output

    assert actual.shape == (1, 4, 4, 1)
    np.testing.assert_allclose(
        actual,
        np.array([[[[73.], [121.], [154.], [103.]],
                   [[171.], [258.], [294.], [186.]],
                   [[279.], [402.], [438.], [270.]],
                   [[139.], [187.], [202.], [113.]]]], dtype=np.float32))

    dx, df, db = conv.partials(np.ones(actual.shape))

    assert dx.shape == features.shape
    assert df.shape == filters.shape
    assert db.shape == biases.shape

    np.testing.assert_allclose(
        dx,
        np.array([[[[8.], [15.], [15.], [12.]],
                   [[21.], [36.], [36.], [27.]],
                   [[21.], [36.], [36.], [27.]],
                   [[20.], [33.], [33.], [24.]]]], dtype=np.float32))
    np.testing.assert_allclose(
        df,
        np.array([[[[45.]], [[66.]], [[54.]]],
                  [[[84.]], [[120.]], [[96.]]],
                  [[[81.]], [[114.]], [[90.]]]], dtype=np.float32))

def test_broadcasted_shape():
    assert StaticShape(2, 5).broadcast(StaticShape(2, 5)) == StaticShape(2, 5)
    assert StaticShape(2, 5).broadcast(StaticShape(1)) == StaticShape(2, 5)
    assert StaticShape(3, 1).broadcast(StaticShape(1, 3)) == StaticShape(3, 3)
    assert StaticShape(7, 3, 3).broadcast(StaticShape(3, 3)) == StaticShape(7, 3, 3)

    m = Dim.unknown()
    n = Dim.unknown()
    assert StaticShape(m, 3, 3).broadcast(StaticShape(3, 1)) == StaticShape(m, 3, 3)
    assert StaticShape(m, 4).broadcast(StaticShape()) == StaticShape(m, 4)
    assert StaticShape(m, n).broadcast(StaticShape(1)) == StaticShape(m, n)
    assert StaticShape(m, 1).broadcast(StaticShape(1, n)) == StaticShape(m, n)

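# Cross-check against numpy (an illustration of the assumption that, for fully
# known dimensions, StaticShape.broadcast follows numpy's broadcasting rules;
# np.broadcast_shapes needs numpy >= 1.20).
def test_broadcast_matches_numpy_sketch():
    assert np.broadcast_shapes((3, 1), (1, 3)) == (3, 3)
    assert np.broadcast_shapes((7, 3, 3), (3, 3)) == (7, 3, 3)
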
def test_is_broadcast_compatible():
    assert StaticShape(2, 5).is_broadcast_compatible(StaticShape(2, 5))
    assert StaticShape(2, 5).is_broadcast_compatible(StaticShape(1))
    assert StaticShape(3, 1).is_broadcast_compatible(StaticShape(1, 3))
    assert StaticShape(7, 3, 3).is_broadcast_compatible(StaticShape(3, 3))
    assert StaticShape(None, 3, 3).is_broadcast_compatible(StaticShape(3, 1))
    assert StaticShape(None, 4).is_broadcast_compatible(StaticShape())
    assert StaticShape(None, None).is_broadcast_compatible(StaticShape(1))
    assert StaticShape(None, 1).is_broadcast_compatible(StaticShape(1, None))

    assert not StaticShape(2, 5).is_broadcast_compatible(StaticShape(3))
    assert not StaticShape(None, 1).is_broadcast_compatible(StaticShape(None, 1))
    assert not StaticShape(1, None).is_broadcast_compatible(StaticShape(None))

def test_shape_is_assignable_to():
    m = Dim.unknown()
    n = Dim.unknown()

    assert StaticShape(2, 3).is_assignable_to(StaticShape(2, 3))
    assert StaticShape(2, 12).is_assignable_to(StaticShape(2, m))
    assert StaticShape(2, m).is_assignable_to(StaticShape(2, m))
    assert StaticShape(2, m).is_assignable_to(StaticShape(n, m))

    assert not StaticShape(2).is_assignable_to(StaticShape(2, m))
    assert not StaticShape(2).is_assignable_to(StaticShape(3))
    assert not StaticShape(2, m).is_assignable_to(StaticShape(2, n))

def test_reduce_along_axis():
    m = Dim.unknown()
    shape = StaticShape(2, 3, m, 5)

    assert shape.reduce_along_axis(axis=0) == StaticShape(3, m, 5)
    assert shape.reduce_along_axis(axis=-1) == StaticShape(2, 3, m)
    assert shape.reduce_along_axis(axis=[0, 1]) == StaticShape(m, 5)
    assert shape.reduce_along_axis(axis=[-1, -1, 1]) == StaticShape(2, m)
    assert shape.reduce_along_axis(axis=[-1, -2]) == StaticShape(2, 3)

    assert shape.reduce_along_axis(axis=0, keep_dims=True) == StaticShape(1, 3, m, 5)
    assert shape.reduce_along_axis(axis=-1, keep_dims=True) == StaticShape(2, 3, m, 1)
    assert shape.reduce_along_axis(axis=[0, 1], keep_dims=True) == StaticShape(1, 1, m, 5)
    assert shape.reduce_along_axis(axis=[-1, -1, 1], keep_dims=True) == StaticShape(2, 1, m, 1)
    assert shape.reduce_along_axis(axis=[-1, -2], keep_dims=True) == StaticShape(2, 3, 1, 1)

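# Illustration (assuming keep_dims is meant to follow numpy's keepdims
# convention): reduced axes are kept as size-1 dimensions.
def test_reduce_keep_dims_numpy_sketch():
    x = np.zeros((2, 3, 4, 5))
    assert x.sum(axis=(0, 1), keepdims=True).shape == (1, 1, 4, 5)
    assert x.sum(axis=-1, keepdims=True).shape == (2, 3, 4, 1)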