from typing import Tuple

import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
from hypothesis import given
from numpy.testing import assert_allclose

# Project-local helpers under test. These import paths are assumptions;
# point them at wherever `broadcastable_shape` and `reduce_broadcast`
# actually live in your project.
from tests.custom_strategies import broadcastable_shape  # assumed path
from mygrad._utils import reduce_broadcast  # assumed path


@st.composite
def special_shape(draw, static_shape, shape=tuple(), min_dim=0, max_dim=5):
    """Search strategy that permits broadcastable dimensions to be prepended
    to a static shape, for the purpose of drawing diversely-shaped arrays
    for matmul.

    Returns
    -------
    hypothesis.searchstrategy.SearchStrategy -> Tuple[int, ...]
    """
    return draw(broadcastable_shape(shape, min_dim, max_dim)) + static_shape

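# Hedged usage sketch of `special_shape`: the trailing static shape (3, 4)
# and the test name are illustrative additions, not from the original suite.
# Since the strategy only *prepends* broadcast-compatible dimensions, every
# drawn shape must end with the static shape.
@given(shape=special_shape(static_shape=(3, 4)))
def test_special_shape_sketch(shape: Tuple[int, ...]):
    assert shape[-2:] == (3, 4), "the static trailing dims must be preserved"
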
@given(
    shape=hnp.array_shapes(min_dims=1, max_dims=4),  # assumed bounds
    allow_singleton=st.booleans(),
    min_dim=st.integers(0, 5),  # assumed bounds
    min_side=st.integers(1, 5),  # assumed bounds
    data=st.data(),
)
def test_broadcast_compat_shape(
    shape: Tuple[int, ...],
    allow_singleton: bool,
    min_dim: int,
    min_side: int,
    data: st.DataObject,
):
    """Ensures that the `broadcastable_shape` strategy:
    - produces broadcastable shapes
    - respects input parameters"""
    max_side = data.draw(st.integers(min_side, min_side + 5), label="max side")
    max_dim = data.draw(
        st.integers(min_dim, max(min_dim, len(shape) + 3)), label="max dim"
    )
    compat_shape = data.draw(
        broadcastable_shape(
            shape=shape,
            allow_singleton=allow_singleton,
            min_dim=min_dim,
            max_dim=max_dim,
            min_side=min_side,
            max_side=max_side,
        ),
        label="broadcastable_shape",
    )

    assert (
        min_dim <= len(compat_shape) <= max_dim
    ), "a shape of inappropriate dimensionality was generated by the strategy"

    a = np.empty(shape)
    b = np.empty(compat_shape)
    np.broadcast(a, b)  # raises if the drawn shape for b is not broadcast-compatible

    if not allow_singleton:
        small_dim = min(a.ndim, b.ndim)
        if small_dim:
            assert (
                shape[-small_dim:] == compat_shape[-small_dim:]
            ), "singleton dimensions were included by the strategy"

    if len(compat_shape) > len(shape):
        n = len(compat_shape) - len(shape)
        for side in compat_shape[:n]:
            assert (
                min_side <= side <= max_side
            ), "out-of-bound sides were generated by the strategy"

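# Hedged illustration of the core property checked above: every shape drawn
# from `broadcastable_shape` broadcasts against the input shape. The concrete
# shape (2, 3) and the test name are illustrative assumptions.
@given(compat_shape=broadcastable_shape(shape=(2, 3)))
def test_broadcastable_shape_sketch(compat_shape: Tuple[int, ...]):
    # np.broadcast raises a ValueError if the two shapes are incompatible
    np.broadcast(np.empty((2, 3)), np.empty(compat_shape))
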
@given(
    var_shape=hnp.array_shapes(min_dims=1, max_dims=3),  # assumed bounds
    data=st.data(),
)
def test_reduce_broadcast_nokeepdim(var_shape, data):
    """Example broadcasting: (2, 3) -> (5, 2, 3)"""
    grad_shape = data.draw(
        broadcastable_shape(
            shape=var_shape,
            min_dim=len(var_shape) + 1,
            max_dim=len(var_shape) + 3,
            allow_singleton=False,
        ),
        label="grad_shape",
    )
    grad = np.ones(grad_shape, dtype=float)

    reduced_grad = reduce_broadcast(grad=grad, var_shape=var_shape)
    # scale reduced-grad so that all of its elements should be 1
    reduced_grad *= np.prod(var_shape) / grad.size
    assert_allclose(actual=reduced_grad, desired=np.ones(var_shape))

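# A concrete, non-property-based sketch of the docstring example above,
# (2, 3) -> (5, 2, 3): a ones-gradient summed over the prepended axis of
# length 5 recovers shape (2, 3) with every element equal to 5. The test
# name is an illustrative addition.
def test_reduce_broadcast_nokeepdim_concrete():
    grad = np.ones((5, 2, 3))
    reduced_grad = reduce_broadcast(grad=grad, var_shape=(2, 3))
    assert reduced_grad.shape == (2, 3)
    assert_allclose(actual=reduced_grad, desired=np.full((2, 3), 5.0))
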
@given(
    var_shape=hnp.array_shapes(min_dims=1, max_dims=3),  # assumed bounds
    data=st.data(),
)
def test_reduce_broadcast_keepdim(var_shape, data):
    """Example broadcasting: (2, 1, 4) -> (2, 5, 4)"""
    grad_shape = data.draw(
        broadcastable_shape(
            shape=var_shape, min_dim=len(var_shape), max_dim=len(var_shape)
        ),
        label="grad_shape",
    )
    grad = np.ones(grad_shape, dtype=float)

    reduced_grad = reduce_broadcast(grad=grad, var_shape=var_shape)
    # the reduced gradient takes on the smaller side wherever the
    # variable was broadcast
    assert reduced_grad.shape == tuple(
        min(i, j) for i, j in zip(var_shape, grad.shape)
    )
    # any dim of the variable that was broadcast over must be a singleton
    assert all(i == 1 for i, j in zip(var_shape, grad.shape) if i < j)
    sum_axes = tuple(
        n for n, (i, j) in enumerate(zip(var_shape, grad.shape)) if i != j
    )
    assert_allclose(
        actual=reduced_grad, desired=grad.sum(axis=sum_axes, keepdims=True)
    )

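# A concrete sketch of the keepdim docstring example, (2, 1, 4) -> (2, 5, 4):
# the broadcast singleton axis is summed with keepdims=True, so a ones
# gradient reduces to shape (2, 1, 4) with every element equal to 5. The
# test name is an illustrative addition.
def test_reduce_broadcast_keepdim_concrete():
    grad = np.ones((2, 5, 4))
    reduced_grad = reduce_broadcast(grad=grad, var_shape=(2, 1, 4))
    assert reduced_grad.shape == (2, 1, 4)
    assert_allclose(actual=reduced_grad, desired=np.full((2, 1, 4), 5.0))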