def test_max(node, axis, use_gpu, keep_dimensions):
    """Exercise rm.amax: gradient checks via compare() plus a forward-pass
    comparison against numpy.amax."""
    node = Variable(node)
    assert_cuda_active(use_gpu)

    def plain_max(node):
        return sum(rm.amax(node, axis=axis, keepdims=keep_dimensions))

    def max_plus_const(node):
        return sum(rm.amax(node, axis=axis, keepdims=keep_dimensions) + 10)

    compare(max_plus_const, node, node)

    def max_affine(node):
        scaled = rm.amax(node, axis=axis, keepdims=keep_dimensions) * 3
        return sum(scaled + 15)

    compare(max_affine, node, node)

    def max_doubled(node):
        first = rm.amax(node, axis=axis, keepdims=keep_dimensions)
        second = rm.amax(node, axis=axis, keepdims=keep_dimensions)
        return sum(first + second)

    compare(max_doubled, node, node)

    # Forward-pass sanity check: renom's amax must agree with numpy's.
    renom_max = rm.amax(node, axis=axis, keepdims=keep_dimensions).as_ndarray()
    numpy_max = np.amax(node, axis=axis, keepdims=keep_dimensions)
    assert np.allclose(renom_max, numpy_max, atol=1e-5, rtol=1e-3)

    compare(plain_max, node, node)
def __new__(self, x, slice_size=1, axis=1):
    """Take the max over consecutive slices of ``x`` along ``axis``.

    Splits the given axis into contiguous windows of ``slice_size``
    elements, reduces each window with ``rm.amax`` (keeping dims), and
    concatenates the per-window maxima back along the same axis.

    Args:
        x: Input with at least 2 dimensions (``x.shape`` is required).
        slice_size (int): Width of each window along ``axis``.
        axis (int): Axis to reduce over; ``None`` is treated as 1.

    Returns:
        The concatenation of the per-window maxima along ``axis``.

    Raises:
        ValueError: If the axis length is not a multiple of
            ``slice_size``. Previously the remainder elements were
            silently dropped by the floor division.
    """
    if axis is None:
        axis = 1
    assert len(x.shape) > 1
    input_length = x.shape[axis]
    # Reject inputs whose axis length is not evenly divisible, instead of
    # silently discarding the trailing elements (resolves the old TODO).
    if input_length % slice_size != 0:
        raise ValueError(
            "axis length {} is not divisible by slice_size {}".format(
                input_length, slice_size))
    # NOTE(review): the slice below hard-codes axis 1 (`x[:, ...]`) even
    # though `axis` is a parameter — presumably only axis=1 is ever used;
    # confirm against callers before generalizing.
    maxes = []
    for offset in range(0, input_length, slice_size):
        maxes.append(
            rm.amax(x[:, offset:offset + slice_size], axis=axis, keepdims=True))
    return rm.concat(*maxes, axis=axis)
def func4(node):
    # `axis` and `keep_dimensions` are closed over from the enclosing scope.
    first = rm.amax(node, axis=axis, keepdims=keep_dimensions)
    second = rm.amax(node, axis=axis, keepdims=keep_dimensions)
    return sum(first + second)
def func3(node):
    # `axis` and `keep_dimensions` are closed over from the enclosing scope.
    scaled = rm.amax(node, axis=axis, keepdims=keep_dimensions) * 3
    return sum(scaled + 15)
def func(node):
    # `axis` is closed over from the enclosing scope; passed positionally.
    reduced = rm.amax(node, axis)
    return sum(reduced)