def _conv_laplace_2d(tensor):
    """Laplace of `tensor` via 'VALID' convolution with the 5-point stencil (-4 at the centre, 1 at the four edge neighbours), applied per channel."""
    kernel = np.array([[0., 1., 0.],
                       [1., -4., 1.],
                       [0., 1., 0.]], dtype=np.float32)
    kernel = kernel.reshape((3, 3, 1, 1))
    if tensor.shape[-1] == 1:
        return math.conv(tensor, kernel, padding='VALID')
    else:
        # Convolve each channel separately and re-assemble along the channel axis.
        return math.concat([math.conv(tensor[..., i:i + 1], kernel, padding='VALID')
                            for i in range(tensor.shape[-1])], -1)


def _conv_laplace_3d(tensor):
    """3D (cube) Laplace stencil, reshaped to a [3, 3, 3, 1, 1] convolution kernel:
    -6 at the centre, 1 at the six face neighbours, 0 elsewhere.

    Padding is done explicitly in laplace(), hence no padding ('VALID') is needed here.
    """
    kernel = math.to_float([[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
                            [[0., 1., 0.], [1., -6., 1.], [0., 1., 0.]],
                            [[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]]])
    kernel = kernel.reshape((3, 3, 3, 1, 1))
    if tensor.shape[-1] == 1:
        return math.conv(tensor, kernel, padding='VALID')
    else:
        # Convolve each channel separately and re-assemble along the channel axis.
        return math.concat([math.conv(tensor[..., i:i + 1], kernel, padding='VALID')
                            for i in range(tensor.shape[-1])], -1)


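# Illustrative sketch, not part of the library: the kernels above are the standard 5-point
# (2D, centre -4) and 7-point (3D, centre -6) discrete Laplacians. The hypothetical
# NumPy-only helper below reproduces the 2D 'VALID' convolution by slicing, purely to
# document the stencil arithmetic; its name and single-channel assumption are illustrative,
# not part of this module.
def _laplace_2d_reference(field):
    """Interior 5-point Laplacian of a plain 2D array (output shrinks by 2 per axis)."""
    return (field[:-2, 1:-1] + field[2:, 1:-1]
            + field[1:-1, :-2] + field[1:-1, 2:]
            - 4. * field[1:-1, 1:-1])

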
def _gradient_nd(tensor, padding, relative_shifts):
    """Finite-difference gradient: for each spatial dimension, the component is the
    difference between the tensor shifted by relative_shifts[1] and by relative_shifts[0]
    along that dimension; components are concatenated along the last (channel) axis.
    """
    rank = spatial_rank(tensor)
    tensor = math.pad(tensor, _get_pad_width(rank, (-relative_shifts[0], relative_shifts[1])), mode=padding)
    components = []
    for dimension in range(rank):
        lower, upper = _dim_shifted(tensor, dimension, relative_shifts,
                                    diminish_others=(-relative_shifts[0], relative_shifts[1]))
        components.append(upper - lower)
    return math.concat(components, axis=-1)


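# Illustrative sketch, not part of the library: with relative_shifts = (0, 1), _gradient_nd
# above amounts to a forward difference along each spatial dimension, with the remaining
# dimensions trimmed so every component has the same shape. The hypothetical NumPy-only
# helper below spells this out for an already-padded 2D field; its name and exact slicing
# are assumptions for illustration, not part of this module.
def _forward_gradient_2d_reference(field):
    """Per-axis forward differences of a plain 2D array, stacked along a new last axis."""
    d_dim0 = field[1:, :-1] - field[:-1, :-1]   # shift along axis 0, trim axis 1
    d_dim1 = field[:-1, 1:] - field[:-1, :-1]   # shift along axis 1, trim axis 0
    return np.stack([d_dim0, d_dim1], axis=-1)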