def test_grid_sample_gradient_1d(self):
    """Check grid_sample gradients w.r.t. both grid values and sample coordinates.

    Runs once per backend that supports automatic differentiation and compares
    against hand-computed reference gradients for a 4-cell 1D grid sampled at
    two points.
    """
    for backend in BACKENDS:
        # Skip backends without autodiff support (guard clause instead of nesting).
        if not backend.supports(Backend.gradients):
            continue
        with backend:
            values = math.tensor([0., 1, 2, 3], spatial('x'))
            sample_points = math.tensor([0.5, 1.5], instance('points'))
            with math.record_gradients(values, sample_points):
                interpolated = math.grid_sample(values, sample_points, extrapolation.ZERO)
                # Halved mean so the reference gradient values below stay small.
                loss = math.mean(math.l2_loss(interpolated)) / 2
                d_values, d_points = math.gradients(loss, values, sample_points)
            math.assert_close(d_values, math.tensor([0.125, 0.5, 0.375, 0], spatial('x')), msg=backend)
            math.assert_close(d_points, math.tensor([0.25, 0.75], instance('points')), msg=backend)
def test_grid_sample_gradient_1d_all_backends(self):
    """Cross-backend variant of the 1D grid_sample gradient test.

    Renamed from `test_grid_sample_gradient_1d`: this file defines that name
    twice, and the later definition silently shadows the earlier one, so only
    one of the two tests ever ran. With a unique name both are discovered.

    Collects gradients from every autodiff-capable backend and asserts they
    all agree with the reference values in a single `assert_close` call.
    """
    grads_grid = []
    grads_coords = []
    for backend in BACKENDS:
        if backend.supports(Backend.gradients):
            print(backend)
            with backend:
                grid = math.tensor([0., 1, 2, 3], 'x')
                coords = math.tensor([0.5, 1.5], 'points')
                with math.record_gradients(grid, coords):
                    sampled = math.grid_sample(grid, coords, extrapolation.ZERO)
                    loss = math.l2_loss(sampled)
                    grad_grid, grad_coords = math.gradients(loss, grid, coords)
                    grads_grid.append(grad_grid)
                    grads_coords.append(grad_coords)
    # Compare all per-backend gradients against each other and the reference.
    math.assert_close(*grads_grid, math.tensor([0.125, 0.5, 0.375, 0], 'x'))
    math.assert_close(*grads_coords, math.tensor([0.25, 0.75], 'points'))
def f(x: math.Tensor, y: math.Tensor):
    """Toy objective for gradient tests.

    The prediction is simply `x`; the loss is the L2 loss of the residual
    `x - y`. Returns `(loss, prediction)`.
    """
    residual = x - y
    return math.l2_loss(residual), x
def l2_loss(field: SampledField, batch_norm=True):
    """L2 loss of the unweighted values of `field`.

    Thin wrapper that delegates to `phi.math.l2_loss()`.

    Args:
        field: Field whose `values` are reduced.
        batch_norm: Forwarded to `phi.math.l2_loss()`.
    """
    values = field.values
    return math.l2_loss(values, batch_norm=batch_norm)
def loss_function(x):
    """Network loss: L2 loss of the net's prediction for input `x`.

    Prints a trace line on every call (used to observe how often the function
    is re-executed, e.g. under tracing/JIT).
    """
    print("Running loss_function")
    assert isinstance(x, math.Tensor)
    # Inline the prediction instead of binding it to a local.
    return math.l2_loss(math.native_call(net, x))
def f(x: math.Tensor, y: math.Tensor):
    """Toy objective that also type-checks its inputs.

    Both arguments must be `math.Tensor`s. The prediction is `x`; the loss is
    the L2 loss of `x - y`. Returns `(loss, prediction)`.
    """
    for argument in (x, y):
        assert isinstance(argument, math.Tensor)
    difference = x - y
    return math.l2_loss(difference), x
def loss_function(x):
    """Objective used for optimization tests: plain L2 loss of `x`."""
    result = math.l2_loss(x)
    return result
def loss(x, y):
    """Two-term objective: L2 loss pulling `x` toward 1 plus L2 loss pulling `y` toward -1."""
    term_x = math.l2_loss(x - 1)
    term_y = math.l2_loss(y + 1)
    return term_x + term_y