Example 1
def test_grid_sample_gradient_1d(self):
    # Check gradients of grid_sample w.r.t. both the grid values and the
    # sample coordinates on every backend that supports autodiff.
    for backend in BACKENDS:
        if backend.supports(Backend.gradients):
            with backend:
                grid = math.tensor([0., 1, 2, 3], spatial('x'))
                coords = math.tensor([0.5, 1.5], instance('points'))
                with math.record_gradients(grid, coords):
                    sampled = math.grid_sample(grid, coords, extrapolation.ZERO)
                    loss = math.mean(math.l2_loss(sampled)) / 2
                    grad_grid, grad_coords = math.gradients(loss, grid, coords)
                math.assert_close(grad_grid, math.tensor([0.125, 0.5, 0.375, 0], spatial('x')), msg=backend)
                math.assert_close(grad_coords, math.tensor([0.25, 0.75], instance('points')), msg=backend)
Example 2
def test_grid_sample_gradient_1d(self):
    # Gradients from all autodiff backends are collected first and then
    # compared jointly against the reference values.
    grads_grid = []
    grads_coords = []
    for backend in BACKENDS:
        if backend.supports(Backend.gradients):
            print(backend)
            with backend:
                grid = math.tensor([0., 1, 2, 3], 'x')
                coords = math.tensor([0.5, 1.5], 'points')
                with math.record_gradients(grid, coords):
                    sampled = math.grid_sample(grid, coords, extrapolation.ZERO)
                    loss = math.l2_loss(sampled)
                    grad_grid, grad_coords = math.gradients(loss, grid, coords)
                    grads_grid.append(grad_grid)
                    grads_coords.append(grad_coords)
    math.assert_close(*grads_grid, math.tensor([0.125, 0.5, 0.375, 0], 'x'))
    math.assert_close(*grads_coords, math.tensor([0.25, 0.75], 'points'))
Example 3
def f(x: math.Tensor, y: math.Tensor):
    pred = x
    loss = math.l2_loss(pred - y)
    return loss, pred
Example 4
def l2_loss(field: SampledField, batch_norm=True):
    """ L2 loss for the unweighted values of the field. See `phi.math.l2_loss()`. """
    return math.l2_loss(field.values, batch_norm=batch_norm)
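The wrapper above simply forwards to `phi.math.l2_loss` on the field's values. A minimal usage sketch follows; the `CenteredGrid` construction and its arguments are assumptions based on the phiflow 2.x API, not part of the original example.

from phi.flow import *  # provides CenteredGrid, Noise, extrapolation, field

# Hypothetical grid; the constructor arguments assume the phiflow 2.x API.
grid = CenteredGrid(Noise(), extrapolation.ZERO, x=32, y=32)
loss = field.l2_loss(grid)  # scalar Tensor: L2 loss of grid.values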
Example 5
def loss_function(x):
    print("Running loss_function")
    assert isinstance(x, math.Tensor)
    pred = math.native_call(net, x)  # `net` is a neural network defined in the enclosing scope
    return math.l2_loss(pred)
Example 6
def f(x: math.Tensor, y: math.Tensor):
    assert isinstance(x, math.Tensor)
    assert isinstance(y, math.Tensor)
    pred = x
    loss = math.l2_loss(pred - y)
    return loss, pred
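A function of this form, returning the loss together with an auxiliary output, can be differentiated by wrapping it rather than calling it directly. The sketch below assumes the phiflow 2.x `math.functional_gradient` API (positional `wrt` indices and a `get_output` flag) and an active backend with autodiff support, as in Examples 1 and 2.

from phi import math

# Sketch only: `functional_gradient` arguments are assumptions about the phiflow 2.x API.
x = math.tensor(2.)
y = math.tensor(1.)
grad_f = math.functional_gradient(f, (0, 1), get_output=True)  # differentiate w.r.t. x and y
(loss, pred), (dx, dy) = grad_f(x, y)
# With l2_loss(v) = 0.5 * sum(v ** 2) and pred = x:  dx = x - y,  dy = y - x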
Example 7
def loss_function(x):
    return math.l2_loss(x)
Example 8
def loss(x, y):
    return math.l2_loss(x - 1) + math.l2_loss(y + 1)