def loss_fun(x, step):
    del step  # step is unused; kept only to match the optimizer's expected signature
    # Model outputs for the full feature batch; squeeze drops singleton dimensions.
    logits = jnp.squeeze(predict_fun(x, features))
    onehot_targets = utils.one_hot(targets, num_classes)
    # Data term: negative mean of the logit assigned to each true class.
    data_loss = -jnp.mean(jnp.sum(logits * onehot_targets, axis=1))
    # L2 penalty on the parameters.
    reg_loss = l2_pen * utils.norm(x)
    return data_loss + reg_loss
Example No. 2
def loss_fun(x, step):
    del step  # step is unused; kept only to match the optimizer's expected signature
    logits = predict_fun(x, features)
    # Normalize to log-probabilities via a numerically stable log-softmax.
    logits -= logsumexp(logits, axis=1, keepdims=True)
    onehot_targets = utils.one_hot(targets, num_classes)
    # Cross-entropy: negative mean log-probability of the true classes.
    data_loss = -jnp.mean(jnp.sum(logits * onehot_targets, axis=1))
    # L2 penalty on the parameters.
    reg_loss = l2_pen * utils.norm(x)
    return data_loss + reg_loss
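Both snippets compute a softmax cross-entropy data loss plus an L2 penalty, closing over `predict_fun`, `features`, `targets`, `num_classes`, and `l2_pen` from the enclosing scope. Below is a minimal, self-contained sketch of the same pattern using a toy linear model; the model, data, and the `one_hot`/L2 stand-ins are assumptions for illustration, not the library's actual `utils`.

```python
import jax.numpy as jnp
from jax.scipy.special import logsumexp

# Toy setup (assumed values, for illustration only).
num_classes = 3
l2_pen = 1e-3
features = jnp.ones((4, 2))          # 4 examples, 2 features
targets = jnp.array([0, 1, 2, 1])    # integer class labels

def one_hot(labels, n):
    # Stand-in for utils.one_hot: compare labels against class indices.
    return (jnp.asarray(labels)[:, None] == jnp.arange(n)[None, :]).astype(jnp.float32)

def predict_fun(x, inputs):
    # Linear classifier: logits = inputs @ weights.
    return inputs @ x

def loss_fun(x, step):
    del step
    logits = predict_fun(x, features)
    logits -= logsumexp(logits, axis=1, keepdims=True)   # log-softmax
    data_loss = -jnp.mean(jnp.sum(logits * one_hot(targets, num_classes), axis=1))
    reg_loss = l2_pen * jnp.sum(x ** 2)                   # simple L2 stand-in for utils.norm
    return data_loss + reg_loss

x0 = jnp.zeros((2, num_classes))
print(loss_fun(x0, step=0))   # ~log(num_classes) at zero weights
```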
def test_one_hot_list():
  """Tests the one hot conversion with a list as input."""

  # Generate labels to test.
  n = 5
  labels = list(range(n))

  # Convert to a one-hot representation.
  one_hot_labels = utils.one_hot(labels, n)
  assert np.allclose(one_hot_labels, jnp.eye(n))
def test_one_hot_empty():
  """Tests the one hot function with empty input."""
  assert len(utils.one_hot([], 5)) == 0
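These two tests only require that `utils.one_hot` maps the labels `0..n-1` to an identity matrix and returns a length-zero result for empty input. A minimal encoder consistent with that behavior (an assumption, not necessarily the library's actual implementation) could look like:

```python
import jax.numpy as jnp

def one_hot(labels, num_classes, dtype=jnp.float32):
    """Hypothetical one-hot encoder consistent with the tests above."""
    labels = jnp.asarray(labels)
    # Broadcast labels against the class indices; equality gives the one-hot rows.
    return (labels[:, None] == jnp.arange(num_classes)[None, :]).astype(dtype)

# one_hot(list(range(5)), 5) -> 5x5 identity; len(one_hot([], 5)) == 0.
```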