def loss_fun(
    x: ep.Tensor, k: int
) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
    """Margin-style loss favoring class ``classes[:, k]`` over ``i0``.

    Computes the difference of negated cross-entropies between the k-th
    candidate class and the original class (presumably ``i0`` holds the
    original labels — confirm against the enclosing attack).

    Args:
        x: Input batch to evaluate.
        k: Column index selecting the candidate class per sample.

    Returns:
        The summed loss, plus ``(per-sample loss, logits)`` as aux data.

    NOTE(review): the original annotated the return as ``ep.Tensor``, but
    the function returns a 2-tuple; the annotation is fixed here to match
    the identically-shaped sibling implementation.
    """
    logits = self.model.forward(x)
    ik = classes[:, k]
    l0 = -ep.crossentropy(logits, i0)
    lk = -ep.crossentropy(logits, ik)
    loss = lk - l0
    return loss.sum(), (loss, logits)
def loss_fun(
    x: ep.Tensor, k: int
) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
    """Summed loss preferring class ``classes[:, k]`` over ``i0``.

    Returns the scalar total loss together with the per-sample losses
    and the raw logits as auxiliary values.
    """
    preds = model(x)
    target_k = classes[:, k]
    ce_orig = ep.crossentropy(preds, i0)
    ce_k = ep.crossentropy(preds, target_k)
    # (-ce_k) - (-ce_orig) == ce_orig - ce_k
    per_sample = ce_orig - ce_k
    return per_sample.sum(), (per_sample, preds)
def loss_fn(inputs: ep.Tensor, labels: ep.Tensor) -> Tuple[ep.Tensor, ep.Tensor]:
    """Summed cross-entropy loss, negated when ``targeted`` is set.

    Returns the scalar loss and the model logits.
    """
    logits = model(inputs)
    total = ep.crossentropy(logits, labels).sum()
    if targeted:
        return -total, logits
    return total, logits
def test_crossentropy_raises(dummy: Tensor) -> None:
    """crossentropy rejects non-2D logits and mismatched label counts."""
    # Valid: 2D logits with matching 1D labels — must not raise.
    logits = ep.arange(dummy, 50).reshape((10, 5)).float32()
    logits = logits / logits.max()
    ep.crossentropy(logits, logits.argmax(axis=-1))

    # Invalid: 3D logits.
    logits = ep.arange(dummy, 150).reshape((10, 5, 3)).float32()
    logits = logits / logits.max()
    with pytest.raises(ValueError):
        ep.crossentropy(logits, logits.argmax(axis=-1))

    # Invalid: label count differs from the batch size.
    logits = ep.arange(dummy, 50).reshape((10, 5)).float32()
    logits = logits / logits.max()
    with pytest.raises(ValueError):
        ep.crossentropy(logits, logits.argmax(axis=-1)[:8])
def loss_fn(inputs: ep.Tensor) -> ep.Tensor:
    """Total cross-entropy of the model's predictions w.r.t. ``labels``."""
    return ep.crossentropy(model(inputs), labels).sum()
def test_crossentropy(dummy: Tensor) -> Tensor:
    """Smoke-test crossentropy on normalized 10x5 logits with argmax labels."""
    logits = ep.arange(dummy, 50).reshape((10, 5)).float32()
    logits = logits / logits.max()
    return ep.crossentropy(logits, logits.argmax(axis=-1))
def loss_fn(inputs):
    """Summed cross-entropy of ``model.forward`` outputs against ``labels``."""
    return ep.crossentropy(model.forward(inputs), labels).sum()
def loss_function(input_data):
    """Summed cross-entropy between model predictions and ``labels``."""
    predictions = model(input_data)
    total_loss = ep.crossentropy(predictions, labels).sum()
    return total_loss
def loss_fn(inputs: ep.Tensor, labels: ep.Tensor) -> ep.Tensor:
    """Summed cross-entropy of the wrapped model's raw outputs.

    The raw framework output of ``self.model.forward`` is converted back
    into an eagerpy tensor before the loss is computed.
    """
    raw = self.model.forward(inputs.tensor)
    return ep.crossentropy(ep.astensor(raw), labels).sum()