# Assumed imports for these tests: the `J` backend alias and the metric/loss
# names match how they are used below, but the exact module paths are a guess.
import numpy as np
import torch
import pyjet.backend as J
from pyjet.metrics import Accuracy, AccuracyWithLogits, TopKAccuracy, AverageMetric
from pyjet.losses import categorical_crossentropy


def test_accuracy():
    accuracy = Accuracy()
    accuracy_with_logits = AccuracyWithLogits()
    top2 = TopKAccuracy(2)
    top3 = TopKAccuracy(3)
    fake_accuracy = AverageMetric(accuracy.score)
    # First try with a multi-class input
    x = J.Variable(J.Tensor([[0.9, 0, 0.1, 0], [0.2, 0.3, 0.1, 0.4]]))
    # Labels are the indices of the correct classes
    y = J.Variable(J.LongTensor([0, 0]))
    assert accuracy(x, y).item() == 50.
    assert fake_accuracy(x, y).item() == 50.
    # Applying the softmax again won't change the ordering of the scores
    assert accuracy_with_logits(x, y) == 50.
    assert top2(x, y) == 50.
    assert top3(x, y) == 100.
    # Now try with binary input: sigmoid(100) ~ 1 (wrong), sigmoid(-100) ~ 0 (right)
    x_logit = J.Variable(J.Tensor([[100.], [-100.]]))
    x = torch.sigmoid(x_logit)
    y = J.Variable(J.LongTensor([0, 0]))
    assert accuracy(x, y) == 50.
    assert fake_accuracy(x, y) == 50.
    assert accuracy_with_logits(x_logit, y) == 50.

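# For reference, a minimal sketch of the top-k accuracy semantics the test
# above relies on (assumed behavior, not PyJet's actual implementation): a
# sample counts as correct when its true label appears among the k
# highest-scoring classes.
def topk_accuracy_sketch(x, y, k):
    topk = x.topk(k, dim=1)[1]                   # (batch, k) class indices
    hits = (topk == y.unsqueeze(1)).any(dim=1)   # label among the top k?
    return 100.0 * hits.float().mean().item()
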
def test_accumulation():
    accuracy = Accuracy()
    # Batch 1: argmax predictions are [0, 3, 0] against labels [1, 2, 3] -- 0 of 3 correct
    x = J.Variable(
        J.Tensor([[0.9, 0, 0.1, 0], [0.2, 0.3, 0.1, 0.4],
                  [1.0, 0.0, 0.0, 0.0]]))
    y = J.Variable(J.LongTensor([1, 2, 3]))
    accuracy(x, y)
    # Batch 2: argmax predictions are [0, 3] against labels [0, 0] -- 1 of 2 correct
    x = J.Variable(J.Tensor([[0.9, 0, 0.1, 0], [0.2, 0.3, 0.1, 0.4]]))
    y = J.Variable(J.LongTensor([0, 0]))
    accuracy(x, y)
    # Running average over both batches: 1 correct out of 5 samples = 20%
    assert accuracy.accumulate() == 20.
    # After a reset, only the second batch counts: 1 of 2 correct = 50%
    accuracy = accuracy.reset()
    accuracy(x, y)
    assert accuracy.accumulate() == 50.

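# A minimal sketch of the accumulate/reset protocol exercised above (assumed
# semantics, not PyJet's code): each call scores one batch, accumulate()
# averages over every sample seen since the last reset().
class RunningAccuracySketch(object):
    def __init__(self):
        self.correct, self.total = 0, 0

    def __call__(self, x, y):
        preds = x.max(dim=1)[1]                    # argmax over class scores
        batch_correct = (preds == y).sum().item()
        self.correct += batch_correct
        self.total += y.size(0)
        return 100.0 * batch_correct / y.size(0)   # score for this batch alone

    def accumulate(self):
        return 100.0 * self.correct / self.total   # average over all batches

    def reset(self):
        self.correct, self.total = 0, 0
        return self
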
def cast_input_to_torch(self, x, volatile=False):
    # Remove any missing words from each sample
    x = [np.array([word for word in sample if word not in self.missing])
         for sample in x]
    # Compute the sequence lengths, then pad every sample to the longest one
    seq_lens = J.LongTensor([max(len(sample), self.min_len) for sample in x])
    x = np.array([L.pad_numpy_to_length(sample, length=seq_lens.max())
                  for sample in x], dtype=int)
    return self.embeddings(
        Variable(J.from_numpy(x).long(), volatile=volatile)), seq_lens

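# A plausible sketch of the padding helper used above (the real one lives in
# this repo's `L` module): right-pad a 1-D token array with zeros out to
# `length`. Shown only to make the padding step concrete.
def pad_numpy_to_length_sketch(sample, length):
    padded = np.zeros(int(length), dtype=sample.dtype)
    padded[:len(sample)] = sample
    return padded
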
def cast_input_to_torch(self, x, volatile=False):
    # Compute each image's (height, width), clamped to the minimum size
    seq_lens = J.LongTensor([[
        max(sample.shape[0], self.min_size),
        max(sample.shape[1], self.min_size)
    ] for sample in x])
    pad_shape, _ = seq_lens.max(dim=0)
    # Round the pad shape up so images can be downsampled and upsampled
    # through every level of the encoder and come back out at the same size
    pad_shape = next_power(pad_shape, len(self.encoder))
    x = np.stack([
        L.pad_numpy_to_shape(sample, shape=tuple(pad_shape))
        for sample in x
    ])
    # np.float was removed from modern NumPy, so cast explicitly to float32
    return Variable(J.from_numpy(x.astype(np.float32)).float(),
                    volatile=volatile), seq_lens

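# next_power's contract is assumed here (a guess at its behavior, not the
# repo's actual code): round each spatial dimension up to the nearest multiple
# of 2**depth, so that `depth` rounds of 2x pooling and upsampling reproduce
# the padded shape exactly.
def next_power_sketch(shape, depth):
    factor = 2 ** depth
    return [(int(s) + factor - 1) // factor * factor for s in shape]
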
def test_categorical_crossentropy():
    x = J.Variable(J.Tensor([[0.5, 0, 0.5, 0]]))
    y = J.Variable(J.LongTensor([0]))
    # The target class has probability 0.5, so the loss is -log(0.5) = log(2)
    np.testing.assert_almost_equal(0.6931471824645996,
                                   categorical_crossentropy(x, y).item())

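# Added sanity check on the constant asserted above: -log(0.5) = log(2), and
# 0.6931471824645996 is the float32 rounding of that exact value.
def test_categorical_crossentropy_constant():
    import math
    assert abs(math.log(2.0) - 0.6931471824645996) < 1e-7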