def testSmoothSVMth_functional(self):
    F = Topk_Smooth_SVM(self.labels, self.k, self.tau)
    res_th = F(V(self.x), V(self.y))
    res_py = svm_topk_smooth_py_1(V(self.x), V(self.y), self.tau, self.k)
    assert_all_close(res_th, res_py)
def testSmoothSVM(self):
    smooth_svm_th = SmoothTop1SVM(self.n_classes, tau=self.tau)
    res_th = smooth_svm_th(V(self.x), V(self.y))
    res_py = smooth_svm_py(V(self.x), V(self.y), self.tau)
    assert_all_close(res_th, res_py)
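# Hedged reference for the quantity testSmoothSVM checks, assuming SmoothTop1SVM
# implements the standard smooth top-1 hinge
# L(x, y) = tau * logsumexp_j((x_j + [j != y] - x_y) / tau).
# `_smooth_top1_svm_sketch` is an illustrative helper, not part of the library
# under test.
def _smooth_top1_svm_sketch(x, y, tau):
    # x: (n_samples, n_classes) scores, y: (n_samples,) integer labels
    delta = torch.ones_like(x).scatter_(1, y.unsqueeze(1), 0)  # margin 1 iff j != y
    x_y = x.gather(1, y.unsqueeze(1))                          # ground-truth score
    return tau * torch.logsumexp((x + delta - x_y) / tau, dim=1).mean()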
def testMaxSVM(self):
    max_svm_th = MaxTop1SVM(self.n_classes, alpha=self.alpha)
    res_th = max_svm_th(V(self.x), V(self.y))
    res_py = max_svm_py(V(self.x), V(self.y), alpha=self.alpha)
    assert_all_close(res_th, res_py)
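# In the same spirit, a hedged sketch of the non-smooth top-1 hinge that
# MaxTop1SVM presumably computes, reading `alpha` as the margin; the clamp at
# zero is implicit because j == y contributes x_y with zero margin.
# `_max_top1_svm_sketch` is an illustrative name only.
def _max_top1_svm_sketch(x, y, alpha=1.0):
    delta = alpha * torch.ones_like(x).scatter_(1, y.unsqueeze(1), 0)
    x_y = x.gather(1, y.unsqueeze(1)).squeeze(1)
    return ((x + delta).max(dim=1)[0] - x_y).mean()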
def testMaxSVMtopk(self):
    max_svm_th = MaxTopkSVM(self.n_classes, k=self.k)
    res_th = max_svm_th(V(self.x), V(self.y))
    res_py = svm_topk_max_py(V(self.x), V(self.y), k=self.k)
    assert_all_close(res_th, res_py)
def testSmoothSVMth_loss_scales(self):
    svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau, k=self.k)
    for scale in (1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3):
        x = self.x * scale
        res_th = svm_topk_smooth_th(V(x), V(self.y))
        res_py = svm_topk_smooth_py_1(V(x), V(self.y), self.tau, self.k).mean()
        assert_all_close(res_th, res_py)
def testMulTensors(self):
    prod_ = LogTensor(V(self.x)) * LogTensor(V(self.y))
    res_sb = prod_.torch()
    res_th = self.x.double() + self.y.double()  # log(u1 * u2) = log(u1) + log(u2)
    assert_all_close(res_th, res_sb)
def testSumTensors(self):
    sum_ = LogTensor(V(self.x)) + LogTensor(V(self.y))
    res_sb = sum_.torch()
    res_th = torch.log(torch.exp(self.x.double()) + torch.exp(self.y.double()))
    assert_all_close(res_th, res_sb)
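# The two tests above rely on log-space arithmetic: if a LogTensor stores
# t = log(u), then multiplying the underlying values is addition of logs and
# adding them is a log-sum-exp. A minimal standalone sketch of that semantics
# (an assumption about LogTensor's contract, not its implementation):
def _log_mul(t1, t2):
    return t1 + t2                                      # log(u1 * u2)

def _log_add(t1, t2):
    return torch.logsumexp(torch.stack((t1, t2)), 0)    # log(u1 + u2)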
def testSmoothSVMth_loss(self):
    for k in range(2, self.k + 1):
        svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau, k=k)
        res_th = svm_topk_smooth_th(V(self.x), V(self.y))
        res_py = svm_topk_smooth_py_1(V(self.x), V(self.y), self.tau, k).mean()
        assert_all_close(res_th, res_py)
def testLogSumProductExp(self):
    self.n_samples = 25
    self.n_classes = 20
    self.k = 7
    self.x = torch.randn(self.n_samples, self.n_classes)
    res_th = LogSumExp(self.k, p=1)(V(self.x)).squeeze()
    res1_th, res2_th = res_th[0], res_th[1]
    res1_py = np.log(sum_product_py(V(torch.exp(self.x)), self.k - 1))
    res2_py = np.log(sum_product_py(V(torch.exp(self.x)), self.k))
    assert_all_close(res1_th, res1_py)
    assert_all_close(res2_th, res2_py)
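# testLogSumProductExp compares LogSumExp against what appears to be the
# elementary symmetric polynomial sigma_k of each row, i.e. the sum over all
# k-subsets of the product of their entries. A brute-force reference sketch
# (`_esp_sketch` is a hypothetical name; O(n choose k), illustration only):
import itertools

def _esp_sketch(x, k):
    # x: (n_samples, n_classes)
    out = x.new_zeros(x.size(0))
    for idx in itertools.combinations(range(x.size(1)), k):
        out += x[:, list(idx)].prod(dim=1)
    return out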
def test_backward(self):
    self.n_samples = 25
    self.n_classes = 1000
    self.k = 20
    self.x = torch.randn(self.n_samples, self.n_classes)
    self.x, _ = torch.sort(self.x, dim=1, descending=True)
    for tau in (5e-3, 1e-2, 5e-2, 1e-1, 5e-1, 1, 5, 1e1, 5e2, 1e3):
        x = self.x / (tau * self.k)
        # drop samples whose top-k margin is too sharp for a meaningful
        # comparison against the smooth autograd reference
        top, _ = x.topk(self.k + 1, 1)
        thresh = 1e2
        hard = torch.ge(top[:, self.k - 1] - top[:, self.k], math.log(thresh))
        smooth = hard.eq(0)
        x = x[smooth.unsqueeze(1).expand_as(x)].view(-1, x.size(1))
        if x.numel() == 0:
            print('empty tensor')
            return
        X_auto = Variable(x.double(), requires_grad=True)
        X_man = Variable(x, requires_grad=True)
        res1_auto, res2_auto = log_sum_exp_k_autograd(X_auto, self.k)
        res1_auto, res2_auto = res1_auto.squeeze(), res2_auto.squeeze()
        res_man = LogSumExp(self.k)(X_man).squeeze()
        res1_man = res_man[0]
        res2_man = res_man[1]
        proj1 = torch.ones(res1_auto.size())
        proj2 = torch.ones(res2_auto.size())
        proj_auto = torch.dot(V(proj1.double()), res1_auto) + \
            torch.dot(V(proj2.double()), res2_auto)
        proj_man = torch.dot(V(proj1), res1_man) + \
            torch.dot(V(proj2), res2_man)
        proj_auto.backward()
        proj_man.backward()
        # check forward
        assert_all_close(res1_auto, res1_man, atol=1e0, rtol=1e-3)
        assert_all_close(res2_auto, res2_man, atol=1e0, rtol=1e-3)
        # check backward
        assert_all_close(X_auto.grad, X_man.grad, atol=0.05, rtol=1e-2)
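# The projections above turn two vector-valued outputs into a scalar
# <proj, f(x)> so that a single backward() compares full Jacobian-vector
# products. A standalone illustration of the same trick on torch.logsumexp:
def _projection_trick_demo():
    x = torch.randn(3, 5, dtype=torch.double, requires_grad=True)
    out = torch.logsumexp(x, dim=1)    # vector-valued function of x
    proj = torch.ones_like(out)
    torch.dot(proj, out).backward()    # gradient of <proj, out> w.r.t. x
    return x.grad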
def testMulZero(self):
    prod_ = LogTensor(V(self.x)) * 0
    res_sb = prod_.torch()
    # multiplying the underlying value by zero gives 0, i.e. -inf in log-space
    res_th = -np.inf * np.ones(res_sb.size())
    assert_all_close(res_th, res_sb)
def testMulNonZero(self):
    prod_ = LogTensor(V(self.x)) * self.nonzero_const
    res_sb = prod_.torch()
    res_th = self.x.double() + math.log(self.nonzero_const)
    assert_all_close(res_th, res_sb)
def testSumZero(self):
    sum_ = LogTensor(V(self.x)) + 0
    res_sb = sum_.torch()
    res_th = self.x
    assert_all_close(res_th, res_sb)
def testSumNonZero(self):
    sum_ = LogTensor(V(self.x)) + self.nonzero_const
    res_sb = sum_.torch()
    res_th = torch.log(torch.exp(self.x.double()) + self.nonzero_const)
    assert_all_close(res_th, res_sb)
def testGradSmoothSVMth_loss(self):
    for k in range(2, self.k + 1):
        svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau, k=k)
        for scale in (1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4):
            x = self.x * scale
            x = Variable(x, requires_grad=True)
            assert gradcheck(lambda x: svm_topk_smooth_th(x, V(self.y)),
                             (x,), atol=1e-2, rtol=1e-3,
                             eps=max(1e-4 * scale, 1e-2)), \
                "failed with scale={}, k={}".format(scale, k)
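# torch.autograd.gradcheck validates analytical gradients against central
# finite differences and expects double-precision inputs; a minimal standalone
# usage on a known-good function, independent of the loss under test:
def _gradcheck_demo():
    from torch.autograd import gradcheck
    x = torch.randn(4, 6, dtype=torch.double, requires_grad=True)
    return gradcheck(lambda t: torch.logsumexp(t, dim=1).sum(), (x,), eps=1e-6)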
def testSmoothSVMpy(self):
    res_py_1 = svm_topk_smooth_py_1(V(self.x), V(self.y), self.tau, self.k)
    res_py_2 = svm_topk_smooth_py_2(V(self.x), V(self.y), self.tau, self.k)
    assert_all_close(res_py_1, res_py_2)