def test_heterogeneous_var_aliases(self):
    ############################################################
    from pykeops.torch import Genred
    from pykeops.numpy.utils import squared_distances

    # Mix named aliases with an explicit Var(index, dim, cat) reference.
    aliases = ['p=Pm(0,1)', 'x=Vi(1,3)', 'y=Vj(2,3)']
    formula = 'Square(p-Var(3,1,1))*Exp(-SqNorm2(y-x))'

    # KeOps reduction, backend picked automatically.
    my_routine = Genred(formula, aliases, reduction_op='Sum', axis=1,
                        dtype='float32')
    res_keops = my_routine(self.sigmac, self.xc, self.yc, self.gc,
                           backend='auto')

    # Plain-NumPy reference computation of the same sum.
    res_numpy = np.sum(
        (self.sigma - self.g.T) ** 2
        * np.exp(-squared_distances(self.x, self.y)),
        axis=1,
    )

    # Both implementations must agree up to numerical precision.
    self.assertTrue(
        np.allclose(res_keops.cpu().data.numpy().ravel(),
                    res_numpy.ravel(), atol=1e-6)
    )
def test_generic_syntax_softmax(self):
    ############################################################
    from pykeops.numpy import Genred

    aliases = ['p=Pm(0,1)', 'a=Vj(1,1)', 'x=Vi(2,3)', 'y=Vj(3,3)']
    formula = 'Square(p-a)*Exp(-SqNorm2(x-y))'
    formula_weights = 'y'

    # Exercise every GPU backend when one is available, else CPU only.
    if pykeops.gpu_available:
        backends = ['auto', 'GPU_1D', 'GPU_2D', 'GPU']
    else:
        backends = ['auto']

    for b, t in itertools.product(backends, self.type_to_test):
        with self.subTest(b=b, t=t):
            # KeOps weighted soft-max reduction over the j axis.
            my_routine = Genred(
                formula,
                aliases,
                reduction_op='SumSoftMaxWeight',
                axis=1,
                dtype=t,
                formula2=formula_weights,
            )
            res_keops = my_routine(
                self.sigma.astype(t),
                self.g.astype(t),
                self.x.astype(t),
                self.y.astype(t),
                backend=b,
            )

            # NumPy reference: soft-max weighted average of the rows of w.
            def softmax_ref(scores, w):
                # subtract the row-wise max for numerical robustness
                scores = scores - np.max(scores, axis=1)[:, None]
                e = np.exp(scores)
                return e @ w / np.sum(e, axis=1)[:, None]

            res_numpy = softmax_ref(
                (self.sigma - self.g.T) ** 2
                * np.exp(-squared_distances(self.x, self.y)),
                self.y,
            )

            # Both implementations must agree up to numerical precision.
            self.assertTrue(
                np.allclose(res_keops.ravel(), res_numpy.ravel(), atol=1e-6)
            )
def test_heterogeneous_var_aliases(self):
    ############################################################
    from pykeops.numpy import Genred

    dtype = self.type_to_test[0]

    # Mix named aliases with an explicit Var(index, dim, cat) reference.
    aliases = ["p=Pm(0,1)", "x=Vi(1,3)", "y=Vj(2,3)"]
    formula = "Square(p-Var(3,1,1))*Exp(-SqNorm2(y-x))"

    # KeOps reduction, backend picked automatically.
    my_routine = Genred(formula, aliases, reduction_op="Sum", axis=1)
    res_keops = my_routine(
        self.sigma.astype(dtype),
        self.x.astype(dtype),
        self.y.astype(dtype),
        self.g.astype(dtype),
        backend="auto",
    )

    # Plain-NumPy reference computation of the same sum.
    res_numpy = np.sum(
        (self.sigma - self.g.T) ** 2
        * np.exp(-squared_distances(self.x, self.y)),
        axis=1,
    )

    # Both implementations must agree up to numerical precision.
    self.assertTrue(np.allclose(res_keops.ravel(), res_numpy, atol=1e-6))
def test_generic_syntax_lse(self):
    ############################################################
    from pykeops.numpy import Genred

    aliases = ['p=Pm(0,1)', 'a=Vj(1,1)', 'x=Vi(2,3)', 'y=Vj(3,3)']
    formula = 'Square(p-a)*Exp(-SqNorm2(x-y))'

    # Exercise every GPU backend when one is available, else CPU only.
    if pykeops.gpu_available:
        backends = ['auto', 'GPU_1D', 'GPU_2D', 'GPU']
    else:
        backends = ['auto']

    for b, t in itertools.product(backends, self.type_to_test):
        with self.subTest(b=b, t=t):
            # KeOps log-sum-exp reduction over the j axis.
            my_routine = Genred(formula, aliases,
                                reduction_op='LogSumExp', axis=1, dtype=t)
            res_keops = my_routine(
                self.sigma.astype(t),
                self.g.astype(t),
                self.x.astype(t),
                self.y.astype(t),
                backend=b,
            )

            # NumPy reference via the helper log_sum_exp.
            res_numpy = log_sum_exp(
                (self.sigma - self.g.T) ** 2
                * np.exp(-squared_distances(self.x, self.y)),
                axis=1,
            )

            # Both implementations must agree up to numerical precision.
            self.assertTrue(
                np.allclose(res_keops.ravel(), res_numpy, atol=1e-6)
            )
def test_generic_syntax_softmax(self):
    ############################################################
    from pykeops.torch import Genred

    aliases = ["p=Pm(1)", "a=Vj(1)", "x=Vi(3)", "y=Vj(3)"]
    formula = "Square(p-a)*Exp(-SqNorm2(x-y))"
    formula_weights = "y"

    # Exercise every GPU backend when one is available, else CPU only.
    if pykeops.config.gpu_available:
        backends = ["auto", "GPU_1D", "GPU_2D", "GPU"]
    else:
        backends = ["auto"]

    for b in backends:
        with self.subTest(b=b):
            # KeOps weighted soft-max reduction in double precision.
            my_routine = Genred(
                formula,
                aliases,
                reduction_op="SumSoftMaxWeight",
                axis=1,
                dtype="float64",
                formula2=formula_weights,
            )
            res_keops = my_routine(
                self.sigmacd, self.gcd, self.xcd, self.ycd, backend=b
            )

            # NumPy reference: soft-max weighted average of the rows of w.
            def softmax_ref(scores, w):
                # subtract the row-wise max for numerical robustness
                scores = scores - np.max(scores, axis=1)[:, None]
                e = np.exp(scores)
                return e @ w / np.sum(e, axis=1)[:, None]

            res_numpy = softmax_ref(
                (self.sigma - self.g.T) ** 2
                * np.exp(-squared_distances(self.x, self.y)),
                self.y,
            )

            # Both implementations must agree up to numerical precision.
            self.assertTrue(
                np.allclose(res_keops.cpu().data.numpy(), res_numpy, atol=1e-6)
            )