def save(self, filename):
    """
    Save the filter object to the specified file.

    :param filename: Filename for the save file.
    :type filename: string
    """
    Auxiliary.save(self, filename)
def test_derivatives(self):
    print "Testing derivative for p-nested symmetric distribution with radial gamma"
    sys.stdout.flush()
    myu = 10 * np.random.rand(1)[0]
    mys = 10 * np.random.rand(1)[0]
    n = 10
    L = Auxiliary.LpNestedFunction('(0,0,(1,1:4),4,(1,5:8),8:10)')
    p = Distributions.LpNestedSymmetric({'f': L,
                                         'n': n,
                                         'rp': Distributions.Gamma({'s': mys, 'u': myu})})
    dat = p.sample(50)
    df = p.dldx(dat)
    h = 1e-8
    # forward finite difference of the log-likelihood, one data dimension at a time
    df2 = np.array(dat.X * np.Inf)  # placeholder, filled below
    for k in range(n):
        y = np.array(dat.X)
        y[k, :] += h
        df2[k, :] = (p.loglik(Data(y)) - p.loglik(dat)) / h
    self.assertFalse(np.max(np.abs(df - df2).flatten()) > self.llTol,
                     'Difference in derivative of log-likelihood for p-nested symmetric greater than '
                     + str(self.llTol))
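# All derivative tests in this file follow the same recipe: compare the
# analytic gradient returned by dldx against a forward finite difference of
# the log-likelihood. A minimal, self-contained sketch of that recipe, using
# numpy only (the helper name numeric_gradient is illustrative and not part
# of the library):
#
#   import numpy as np
#
#   def numeric_gradient(loglik, X, h=1e-8):
#       # loglik maps an (n, m) array to m log-likelihood values; the result
#       # has the same shape as X. The forward difference has O(h) error, so
#       # h must be small, but not so small that cancellation dominates.
#       G = np.zeros_like(X)
#       base = loglik(X)
#       for k in range(X.shape[0]):
#           Y = X.copy()
#           Y[k, :] += h
#           G[k, :] = (loglik(Y) - base) / h
#       return G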
def test_estimate(self):
    print "Testing parameter estimation of Exponential Power distribution ..."
    sys.stdout.flush()
    myp = 2.0 * np.random.rand(1)[0] + .5
    mys = 10.0 * np.random.rand(1)[0]
    p1 = Distributions.ExponentialPower({'p': myp, 's': mys})
    dat = p1.sample(50000)
    myp = 2.0 * np.random.rand(1)[0] + .5
    mys = 10.0 * np.random.rand(1)[0]
    p2 = Distributions.ExponentialPower({'p': myp, 's': mys})
    p2.estimate(dat)
    prot = {}
    prot['message'] = 'Difference in parameters for Exponential Power distribution greater than threshold'
    prot['s-threshold'] = self.TolParamS
    prot['p-threshold'] = self.TolParamP
    prot['true model'] = p1
    prot['estimated model'] = p2
    # both parameters must be recovered to within their respective tolerances
    self.assertTrue(np.abs(p2.param['p'] - p1.param['p']) < self.TolParamP and
                    np.abs(p2.param['s'] - p1.param['s']) < self.TolParamS,
                    Auxiliary.prettyPrintDict(prot))
def test_derivatives(self):
    print "Testing derivatives w.r.t. data ... "
    sys.stdout.flush()
    P = []
    for k in range(10):
        myp = 2.0 * np.random.rand(1)[0] + .5
        mys = 3.0 * np.random.rand(1)[0] + 1.0
        P.append(Distributions.ExponentialPower({'p': myp, 's': mys}))
    p = Distributions.ProductOfExponentialPowerDistributions({'P': P})
    dat = p.sample(100)
    h = 1e-7
    tol = 1e-4
    Y0 = dat.X.copy()
    df = p.dldx(dat)
    df2 = 0.0 * df
    for i in xrange(dat.size(0)):
        y = Y0.copy()
        y[i, :] = y[i, :] + h
        df2[i, :] = (p.loglik(Data(y)) - p.loglik(dat)) / h
    prot = {}
    prot['message'] = ('Difference in derivative of log-likelihood for product of '
                       'Exponential Power distributions greater than ' + str(tol))
    prot['max difference'] = np.max(np.abs((df - df2).flatten()))
    prot['mean difference'] = np.mean(np.abs((df - df2).flatten()))
    self.assertTrue(np.max(np.abs(df - df2)) < tol, Auxiliary.prettyPrintDict(prot))
def test_RadialFactorizationVsLpNestedNonlinearICA(self):
    print "Testing Radial Factorization vs. Lp-nested ICA..."
    sys.stdout.flush()
    p = np.random.rand() + 1.0
    psource = Distributions.LpSphericallySymmetric(
        {'p': p,
         'rp': Distributions.Gamma({'u': 2.0 * np.random.rand() + 1.0,
                                    's': 5.0 * np.random.rand() + 1.0})})
    F = NonlinearTransformFactory.RadialFactorization(psource)
    dat = psource.sample(10)
    L = Auxiliary.LpNestedFunction('(0,0:2)', np.array([p]))
    psource2 = Distributions.LpNestedSymmetric({'f': L,
                                                'n': 2.0,
                                                'rp': psource.param['rp'].copy()})
    F2 = NonlinearTransformFactory.LpNestedNonLinearICA(psource2)
    tol = 1e-6
    self.assertTrue(np.max(np.abs(F.logDetJacobian(dat) - F2.logDetJacobian(dat))) < tol,
                    'log-determinants of Lp-nested ICA and Radial Factorization are not equal!')
def test_logdeterminantInCombinationWithLinearFilters(self):
    print "Testing Log-Determinant of Nonlinear Lp-nested ICA in combination with linear filters..."
    sys.stdout.flush()
    L = Auxiliary.LpNestedFunction()
    p = Distributions.LpNestedSymmetric({'f': L})
    dat = p.sample(10)
    Flin1 = LinearTransformFactory.oRND(dat)
    Flin2 = LinearTransform(np.random.randn(dat.size(0), dat.size(0))
                            + 0.1 * np.eye(dat.size(0)))
    Fnl = NonlinearTransformFactory.LpNestedNonLinearICA(p)
    # every bracketing of linear and nonlinear filters must yield the same log-determinant
    Fd = {}
    Fd['NL'] = Fnl
    Fd['L1*L2'] = Flin1 * Flin2
    Fd['L1*NL'] = Flin1 * Fnl
    Fd['NL*L1'] = Fnl * Flin1
    Fd['Nl*L1*L2'] = Fnl * Flin1 * Flin2
    Fd['Nl*(L1*L2)'] = Fnl * (Flin1 * Flin2)
    Fd['(Nl*L1)*L2'] = (Fnl * Flin1) * Flin2
    Fd['L1*Nl*L2'] = Flin1 * Fnl * Flin2
    Fd['L1*(Nl*L2)'] = Flin1 * (Fnl * Flin2)
    Fd['(L1*Nl)*L2'] = (Flin1 * Fnl) * Flin2
    Fd['L2*L1*Nl'] = Flin2 * Flin1 * Fnl
    Fd['L2*(L1*Nl)'] = Flin2 * (Flin1 * Fnl)
    Fd['(L2*L1)*Nl'] = (Flin2 * Flin1) * Fnl
    for (tk, F) in Fd.items():
        print "\t ... testing " + tk
        sys.stdout.flush()
        n, m = dat.size()
        h = 5 * 1e-7
        logdetJ = F.logDetJacobian(dat)
        for i in range(m):
            # build the Jacobian at sample i by finite differences
            J = np.zeros((n, n))
            for j in range(n):
                tmp = dat[:, i]
                tmp2 = tmp.copy()
                tmp2.X[j, :] = tmp2.X[j, :] + h
                J[:, j] = ((F * tmp2).X - (F * tmp).X)[:, 0] / h
            # log|det J| via QR; the abs guards against negative diagonal entries of R
            Q, R = linalg.qr(J)
            logdet2 = np.sum(np.log(np.abs(np.diag(R))))
            self.assertFalse(np.abs(logdet2 - logdetJ[i]) > self.DetTol,
                             'Determinant of Jacobian deviates by %.4g which is more than %.4g'
                             % (np.abs(logdet2 - logdetJ[i]), self.DetTol))
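# The QR decomposition above computes log|det J| as the sum of log|R_ii|: for
# J = QR with orthogonal Q, |det J| = |det R| = prod |R_ii|, which avoids
# forming det(J) and the associated overflow/underflow. A small sanity check,
# numpy only and purely illustrative (not part of the test suite):
#
#   import numpy as np
#
#   J = np.random.randn(10, 10)
#   Q, R = np.linalg.qr(J)
#   logdet_qr = np.sum(np.log(np.abs(np.diag(R))))
#   sign, logdet_ref = np.linalg.slogdet(J)   # reference value
#   assert abs(logdet_qr - logdet_ref) < 1e-10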
def test_pdfloglikconsistency(self):
    print "Testing consistency of pdf and loglik ... "
    sys.stdout.flush()
    p = Distributions.MixtureOfLogNormals({'K': 5})
    dat = p.sample(100)
    tol = 1e-6
    ll = p.loglik(dat)
    pdf = np.log(p.pdf(dat))
    prot = {}
    prot['message'] = 'Difference in log(p(x)) and loglik(x) for MixtureOfLogNormals greater than ' + str(tol)
    prot['max diff'] = np.max(np.abs(pdf - ll))
    prot['mean diff'] = np.mean(np.abs(pdf - ll))
    self.assertFalse(np.max(np.abs(ll - pdf)) > tol, Auxiliary.prettyPrintDict(prot))
def test_derivatives(self):
    print "Testing derivatives w.r.t. data ... "
    sys.stdout.flush()
    p = Distributions.MixtureOfLogNormals({'K': 5})
    dat = p.sample(100)
    h = 1e-8
    tol = 1e-4
    # the distribution is univariate, so shifting all entries of the 1 x m data
    # matrix by h gives the forward difference in the single data dimension
    y = np.array(dat.X) + h
    df = p.dldx(dat)
    df2 = (p.loglik(Data(y)) - p.loglik(dat)) / h
    prot = {}
    prot['message'] = 'Difference in derivative of log-likelihood for MixtureOfLogNormals greater than ' + str(tol)
    prot['max diff'] = np.max(np.abs(df - df2))
    prot['mean diff'] = np.mean(np.abs(df - df2))
    self.assertFalse(np.mean(np.abs(df - df2)) > tol, Auxiliary.prettyPrintDict(prot))
def test_derivatives(self):
    print "Testing derivatives w.r.t. data ... "
    sys.stdout.flush()
    myp = 2.0 * np.random.rand(1)[0] + .5
    mys = 3.0 * np.random.rand(1)[0] + 1.0
    p = Distributions.ExponentialPower({'p': myp, 's': mys})
    dat = p.sample(100)
    h = 1e-7
    tol = 1e-4
    y = np.array(dat.X) + h
    df = p.dldx(dat)
    df2 = (p.loglik(Data(y)) - p.loglik(dat)) / h
    prot = {}
    prot['message'] = 'Difference in derivative of log-likelihood for ExponentialPower greater than ' + str(tol)
    prot['max difference'] = np.max(np.abs(df - df2))
    prot['mean difference'] = np.mean(np.abs(df - df2))
    self.assertTrue(np.max(np.abs(df - df2)) < tol, Auxiliary.prettyPrintDict(prot))
def test_logdeterminantOfICA(self):
    print "Testing Log-Determinant of Nonlinear Lp-nested ICA ..."
    sys.stdout.flush()
    L = Auxiliary.LpNestedFunction()
    p = Distributions.LpNestedSymmetric({'f': L})
    dat = p.sample(10)
    F = NonlinearTransformFactory.LpNestedNonLinearICA(p)
    n, m = dat.size()
    h = 1e-7
    logdetJ = F.logDetJacobian(dat)
    for i in range(m):
        # build the Jacobian at sample i by finite differences
        J = np.zeros((n, n))
        for j in range(n):
            tmp = dat[:, i]
            tmp2 = tmp.copy()
            tmp2.X[j, :] = tmp2.X[j, :] + h
            J[:, j] = ((F * tmp2).X - (F * tmp).X)[:, 0] / h
        # compare against log|det J|; the abs guards against a negative determinant
        self.assertFalse(np.abs(np.log(np.abs(linalg.det(J))) - logdetJ[i]) > self.DetTol,
                         'Determinant of Jacobian deviates by more than ' + str(self.DetTol) + '!')
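# For larger dimensions, forming det(J) explicitly (as above) can overflow or
# underflow in floating point even when log|det J| is perfectly representable;
# np.linalg.slogdet returns the sign and log-magnitude directly and sidesteps
# this. A minimal illustration, numpy only (not part of the test):
#
#   import numpy as np
#
#   J = 0.01 * np.eye(200)                 # det(J) = 1e-400 underflows to 0.0
#   assert np.linalg.det(J) == 0.0         # ... so log(det(J)) would be -inf
#   sign, logdet = np.linalg.slogdet(J)    # slogdet recovers it
#   assert abs(logdet - 200 * np.log(0.01)) < 1e-8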
def test_estimate(self):
    print "Testing parameter estimation for p-nested symmetric distribution with radial gamma"
    sys.stdout.flush()
    L = Auxiliary.LpNestedFunction('(0,0,(1,1:3),3,(2,4:7))')
    L.p = np.random.rand(3) * 1.5 + .5
    d = Distributions.LpNestedSymmetric({'f': L, 'n': L.n[()]})
    L2 = Auxiliary.LpNestedFunction('(0,0,(1,1:3),3,(2,4:7))')
    L2.p = np.random.rand(3) * 1.5 + .5
    # search bounds for the p of the fitted model, chosen to contain the true p
    L.lb = 0.0 * L.p
    L.ub = 2.0 * L2.p
    rd2 = Distributions.Gamma({'u': 5 * np.random.rand(), 's': 10 * np.random.rand()})
    # create Distributions object and sample
    d2 = Distributions.LpNestedSymmetric({'f': L2, 'n': L2.n[()], 'rp': rd2})

    def checkParams(d):
        # the estimated p, u and s must all lie within their tolerances of the truth
        self.assertFalse(np.max(np.abs(d.param['f'].p - d2.param['f'].p)) > self.TolParam['p'],
                         'Estimated parameter p deviates by more than ' + str(self.TolParam['p']) + '!')
        self.assertFalse(np.abs(d.param['rp'].param['u'] - d2.param['rp'].param['u']) > self.TolParam['u'],
                         'Estimated parameter u deviates by more than ' + str(self.TolParam['u']) + '!')
        self.assertFalse(np.abs(d.param['rp'].param['s'] - d2.param['rp'].param['s']) > self.TolParam['s'],
                         'Estimated parameter s deviates by more than ' + str(self.TolParam['s']) + '!')

    print "\t ... checking greedy method"
    sys.stdout.flush()
    dat = d2.sample(50000)
    d.estimate(dat, method="greedy")
    checkParams(d)

    print "\t ... checking Nelder-Mead method"
    sys.stdout.flush()
    d = Distributions.LpNestedSymmetric({'f': L, 'n': L.n[()]})
    d.estimate(dat, method="neldermead")
    checkParams(d)

    print "\t ... checking Gradient method"
    sys.stdout.flush()
    d = Distributions.LpNestedSymmetric({'f': L, 'n': L.n[()]})
    d.estimate(dat, method="gradient")
    checkParams(d)

    print "[Ok]"