def test_recover_lambert(self):
    """Draw normal samples, apply the inverse W transform, and check that
    ``g.igmm`` recovers the generating parameters.

    See Table 2 of Georg's paper.  These results seem a little worse, but
    we'd have to run many replications to directly compare.
    """
    mu, sigma = 0, 1
    for delta in (0, 1. / 3, 1., 1.5):
        for n in (50, 100, 1000):
            samples = np.random.normal(loc=mu, scale=sigma, size=n)
            warped = g.inverse(samples, (mu, sigma, delta))
            mu_hat, sigma_hat, delta_hat = g.igmm(warped)
            row = '%0.3f\t%d\t%0.3f\t%0.3f\t%0.3f' % (
                delta, n, mu_hat, sigma_hat, delta_hat)
            print(row.expandtabs(10))
            # Estimates should tighten at roughly a 1/sqrt(n) rate.
            tol = 10. / np.sqrt(n)
            assert np.abs(mu - mu_hat) < tol
            assert np.abs(sigma - sigma_hat) < tol
            assert np.abs(delta - delta_hat) < tol
def test_recover_lambert():
    """Generate data from a normal, apply the W transform, and check that
    ``g.igmm`` recovers the generating parameters.

    See Table 2 of Georg's paper.  These results seem a little worse, but
    we'd have to run many replications to directly compare.
    """
    mu, sigma = 0, 1
    # Fixed: Python 2 `print x.expandtabs(10)` statements; under Python 3
    # `print (s).expandtabs(10)` calls .expandtabs on print's None return.
    print("del_true\tns\tmu\tsigma\tdelta".expandtabs(10))
    for delta in [0, 1.0 / 3, 1.0, 1.5]:
        for n in [50, 100, 1000]:
            x = np.random.normal(loc=mu, scale=sigma, size=n)
            y = g.inverse(x, (mu, sigma, delta))
            mu_prime, sigma_prime, delta_prime = g.igmm(y)
            print(("%0.3f\t%d\t%0.3f\t%0.3f\t%0.3f"
                   % (delta, n, mu_prime, sigma_prime, delta_prime)).expandtabs(10))
            # Estimates should converge at roughly a 1/sqrt(n) rate.
            assert np.abs(mu - mu_prime) < 10.0 / np.sqrt(n)
            assert np.abs(sigma - sigma_prime) < 10.0 / np.sqrt(n)
            assert np.abs(delta - delta_prime) < 10.0 / np.sqrt(n)
def test_normality_increase_lambert(self):
    """Check that random (heavy-tailed and experimental) data scores as more
    normal after Lambert W inference, saving before/after probability plots."""
    datasets = [np.random.standard_cauchy(size=ns), experimental_data]
    for idx, raw in enumerate(datasets):
        print('Distribution %d' % idx)
        print('Before')
        print(('anderson: %0.3f\tshapiro: %0.3f'
               % (anderson(raw)[0], shapiro(raw)[0])).expandtabs(30))
        stats.probplot(raw, dist="norm", plot=plt)
        plt.savefig(os.path.join(self.test_dir, '%d_before.png' % idx))
        plt.clf()
        # Infer the transform parameters, then pull the data back toward normal.
        tau = g.igmm(raw)
        transformed = g.w_t(raw, tau)
        print('After')
        print(('anderson: %0.3f\tshapiro: %0.3f'
               % (anderson(transformed)[0], shapiro(transformed)[0])).expandtabs(30))
        stats.probplot(transformed, dist="norm", plot=plt)
        plt.savefig(os.path.join(self.test_dir, '%d_after.png' % idx))
        plt.clf()
def test_normality_increase_lambert():
    """Generate random data and check that it is more normal after inference,
    saving before/after probability plots for visual inspection."""
    # Fixed: Python 2 print statements; under Python 3 the form
    # `print (s).expandtabs(30)` calls .expandtabs on print's None return.
    for i, y in enumerate([np.random.standard_cauchy(size=ns), experimental_data]):
        print("Distribution %d" % i)
        print("Before")
        print(("anderson: %0.3f\tshapiro: %0.3f"
               % (anderson(y)[0], shapiro(y)[0])).expandtabs(30))
        stats.probplot(y, dist="norm", plot=pylab)
        pylab.savefig("%d_before.png" % i)
        pylab.clf()
        # Infer transform parameters, then map the data back toward normal.
        tau = g.igmm(y)
        x = g.w_t(y, tau)
        print("After")
        print(("anderson: %0.3f\tshapiro: %0.3f"
               % (anderson(x)[0], shapiro(x)[0])).expandtabs(30))
        stats.probplot(x, dist="norm", plot=pylab)
        pylab.savefig("%d_after.png" % i)
        pylab.clf()