def test_2d_wo_missing(self):
    # Test cov on one 2D variable w/o missing values
    x = self.data.reshape(3, 4)
    assert_almost_equal(np.cov(x), cov(x))
    assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
    assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                        cov(x, rowvar=False, bias=True))
def test_1d_wo_missing(self):
    # Test cov on one 1D variable w/o missing values
    x = self.data
    assert_almost_equal(np.cov(x), cov(x))
    assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
    assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                        cov(x, rowvar=False, bias=True))
def test_2d_w_missing(self):
    # Test cov on 2D variable w/ missing value
    x = self.data
    x[-1] = masked
    x = x.reshape(3, 4)
    valid = np.logical_not(getmaskarray(x)).astype(int)
    frac = np.dot(valid, valid.T)
    xf = (x - x.mean(1)[:, None]).filled(0)
    assert_almost_equal(cov(x),
                        np.cov(xf) * (x.shape[1] - 1) / (frac - 1.0))
    assert_almost_equal(cov(x, bias=True),
                        np.cov(xf, bias=True) * x.shape[1] / frac)
    frac = np.dot(valid.T, valid)
    xf = (x - x.mean(0)).filled(0)
    assert_almost_equal(cov(x, rowvar=False),
                        np.cov(xf, rowvar=False) * (x.shape[0] - 1) / (frac - 1.0))
    assert_almost_equal(cov(x, rowvar=False, bias=True),
                        np.cov(xf, rowvar=False, bias=True) * x.shape[0] / frac)
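# A minimal standalone sketch (not part of the test suite above) of the
# normalisation that test_2d_w_missing checks: with missing values, ma.cov
# rescales the covariance of the demeaned, zero-filled data by
# (n - 1) / (n_ij - 1), where n_ij counts the columns in which both row i
# and row j are unmasked. The example data here are arbitrary.
import numpy as np
import numpy.ma as ma

x = ma.array(np.arange(12, dtype=float), mask=[0] * 11 + [1]).reshape(3, 4)
valid = np.logical_not(ma.getmaskarray(x)).astype(int)
counts = np.dot(valid, valid.T)              # n_ij: co-observed columns per row pair
xf = (x - x.mean(1)[:, None]).filled(0)      # demean rows, zero-fill masked entries
expected = np.cov(xf) * (x.shape[1] - 1) / (counts - 1.0)
assert ma.allclose(ma.cov(x), expected)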
def test_1d_w_missing(self):
    # Test cov on one 1D variable w/ missing values
    x = self.data
    x[-1] = masked
    x -= x.mean()
    nx = x.compressed()
    assert_almost_equal(np.cov(nx), cov(x))
    assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False))
    assert_almost_equal(np.cov(nx, rowvar=False, bias=True),
                        cov(x, rowvar=False, bias=True))
    # cov should refuse masked input when allow_masked=False
    try:
        cov(x, allow_masked=False)
    except ValueError:
        pass
    # 2 1D variables w/ missing values
    nx = x[1:-1]
    assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1]))
    assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False),
                        cov(x, x[::-1], rowvar=False))
    assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True),
                        cov(x, x[::-1], rowvar=False, bias=True))
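# A tighter variant of the bare try/except above (a sketch, not from the
# original suite): numpy.testing.assert_raises fails the test if no
# ValueError is raised. The sample input xm is hypothetical.
import numpy.ma as ma
from numpy.testing import assert_raises

xm = ma.array([1.0, 2.0, 3.0], mask=[0, 0, 1])
assert_raises(ValueError, ma.cov, xm, allow_masked=False)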
import os
import sys

from numpy import mean, ones, eye, shape, cov
from numpy.linalg import cholesky
from scipy.linalg import solve_triangular
# GPData, Gaussian, PseudoMarginalHyperparameterDistribution and
# GaussianKernel come from the experiment framework; their module paths
# are omitted here.

if len(sys.argv) != 3:
    print "usage:", sys.argv[0], "<experiment_dir_base> <number_of_experiments>"
    exit()

experiment_dir_base = str(sys.argv[1])
n = int(str(sys.argv[2]))

# loop over parameters here
experiment_dir = experiment_dir_base + \
    str(os.path.abspath(sys.argv[0])).split(os.sep)[-1].split(".")[0] + os.sep
print "running experiments", n, "times at base", experiment_dir

# load data
data, labels = GPData.get_glass_data()

# normalise and whiten dataset
data -= mean(data, 0)
L = cholesky(cov(data.T))
data = solve_triangular(L, data.T, lower=True).T
dim = shape(data)[1]

# prior on theta and posterior target estimate
theta_prior = Gaussian(mu=0 * ones(dim), Sigma=eye(dim) * 5)
distribution = PseudoMarginalHyperparameterDistribution(data, labels,
                                                        n_importance=100,
                                                        prior=theta_prior,
                                                        ridge=1e-3)

sigma = 23.0
print "using sigma", sigma
kernel = GaussianKernel(sigma=sigma)

for i in range(n):
if __name__ == '__main__':
    # load data
    data, labels = GPData.get_glass_data()

    # throw away some data
    n = 250
    seed(1)
    idx = permutation(len(data))
    idx = idx[:n]
    data = data[idx]
    labels = labels[idx]

    # normalise and whiten dataset
    data -= mean(data, 0)
    L = cholesky(cov(data.T))
    data = solve_triangular(L, data.T, lower=True).T
    dim = shape(data)[1]

    # prior on theta and posterior target estimate
    theta_prior = Gaussian(mu=0 * ones(dim), Sigma=eye(dim) * 5)
    target = PseudoMarginalHyperparameterDistribution(data, labels,
                                                      n_importance=100,
                                                      prior=theta_prior,
                                                      ridge=1e-3)

    # create sampler
    burnin = 10000
    num_iterations = burnin + 300000
    kernel = GaussianKernel(sigma=23.0)
    sampler = KameleonWindowLearnScale(target, kernel, stop_adapt=burnin)
    # sampler = AdaptiveMetropolisLearnScale(target)
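# A minimal standalone sketch (synthetic data, numpy/scipy only; not part of
# the experiment scripts above) verifying the whitening step both scripts use:
# after demeaning and solving against the lower Cholesky factor of the sample
# covariance, the transformed data should have identity covariance.
import numpy
from numpy import allclose, cov, eye, mean
from numpy.linalg import cholesky
from scipy.linalg import solve_triangular

numpy.random.seed(0)
raw = numpy.random.randn(500, 3).dot(numpy.random.randn(3, 3))  # correlated samples
raw = raw - mean(raw, 0)                  # demean
L = cholesky(cov(raw.T))                  # lower-triangular factor, C = L L^T
white = solve_triangular(L, raw.T, lower=True).T
assert allclose(cov(white.T), eye(3), atol=1e-8)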