Example #1
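These snippets exercise an AffinePreconditioner, which maps inputs X and outputs Y through the affine transformation Xp = preIn (X - meanIn), Yp = preOut (Y - meanOut - predictor Xp), as the assertions in Example #4 confirm. The NumPy names are used unqualified, so the original test module presumably star-imports them; the sketch below lists explicit imports instead, and the module path for AffinePreconditioner is an assumption to be adapted to the library under test.

    # Explicit imports for the snippets below (a sketch; the originals likely
    # use `from numpy import *`). The AffinePreconditioner import path is an
    # assumption.
    import os
    import sys
    import unittest

    from pickle import dump, load
    from tempfile import mkstemp

    from numpy import abs, dot, hstack, max, mean, ones_like, sum, zeros_like
    from numpy.random import randn
    from numpy.linalg import slogdet

    from cmt.transforms import AffinePreconditioner  # assumed import path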
    def test_adjust_gradient(self):
        X = dot(randn(5, 5), randn(5, 1000)) + randn(5, 1)
        Y = dot(randn(2, 2), randn(2, 1000)) + dot(randn(2, 5), X)

        meanIn = randn(5, 1)
        meanOut = randn(2, 1)
        preIn = randn(5, 5)
        preOut = randn(2, 2)
        predictor = randn(2, 5)

        pre = AffinePreconditioner(meanIn, meanOut, preIn, preOut, predictor)

        f = lambda X, Y: sum(hstack([p.ravel() for p in pre(X, Y)]))

        # compute analytic gradient
        dfdX, dfdY = pre.adjust_gradient(ones_like(X), ones_like(Y))

        # compute numerical gradient
        h = 0.1
        dfdXn = zeros_like(X)
        X_copy = X.copy()
        for i in range(X.shape[0]):
            for j in range(X.shape[1]):
                X_copy[i, j] = X[i, j] + h
                fp = f(X_copy, Y)

                X_copy[i, j] = X[i, j] - h
                fn = f(X_copy, Y)

                dfdXn[i, j] = (fp - fn) / (2. * h)

                X_copy[i, j] = X[i, j]

        self.assertLess(max(abs(dfdXn - dfdX)), 1e-7)
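Because the preconditioner is affine in X, the central difference above is exact up to floating-point round-off, which is why the coarse step h = 0.1 still passes the 1e-7 tolerance. Note that adjust_gradient also returns dfdY, but only dfdX is verified in this test.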
Example #2
    def test_affine_preconditioner_logjacobian(self):
        meanIn = randn(5, 1)
        meanOut = randn(2, 1)
        preIn = randn(5, 5)
        preOut = randn(2, 2)
        predictor = randn(2, 5)

        pre = AffinePreconditioner(meanIn, meanOut, preIn, preOut, predictor)

        self.assertAlmostEqual(
            mean(pre.logjacobian(randn(5, 10), randn(2, 10))),
            slogdet(preOut)[1])
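The expected value follows from the form of the transformation: Yp = preOut (Y - meanOut - predictor Xp), so the Jacobian of Yp with respect to Y is preOut for every sample. The log-Jacobian is therefore the constant log|det(preOut)|, i.e. slogdet(preOut)[1], and taking the mean over the 10 samples leaves it unchanged.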
Example #3
    def test_affine_preconditioner_pickle(self):
        meanIn = randn(5, 1)
        meanOut = randn(2, 1)
        preIn = randn(5, 5)
        preOut = randn(2, 2)
        predictor = randn(2, 5)

        pre0 = AffinePreconditioner(meanIn, meanOut, preIn, preOut, predictor)

        fd, tmp_file = mkstemp()
        os.close(fd)  # close the descriptor returned by mkstemp; the file is reopened by name below

        # store transformation; pickle files must be opened in binary mode
        with open(tmp_file, 'wb') as handle:
            dump({'pre': pre0}, handle)

        # load transformation
        with open(tmp_file, 'rb') as handle:
            pre1 = load(handle)['pre']

        X, Y = randn(5, 100), randn(2, 100)

        X0, Y0 = pre0(X, Y)
        X1, Y1 = pre1(X, Y)

        # make sure linear transformation hasn't changed
        self.assertLess(max(abs(X0 - X1)), 1e-20)
        self.assertLess(max(abs(Y0 - Y1)), 1e-20)

        # test inverse after pickling
        Xp, Yp = pre1(X, Y)
        Xr, Yr = pre1.inverse(Xp, Yp)

        self.assertLess(max(abs(Xr - X)), 1e-10)
        self.assertLess(max(abs(Yr - Y)), 1e-10)
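A tidier pattern for the temporary file is tempfile.NamedTemporaryFile, which avoids dealing with the raw descriptor from mkstemp; a minimal sketch of the same round-trip, assuming pre0 from the test above:

    from pickle import dump, load
    from tempfile import NamedTemporaryFile

    # NamedTemporaryFile opens in binary mode by default, as pickle requires;
    # delete=False keeps the file on disk so it can be reopened by name
    with NamedTemporaryFile(delete=False) as handle:
        dump({'pre': pre0}, handle)
        tmp_file = handle.name

    with open(tmp_file, 'rb') as handle:
        pre1 = load(handle)['pre']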
Example #4
    def test_affine_preconditioner(self):
        X = dot(randn(5, 5), randn(5, 1000)) + randn(5, 1)
        Y = dot(randn(2, 2), randn(2, 1000)) + dot(randn(2, 5), X)

        meanIn = randn(5, 1)
        meanOut = randn(2, 1)
        preIn = randn(5, 5)
        preOut = randn(2, 2)
        predictor = randn(2, 5)

        pre = AffinePreconditioner(meanIn, meanOut, preIn, preOut, predictor)

        # apply the preconditioner and its inverse
        Xp, Yp = pre(X, Y)
        Xr, Yr = pre.inverse(Xp, Yp)

        # check that the preconditioner does what it is expected to do
        self.assertEqual(pre.dim_in, X.shape[0])
        self.assertEqual(pre.dim_out, Y.shape[0])
        self.assertLess(max(abs(Xp - dot(preIn, X - meanIn))), 1e-10)
        self.assertLess(max(abs(Yp - dot(preOut, Y - meanOut - dot(predictor, Xp)))), 1e-10)

        # check that the inverse works
        self.assertLess(max(abs(Xr - X)), 1e-10)
        self.assertLess(max(abs(Yr - Y)), 1e-10)

        # reference counts should not change
        Xrc = sys.getrefcount(X)
        Yrc = sys.getrefcount(Y)

        for _ in range(10):
            pre(X, Y)

        self.assertEqual(sys.getrefcount(X), Xrc)
        self.assertEqual(sys.getrefcount(Y), Yrc)
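The getrefcount check guards against reference-count leaks: if the preconditioner is implemented as a C extension (which this test presumably targets), a missing Py_DECREF on the input arrays would make their reference counts grow with every call.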