Example no. 1
	def assert_pogs_check_convergence(self, lib, blas_handle, solver, f_list,
									  g_list, objectives, residuals, tolerances,
									  localA, local_vars):
		"""convergence test

			(1) set

				obj_primal = f(y^{k+1/2}) + g(x^{k+1/2})
				obj_gap = <z^{k+1/2}, zt^{k+1/2}>
				obj_dual = obj_primal - obj_gap

				tol_primal = abstol * sqrt(m) + reltol * ||y^{k+1/2}||
				tol_dual = abstol * sqrt(n) + reltol * ||xt^{k+1/2}||

				res_primal = ||Ax^{k+1/2} - y^{k+1/2}||
				res_dual = ||A'yt^{k+1/2} + xt^{k+1/2}||

			in C and Python, check that these quantities agree

			(2) calculate solver convergence,

					res_primal <= tol_primal
					res_dual <= tol_dual,

				in C and Python, check that the results agree
		"""
		DIGITS = 7 - 2 * lib.FLOAT
		RTOL = 10**(-DIGITS)

		converged = lib.check_convergence(blas_handle, solver, objectives,
										  residuals, tolerances)

		self.load_all_local(lib, local_vars, solver)
		obj_py = func_eval_python(g_list, local_vars.x12)
		obj_py += func_eval_python(f_list, local_vars.y12)
		obj_gap_py = abs(local_vars.z12.dot(local_vars.zt12))
		obj_dual_py = obj_py - obj_gap_py

		tol_primal = tolerances.atolm + (
				tolerances.reltol * np.linalg.norm(local_vars.y12))
		tol_dual = tolerances.atoln + (
				tolerances.reltol * np.linalg.norm(local_vars.xt12))

		self.assertScalarEqual( objectives.gap, obj_gap_py, RTOL )
		self.assertScalarEqual( tolerances.primal, tol_primal, RTOL )
		self.assertScalarEqual( tolerances.dual, tol_dual, RTOL )

		res_primal = np.linalg.norm(
				localA.dot(local_vars.x12) - local_vars.y12)
		res_dual = np.linalg.norm(
				localA.T.dot(local_vars.yt12) + local_vars.xt12)

		self.assertScalarEqual( residuals.primal, res_primal, RTOL )
		self.assertScalarEqual( residuals.dual, res_dual, RTOL )
		self.assertScalarEqual( residuals.gap, abs(obj_gap_py), RTOL )

		converged_py = res_primal <= tolerances.primal and \
					   res_dual <= tolerances.dual

		self.assertEqual( converged, converged_py )
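
The docstring above writes the tolerances as abstol * sqrt(m) + reltol * ||y^{k+1/2}|| and abstol * sqrt(n) + reltol * ||xt^{k+1/2}||, while the test reads tolerances.atolm and tolerances.atoln, so presumably those fields already hold abstol * sqrt(m) and abstol * sqrt(n). For reference, the sketch below restates the whole check in plain NumPy; the function name and the dense arrays it takes are illustrative, not part of the test harness.

import numpy as np

def check_convergence_numpy(A, x12, y12, xt12, yt12, abstol, reltol):
    # plain-NumPy restatement of the criteria in the docstring above:
    # A is dense m-by-n, (x12, y12) the primal half-iterate and
    # (xt12, yt12) the scaled dual half-iterate
    m, n = A.shape
    res_primal = np.linalg.norm(A.dot(x12) - y12)      # ||Ax^{k+1/2} - y^{k+1/2}||
    res_dual = np.linalg.norm(A.T.dot(yt12) + xt12)    # ||A'yt^{k+1/2} + xt^{k+1/2}||
    tol_primal = abstol * np.sqrt(m) + reltol * np.linalg.norm(y12)
    tol_dual = abstol * np.sqrt(n) + reltol * np.linalg.norm(xt12)
    return (res_primal <= tol_primal) and (res_dual <= tol_dual)
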
Example no. 2
    def assert_pogs_check_convergence(self, lib, blas_handle, solver, f_list,
                                      g_list, objectives, residuals,
                                      tolerances, local_vars):
        """convergence test

            (1) set

                obj_primal = f(y^{k+1/2}) + g(x^{k+1/2})
                obj_gap = <z^{k+1/2}, zt^{k+1/2}>
                obj_dual = obj_primal - obj_gap

                tol_primal = abstol * sqrt(m) + reltol * ||y^{k+1/2}||
                tol_dual = abstol * sqrt(n) + reltol * ||xt^{k+1/2}||

                res_primal = ||Ax^{k+1/2} - y^{k+1/2}||
                res_dual = ||A'yt^{k+1/2} + xt^{k+1/2}||

            in C and Python, check that these quantities agree

            (2) calculate solver convergence,

                    res_primal <= tol_primal
                    res_dual <= tol_dual,

                in C and Python, check that the results agree
        """
        m, n = local_vars.m, local_vars.n
        DIGITS = 7 - 2 * lib.FLOAT
        RTOL = 10**(-DIGITS)

        converged = lib.check_convergence(blas_handle, solver, objectives,
                                          residuals, tolerances)

        self.load_all_local(lib, local_vars, solver)
        obj_py = func_eval_python(g_list, local_vars.x12)
        obj_py += func_eval_python(f_list, local_vars.y12)
        obj_gap_py = abs(local_vars.z12.dot(local_vars.zt12))
        obj_dual_py = obj_py - obj_gap_py

        tol_primal = tolerances.atolm + (tolerances.reltol *
                                         np.linalg.norm(local_vars.y12))
        tol_dual = tolerances.atoln + (tolerances.reltol *
                                       np.linalg.norm(local_vars.xt12))

        self.assertScalarEqual(objectives.gap, obj_gap_py, RTOL)
        self.assertScalarEqual(tolerances.primal, tol_primal, RTOL)
        self.assertScalarEqual(tolerances.dual, tol_dual, RTOL)

        z = solver.contents.z.contents
        opA = solver.contents.W.contents.A.contents
        dy, dy_py, dy_ptr = self.register_vector(lib, m, 'dy')
        dmu, dmu_py, dmu_ptr = self.register_vector(lib, n, 'dmu')

        self.assertCall(lib.vector_memcpy_vv(dy, z.primal12.contents.y))
        self.assertCall(
            opA.fused_apply(opA.data, 1, z.primal12.contents.x, -1, dy))
        self.assertCall(lib.vector_memcpy_av(dy_ptr, dy, 1))
        res_primal = np.linalg.norm(dy_py)

        self.assertCall(lib.vector_memcpy_vv(dmu, z.dual12.contents.x))
        self.assertCall(
            opA.fused_adjoint(opA.data, 1, z.dual12.contents.y, 1, dmu))
        self.assertCall(lib.vector_memcpy_av(dmu_ptr, dmu, 1))
        res_dual = np.linalg.norm(dmu_py)

        self.assertScalarEqual(residuals.primal, res_primal, RTOL)
        self.assertScalarEqual(residuals.dual, res_dual, RTOL)
        self.assertScalarEqual(residuals.gap, abs(obj_gap_py), RTOL)

        converged_py = res_primal <= tolerances.primal and \
                       res_dual <= tolerances.dual

        self.assertEqual(converged, converged_py)
        self.free_vars('dy', 'dmu')
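
This abstract-operator variant never forms A explicitly; the residual calculation only makes sense if opA.fused_apply(data, alpha, x, beta, y) overwrites y with alpha * A x + beta * y and fused_adjoint does the same with A'. That reading is an inference from the calls above, not a documented contract; the dense NumPy model below only illustrates why dy and dmu end up holding the primal and dual residual vectors.

import numpy as np

def fused_apply_numpy(A, alpha, x, beta, y):
    # assumed semantics of the fused operator call: y <- alpha * A x + beta * y
    y[:] = alpha * A.dot(x) + beta * y

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 3))
x12, y12 = rng.standard_normal(3), rng.standard_normal(5)

dy = y12.copy()                       # mirrors vector_memcpy_vv(dy, z.primal12.y)
fused_apply_numpy(A, 1, x12, -1, dy)  # leaves dy = A x12 - y12
assert np.isclose(np.linalg.norm(dy), np.linalg.norm(A.dot(x12) - y12))
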
Example no. 4
    def test_eval(self):
        m, n = self.shape
        scal = self.scalefactor
        a = 10 * np.random.rand(m)
        b = np.random.rand(m)
        c = np.random.rand(m)
        d = np.random.rand(m)
        e = np.random.rand(m)
        x_rand = np.random.rand(m)

        for (gpu, single_precision) in self.CONDITIONS:
            lib = self.libs.get(single_precision=single_precision, gpu=gpu)
            if lib is None:
                continue
            self.register_exit(lib.ok_device_reset)

            DIGITS = 7 - 2 * single_precision
            RTOL = 10**(-DIGITS)
            ATOLM = RTOL * m**0.5
            ATOLN = RTOL * n**0.5

            f, f_py, f_ptr = self.register_fnvector(lib, m, 'f')
            x, x_py, x_ptr = self.register_vector(lib, m, 'x')
            xout, xout_py, xout_ptr = self.register_vector(lib, m, 'xout')

            # populate
            x_py += x_rand
            self.assertCall(lib.vector_memcpy_va(x, x_ptr, 1))

            for hkey, hval in list(lib.function_enums.dict.items()):
                if self.VERBOSE_TEST:
                    print(hkey)

                # avoid domain errors with randomly generated data
                if 'Log' in hkey or 'Exp' in hkey or 'Entr' in hkey:
                    continue

                for i in range(m):
                    f_py[i] = lib.function(hval, a[i], b[i], c[i], d[i], e[i])
                self.assertCall(lib.function_vector_memcpy_va(f, f_ptr))

                # function evaluation
                f_list = [lib.function(*f_) for f_ in f_py]
                funcval_py = func_eval_python(f_list, x_rand)

                funcval_c = np.zeros(1).astype(lib.pyfloat)
                funcval_c_ptr = funcval_c.ctypes.data_as(lib.ok_float_p)
                self.assertCall(lib.function_eval_vector(f, x, funcval_c_ptr))

                # comparison is meaningful only when the C-side value is finite
                if np.isfinite(funcval_c[0]):
                    self.assertScalarEqual(funcval_py, funcval_c, RTOL)

                # proximal operator evaluation, random rho
                rho = 5 * np.random.rand()
                prox_py = prox_eval_python(f_list, rho, x_rand)
                self.assertCall(lib.prox_eval_vector(f, rho, x, xout))
                self.assertCall(lib.vector_memcpy_av(xout_ptr, xout, 1))
                self.assertVecEqual(xout_py, prox_py, ATOLM, RTOL)

            self.free_vars('f', 'x', 'xout')
            self.assertCall(lib.ok_device_reset())
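
test_eval builds each function vector from the parameters (h, a, b, c, d, e) and checks it against func_eval_python and prox_eval_python, which are imported from elsewhere and not shown here. A minimal reference for the separable evaluation, assuming the usual POGS-style parametrization f(x) = sum_i c_i * h(a_i x_i - b_i) + d_i x_i + (e_i / 2) x_i^2, might look like the sketch below; only two elementwise kernels are filled in, and the exact scaling conventions should be taken from the real helpers rather than from this sketch.

import numpy as np

# illustrative elementwise kernels h; the real function enum covers many more
ELEMENTWISE_H = {
    'Abs': np.abs,
    'Square': lambda z: 0.5 * z ** 2,
}

def func_eval_reference(h_name, a, b, c, d, e, x):
    # separable objective under the assumed parametrization:
    # sum_i c_i * h(a_i x_i - b_i) + d_i x_i + (e_i / 2) x_i^2
    z = a * x - b
    return np.sum(c * ELEMENTWISE_H[h_name](z) + d * x + 0.5 * e * x ** 2)
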
Example no. 5
	def test_eval(self):
		m, n = self.shape
		scal = self.scalefactor
		a = 10 * np.random.rand(m)
		b = np.random.rand(m)
		c = np.random.rand(m)
		d = np.random.rand(m)
		e = np.random.rand(m)
		x_rand = np.random.rand(m)

		for (gpu, single_precision) in self.CONDITIONS:
			lib = self.libs.get(single_precision=single_precision, gpu=gpu)
			if lib is None:
				continue
			self.register_exit(lib.ok_device_reset)

			DIGITS = 7 - 2 * single_precision
			RTOL = 10**(-DIGITS)
			ATOLM = RTOL * m**0.5
			ATOLN = RTOL * n**0.5

			f, f_py, f_ptr = self.register_fnvector(lib, m, 'f')
			x, x_py, x_ptr = self.register_vector(lib, m, 'x')
			xout, xout_py, xout_ptr = self.register_vector(lib, m, 'xout')

			# populate
			x_py += x_rand
			self.assertCall( lib.vector_memcpy_va(x, x_ptr, 1) )

			for hkey, hval in lib.function_enums.dict.items():
				if self.VERBOSE_TEST:
					print hkey

				# avoid domain errors with randomly generated data
				if 'Log' in hkey or 'Exp' in hkey or 'Entr' in hkey:
					continue

				for i in xrange(m):
					f_py[i] = lib.function(hval, a[i], b[i], c[i], d[i], e[i])
				self.assertCall( lib.function_vector_memcpy_va(f, f_ptr) )

				# function evaluation
				f_list = [lib.function(*f_) for f_ in f_py]
				funcval_py = func_eval_python(f_list, x_rand)

				funcval_c = np.zeros(1).astype(lib.pyfloat)
				funcval_c_ptr = funcval_c.ctypes.data_as(lib.ok_float_p)
				self.assertCall( lib.function_eval_vector(f, x,
														  funcval_c_ptr) )

				# comparison is meaningful only when the C-side value is finite
				if np.isfinite(funcval_c[0]):
					self.assertScalarEqual( funcval_py, funcval_c, RTOL )

				# proximal operator evaluation, random rho
				rho = 5 * np.random.rand()
				prox_py = prox_eval_python(f_list, rho, x_rand)
				self.assertCall( lib.prox_eval_vector(f, rho, x, xout) )
				self.assertCall( lib.vector_memcpy_av(xout_ptr, xout, 1) )
				self.assertVecEqual( xout_py, prox_py, ATOLM, RTOL )

			self.free_vars('f', 'x', 'xout')
			self.assertCall( lib.ok_device_reset() )
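
The prox comparison in both versions of test_eval draws a random rho and checks lib.prox_eval_vector against prox_eval_python, i.e. against prox_{f,rho}(v) = argmin_x f(x) + (rho/2)(x - v)^2. For the quadratic kernel h(z) = z^2 / 2 with a = c = 1 and b = d = e = 0 that minimizer has a simple closed form, which the short self-check below verifies numerically; it is a worked example of the definition, not code from the test suite.

import numpy as np

def prox_square(v, rho):
    # prox_{f, rho}(v) = argmin_x  x^2/2 + (rho/2)(x - v)^2  =  rho v / (1 + rho)
    return rho * v / (1.0 + rho)

# brute-force check of the closed form on a fine grid
v, rho = 0.7, 2.5
grid = np.linspace(-5.0, 5.0, 200001)
objective = 0.5 * grid ** 2 + 0.5 * rho * (grid - v) ** 2
assert abs(grid[np.argmin(objective)] - prox_square(v, rho)) < 1e-3
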
Example no. 6
	def assert_pogs_check_convergence(self, lib, blas_handle, solver, f_list,
									  g_list, objectives, residuals,
									  tolerances, local_vars):
		"""convergence test

			(1) set

				obj_primal = f(y^{k+1/2}) + g(x^{k+1/2})
				obj_gap = <z^{k+1/2}, zt^{k+1/2}>
				obj_dual = obj_primal - obj_gap

				tol_primal = abstol * sqrt(m) + reltol * ||y^{k+1/2}||
				tol_dual = abstol * sqrt(n) + reltol * ||xt^{k+1/2}||

				res_primal = ||Ax^{k+1/2} - y^{k+1/2}||
				res_dual = ||A'yt^{k+1/2} + xt^{k+1/2}||

			in C and Python, check that these quantities agree

			(2) calculate solver convergence,

					res_primal <= tol_primal
					res_dual <= tol_dual,

				in C and Python, check that the results agree
		"""
		m, n = local_vars.m, local_vars.n
		DIGITS = 7 - 2 * lib.FLOAT
		RTOL = 10**(-DIGITS)

		converged = lib.check_convergence(blas_handle, solver, objectives,
										  residuals, tolerances)

		self.load_all_local(lib, local_vars, solver)
		obj_py = func_eval_python(g_list, local_vars.x12)
		obj_py += func_eval_python(f_list, local_vars.y12)
		obj_gap_py = abs(local_vars.z12.dot(local_vars.zt12))
		obj_dual_py = obj_py - obj_gap_py

		tol_primal = tolerances.atolm + (
				tolerances.reltol * np.linalg.norm(local_vars.y12))
		tol_dual = tolerances.atoln + (
				tolerances.reltol * np.linalg.norm(local_vars.xt12))

		self.assertScalarEqual( objectives.gap, obj_gap_py, RTOL )
		self.assertScalarEqual( tolerances.primal, tol_primal, RTOL )
		self.assertScalarEqual( tolerances.dual, tol_dual, RTOL )

		z = solver.contents.z.contents
		opA = solver.contents.W.contents.A.contents
		dy, dy_py, dy_ptr = self.register_vector(lib, m, 'dy')
		dmu, dmu_py, dmu_ptr = self.register_vector(lib, n, 'dmu')

		self.assertCall( lib.vector_memcpy_vv(dy, z.primal12.contents.y) )
		self.assertCall( opA.fused_apply(opA.data, 1, z.primal12.contents.x,
										 -1, dy) )
		self.assertCall( lib.vector_memcpy_av(dy_ptr, dy, 1) )
		res_primal = np.linalg.norm(dy_py)

		self.assertCall( lib.vector_memcpy_vv(dmu, z.dual12.contents.x) )
		self.assertCall( opA.fused_adjoint(opA.data, 1, z.dual12.contents.y,
										 1, dmu) )
		self.assertCall( lib.vector_memcpy_av(dmu_ptr, dmu, 1) )
		res_dual = np.linalg.norm(dmu_py)

		self.assertScalarEqual( residuals.primal, res_primal, RTOL )
		self.assertScalarEqual( residuals.dual, res_dual, RTOL )
		self.assertScalarEqual( residuals.gap, abs(obj_gap_py), RTOL )

		converged_py = res_primal <= tolerances.primal and \
					   res_dual <= tolerances.dual

		self.assertEqual( converged, converged_py )
		self.free_vars('dy', 'dmu')
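
In each version of assert_pogs_check_convergence above, the gap term is computed as |<z^{k+1/2}, zt^{k+1/2}>| on the stacked ADMM variable, while the residuals are computed from its x- and y-blocks; this is consistent only if local_vars.z12 stacks x12 and y12 (and zt12 stacks xt12 and yt12), which is how the primal and dual dimensions line up. The snippet below is a stand-alone illustration of that stacking identity with arbitrary arrays, not code taken from the harness.

import numpy as np

# <z12, zt12> on the stacked variable equals x12'xt12 + y12'yt12
rng = np.random.default_rng(1)
x12, xt12 = rng.standard_normal(4), rng.standard_normal(4)
y12, yt12 = rng.standard_normal(6), rng.standard_normal(6)
z12, zt12 = np.concatenate((x12, y12)), np.concatenate((xt12, yt12))
assert np.isclose(abs(z12.dot(zt12)), abs(x12.dot(xt12) + y12.dot(yt12)))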