def test_shapes(self):
    """
    Only allow initialisation if dim(A) = n*m, dim(b) = n.
    """
    with raises(ValueError):
        least_squares(
            A=[[1], [2]],
            b=1
        )  # A too large: two rows, but b has only one entry
    with raises(ValueError):
        least_squares(
            A=2,
            b=[1, 2]
        )  # b too large: two entries, but A has only one row

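# Reference sketch of the shape validation the two cases above assume.
# This is hypothetical -- the real least_squares class may normalise and
# check shapes differently -- and it exists only to document the expected
# behaviour, not to replace the class's own validation.
@staticmethod
def _reference_shape_check(A, b):
    A = np.atleast_2d(np.asarray(A, dtype=float))
    b = np.atleast_1d(np.asarray(b, dtype=float))
    if A.shape[0] != b.shape[0]:
        raise ValueError(
            f"A has {A.shape[0]} rows but b has {b.shape[0]} entries"
        )
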
def test_norm_properties(self):
    """
    Check that the calculated norm has the expected properties.
    """
    LS = TestLeastSquaresClass.random_obj()
    for x in np.random.uniform(low=-1, high=1, size=(5, 5)):
        x = x.reshape((-1, 1))
        for a in np.random.uniform(low=-1, high=1, size=5):
            msg = (
                f"Failed on A = {LS.A}, b = {LS.b}, with "
                f"x = {x} and a = {a}"
            )
            assert not (LS(a*x) == approx(a**2*LS(x))), msg
            # since we are calculating ||Ax - b||^2 we should not have
            # ||aAx - b||^2 == a^2||Ax - b||^2, because b is not scaled
            # with x (this relies on the random a not landing near a
            # value where the two sides happen to coincide)
            scaledLS = least_squares(LS.A, a*LS.b)
            assert a**2*LS(x) == approx(scaledLS(a*x)), msg
            # for a norm a^2||Ax - b||^2 == ||aAx - ab||^2
            assert LS(x) <= (
                np.linalg.norm(LS.A@x) + np.linalg.norm(LS.b))**2, msg
            # triangle inequality: ||Ax - b||^2 <= (||Ax|| + ||-b||)^2
            # and ||-b|| = ||b||
            assert LS(x) >= 0, msg  # norms are never negative

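def test_norm_scaling_counterexample(self):
    """
    Hand-worked counterexample for the first property checked in
    test_norm_properties (a sketch; assumes the scalar constructor
    exercised in test_one_dimensional): with A = 1, b = 1 and a = 2,
    ||aAx - b||^2 = (2 - 1)^2 = 1 while a^2||Ax - b||^2 = 4 * 0 = 0,
    so the two differ as soon as b != 0.
    """
    LS = least_squares(A=1, b=1)
    assert LS(2*1) == approx(1)       # ||2*1*1 - 1||^2 = 1
    assert 2**2 * LS(1) == approx(0)  # 4 * ||1*1 - 1||^2 = 0
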
@staticmethod
def random_obj(size=5):
    """
    Create a random least-squares object for testing.
    """
    return least_squares(
        A=np.random.uniform(low=-1, high=1, size=(size, size)),
        b=np.random.uniform(low=-1, high=1, size=size)
    )

def test_one_dimensional(self):
    """
    Does the edge case n=1, m=1 work?
    """
    LS = least_squares(
        A=1,
        b=1
    )
    assert LS(1) == approx(0)  # 1*1 - 1 = 0

def test_empty_norm(self):
    """
    Empty norms should always be zero.
    """
    LS = least_squares(
        A=np.zeros(shape=(5, 5)),
        b=np.zeros(5)
    )
    x = np.random.uniform(low=-1, high=1, size=5)
    assert LS(x) == approx(0)  # ||0|| = 0

def test_determined(self):
    """
    The residual at the solution of determined (and underdetermined)
    systems should be exactly 0. The underdetermined case is sketched
    in test_underdetermined below.
    """
    LS = least_squares(
        A=[[1, 2, 3], [4, 5, 6], [2, 5, 7]],  # non-singular, so determined
        b=[1, 2, 3]
    )
    x_ = LS.solve_minimum()['x*']
    assert LS(x_) == approx(0)

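# Sketch of the underdetermined case mentioned in the docstring of
# test_determined. It assumes solve_minimum accepts non-square A, which
# test_overdetermined already relies on; if the class restricts A further,
# this sketch does not apply.
def test_underdetermined(self):
    LS = least_squares(
        A=[[1, 2, 3], [4, 5, 6]],  # 2 equations, 3 unknowns
        b=[1, 2]
    )
    x_ = LS.solve_minimum()['x*']
    assert LS(x_) == approx(0)  # underdetermined -> an exact solution exists
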
def set_of_quadratic_objectives_nd():
    objectives, start_points, search_directions, true_mins = [], [], [], []
    rng = np.random.default_rng(seed=8008135)
    for n in range(2, 11):
        A = rng.uniform(low=-1, high=1, size=(n, n))
        b = rng.uniform(low=-1, high=1, size=(n, 1))
        LS = least_squares(A, b)

        def _deriv(x, A=A, b=b):
            # analytical derivative of ||Ax - b||^2
            return 2 * A.T @ A @ x - 2 * A.T @ b

        objectives.append(
            ObjectiveFunctionWrapper(
                func=LS,  # reuse the object instead of constructing it twice
                jac=_deriv,
            ))
        start_points.append(rng.uniform(low=-1, high=1, size=(n, 1)))
        search_directions.append(-objectives[-1].jac(start_points[-1]))
        # search direction: negative derivative at the last start point
        true_mins.append(LS.solve_minimum()['x*'])
    return objectives, start_points, search_directions, true_mins

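# Standalone sanity check (a sketch added here, not part of the original
# suite) that the analytical derivative used in _deriv matches a central
# finite difference. It assumes only numpy and the identity
# d/dx ||Ax - b||^2 = 2 A^T (Ax - b); names and tolerances are illustrative.
def _check_deriv_against_finite_differences(A, b, x, h=1e-6):
    analytic = 2 * A.T @ A @ x - 2 * A.T @ b
    numeric = np.zeros_like(x, dtype=float)
    for i in range(x.shape[0]):
        e = np.zeros_like(x, dtype=float)
        e[i] = h  # perturb only coordinate i
        f_plus = np.linalg.norm(A @ (x + e) - b)**2
        f_minus = np.linalg.norm(A @ (x - e) - b)**2
        numeric[i] = (f_plus - f_minus) / (2 * h)  # central difference
    assert np.allclose(analytic, numeric, atol=1e-4)
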
def test_create_jac_nd(self, rng):
    """
    Check that the jacobian is correctly calculated for n-D least
    squares problems.
    """
    for _ in range(10):  # try 10 random least squares problems
        size = rng.integers(2, 10)
        LS = least_squares.least_squares(
            A=rng.uniform(low=-1, high=1, size=(size, size)),
            b=rng.uniform(low=-1, high=1, size=size)
        )
        # if jac isn't specified, one is created automatically
        objectivewrapper = wrappers.ObjectiveFunctionWrapper(LS)
        # compare results at 10 random points with the analytical jacobian
        self._compare_evaluation_at_points(
            rng.uniform,
            objectivewrapper.jac,
            lambda x: 2 * LS.A.T @ LS.A @ x - 2 * LS.A.T @ LS.b,
            n_points=10,
            shape=(size, 1)
        )

def test_overdetermined(self):
    """
    Solutions to overdetermined systems should be local (and global)
    minima. Right now the test methodology is just to check that the
    solution is a local minimum; more complete tests are desirable
    (a normal-equations check is sketched below).
    """
    LS = least_squares(
        A=[[1, 2], [3, 5], [7, 11]],  # primes ensure independence
        b=[13, 17, 19]
    )  # overdetermined
    x_, residuals, rank, _ = LS.solve_minimum().values()
    assert rank < LS.b.shape[0]  # ensure the system is overdetermined
    assert residuals > 0  # no exact solution exists
    assert np.linalg.norm(x_) > 0  # overdetermined -> no solution at 0
    for _ in range(100):
        random_perturbation = np.random.uniform(
            low=-1,
            high=1,
            size=x_.shape,
        )
        # keep the perturbation small compared to x_
        random_perturbation *= 1e-3 * np.linalg.norm(x_)
        assert LS(x_) < LS(x_ + random_perturbation)

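# Sketch of the "more complete" check the docstring above asks for: the
# least squares minimiser must satisfy the normal equations
# A^T A x* = A^T b. It assumes LS.A, LS.b and solve_minimum()['x*'] behave
# as in the tests above and have mutually compatible shapes.
def test_overdetermined_normal_equations(self):
    LS = least_squares(
        A=[[1, 2], [3, 5], [7, 11]],
        b=[13, 17, 19]
    )
    x_ = LS.solve_minimum()['x*']
    lhs = LS.A.T @ LS.A @ x_
    rhs = LS.A.T @ LS.b
    assert lhs == approx(rhs)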