def test_UPGM_on_third_analytical_example_non_zero_start():
    """UPGM on the third analytical example (constrained dual), starting away from 0.

    See ConstrainedDualAnalyticalExampleInnerProblem for the problem and its
    solution statement. The dual optimum is the segment
    lambda*[1] = 0.5 - lambda*[0] with 0 <= lambda*[0] <= 0.5, at value -1.
    """
    print('# Test UPGM on Third Analytical Example (constrained dual). Start at NON-0.')
    analytical_inner_problem = ConstrainedDualAnalyticalExampleInnerProblem()
    dual_method = UniversalPGM(analytical_inner_problem.oracle,
                               analytical_inner_problem.projection_function,
                               dimension=analytical_inner_problem.dimension)
    # Move the initial point somewhere other than 0; project it to stay feasible.
    dual_method.lambda_hat_k = dual_method.projection_function(np.array([-2, 2]))
    logger = GenericDualMethodLogger(dual_method)

    for iteration in range(5):
        dual_method.dual_step()

    # Method should end close to lambda*, i.e. on the optimal segment.
    lambda_star = logger.lambda_k_iterates[-1]
    assert 0 <= lambda_star[0] <= 0.5
    # Exact float equality (==) is fragile for computed iterates; compare
    # within a tight tolerance instead.
    np.testing.assert_allclose(lambda_star[1], 0.5 - lambda_star[0], atol=1e-9)
    # with value close to the dual optimum
    np.testing.assert_allclose(logger.d_k_iterates[-1], -1.0, atol=0.01)
def test_averaged_UPGM_on_analytical_example():
    """Averaged UPGM on the analytical example.

    See AnalyticalExampleInnerProblem for the problem and solution statement.
    With averaging turned on the method is much slower: after 50 iterations it
    only reaches about [0.3, 0.7] (lambda* is [1, 1]) with dual value ~-0.8
    (optimum is -0.5).
    """
    print('# Test averaged UPGM on Analytical Example')
    analytical_inner_problem = AnalyticalExampleInnerProblem()
    dual_method = UniversalPGM(analytical_inner_problem.oracle,
                               analytical_inner_problem.projection_function,
                               dimension=analytical_inner_problem.dimension,
                               epsilon=0.01,
                               averaging=True)
    logger = GenericDualMethodLogger(dual_method)

    for iteration in range(50):
        dual_method.dual_step()

    # Averaging slows convergence: we only get to about [0.3, 0.7].
    np.testing.assert_allclose(logger.lambda_k_iterates[-1],
                               np.array([0.3, 0.7]),
                               atol=0.1)
    # Should be close to the optimal value (-0.5) but is at about -0.8.
    np.testing.assert_allclose(logger.d_k_iterates[-1], -0.8, atol=0.1)
def test_averaged_UFGM_on_analytical_example():
    """UFGM on the analytical example.

    See AnalyticalExampleInnerProblem for the problem and solution statement.

    NOTE(review): the test name and print say "Averaged" but the method is
    constructed with ``averaging=False``; the assertions match the
    non-averaged trajectory (lambda* ~ [1, 1], value ~ -0.5) — confirm
    whether the name or the flag is the intended one.
    """
    print('# Test Averaged UFGM on Analytical Example')
    analytical_inner_problem = AnalyticalExampleInnerProblem()
    dual_method = UniversalFGM(analytical_inner_problem.oracle,
                               analytical_inner_problem.projection_function,
                               dimension=analytical_inner_problem.dimension,
                               epsilon=0.1,
                               averaging=False)
    logger = GenericDualMethodLogger(dual_method)

    for iteration in range(40):
        dual_method.dual_step()

    # Method should end close to lambda*
    np.testing.assert_allclose(logger.lambda_k_iterates[-1],
                               np.array([1., 1.]),
                               rtol=1e-1,
                               atol=0)
    # with value close to the dual optimum
    np.testing.assert_allclose(logger.d_k_iterates[-1], -0.5, rtol=1e-1, atol=0)
def test_subgradient_method_on_analytical_example():
    """Subgradient method on the analytical example (2 inequalities).

    See AnalyticalExampleInnerProblem for the problem and solution statement.
    After 10 iterations the iterate should be near [0.91, 1.] with dual value
    near -0.54.
    """
    print('# Test Subgradient Method on Analytical Example (2 ineq)')
    analytical_inner_problem = AnalyticalExampleInnerProblem()
    dual_method = SubgradientMethod(
        analytical_inner_problem.oracle,
        analytical_inner_problem.projection_function,
        dimension=analytical_inner_problem.dimension,
        sense='max')
    logger = GenericDualMethodLogger(dual_method)

    for iteration in range(10):
        dual_method.dual_step()

    # Method should end close to lambda*
    np.testing.assert_allclose(logger.lambda_k_iterates[-1],
                               np.array([0.91, 1.]),
                               rtol=1e-2,
                               atol=0)
    # with value close to the dual optimum
    np.testing.assert_allclose(logger.d_k_iterates[-1], -0.54, rtol=1e-2, atol=0)
def test_DSA_on_analytical_example():
    """Double Simple Averaging (DSA) method on the analytical example.

    See AnalyticalExampleInnerProblem for the problem and solution statement.
    Lower gammas appear to give faster convergence, but more oscillations —
    hence the wider bound on the second coordinate.
    """
    print('# Test Double Simple Averaging Method on Analytical Example')
    analytical_inner_problem = AnalyticalExampleInnerProblem()
    GAMMA = 0.5
    dual_method = SGMDoubleSimpleAveraging(
        analytical_inner_problem.oracle,
        analytical_inner_problem.projection_function,
        dimension=analytical_inner_problem.dimension,
        gamma=GAMMA,
        sense='max')
    logger = GenericDualMethodLogger(dual_method)

    for iteration in range(20):
        dual_method.dual_step()

    # Method should end close to lambda* = [1, 1]; oscillations mean the
    # second coordinate may overshoot up to ~1.5.
    assert 0.95 <= logger.lambda_k_iterates[-1][0] <= 1.05  # first coordinate ~1.0
    assert 0.95 <= logger.lambda_k_iterates[-1][1] <= 1.55  # second coordinate in [1.0, 1.5]
    # with value close to the dual optimum
    np.testing.assert_allclose(logger.d_k_iterates[-1], -0.5, rtol=1e-1, atol=0)
def test_subgradient_method_sanity_checks():
    """Sanity-check SubgradientMethod against mock oracle/projection.

    Uses a constant stepsize rule with stepsize_0 = 1.5 and a mock oracle
    whose subgradient component is 1.3, so every step adds
    1.5 * 1.3 = 1.95 to lambda_k.
    """
    print('# Test Subgradient Method Sanity')
    subgradient_method = SubgradientMethod(mock_one_dim_oracle,
                                           mock_projection_function,
                                           dimension=1,
                                           stepsize_0=1.5,
                                           stepsize_rule='constant',
                                           sense='max')
    logger = GenericDualMethodLogger(subgradient_method)

    # verify initial state
    assert subgradient_method.lambda_k == 0

    for iteration in range(3):
        subgradient_method.dual_step()
    # start: lambda_k = 0
    # it 1:  lambda_k = 1.95 = 0 + 1.5 (stepsize_0) * 1.3 (diff_d_k from mock oracle)
    # it 2:  lambda_k = 3.9  = 1.95 + 1.95
    # it 3:  lambda_k = 5.85

    for iteration in range(3):
        # NOTE(review): stepsize_rule='constant' was passed above, so these
        # steps also add 1.95 each (the expected iterates below confirm it);
        # an earlier comment here referred to a 1/k rule, which does not apply.
        subgradient_method.dual_step()
    # it 4: lambda_k = 7.8, it 5: 9.75, ...

    print(subgradient_method.desc)

    # verify final states reported by the mock oracle
    assert subgradient_method.x_k == 1.1
    assert subgradient_method.d_k == 1.2
    np.testing.assert_allclose(
        logger.lambda_k_iterates,
        np.array([[0], [1.95], [3.9], [5.85], [7.8], [9.75]]),
        atol=0.1)
def test_DSA_on_Bertsekas_example():
    """DSA method on the Bertsekas counter-example, starting from [2, 2].

    See BertsekasCounterExample for the problem and solution statement.
    Lower gammas appear to give faster convergence, but more oscillations.
    """
    print('# DSA on Bertsekas Example')
    analytical_inner_problem = BertsekasCounterExample()
    GAMMA = 0.5
    dual_method = SGMDoubleSimpleAveraging(
        analytical_inner_problem.oracle,
        analytical_inner_problem.projection_function,
        dimension=analytical_inner_problem.dimension,
        gamma=GAMMA,  # was a hard-coded 0.5, duplicating the constant above
        sense='max')
    logger = GenericDualMethodLogger(dual_method)

    # move the initial point away from the origin
    lambda_0 = np.array([2, 2])
    dual_method.lambda_k = lambda_0

    for iteration in range(20):
        dual_method.dual_step()

    # Method should end close to lambda*
    assert -3 <= logger.lambda_k_iterates[-1][0] <= -2.2  # first coordinate ~ -3.0
    assert 0 <= logger.lambda_k_iterates[-1][1] <= 0.5  # second coordinate ~0
    # with value close to the dual optimum
    np.testing.assert_allclose(logger.d_k_iterates[-1], 23.15, rtol=1e-1, atol=0)
def test_UPGM_on_second_analytical_example():
    """UPGM on the second analytical example (1 equality, 1 inequality).

    See SecondAnalyticalExampleInnerProblem for the problem and solution
    statement. Expected optimum: lambda* = [1, 0], dual value -1.
    """
    print('# Test UPGM on Second Analytical Example (1 eq, 1 ineq)')
    analytical_inner_problem = SecondAnalyticalExampleInnerProblem()
    dual_method = UniversalPGM(analytical_inner_problem.oracle,
                               analytical_inner_problem.projection_function,
                               dimension=analytical_inner_problem.dimension,
                               epsilon=0.01)
    logger = GenericDualMethodLogger(dual_method)

    for iteration in range(5):
        dual_method.dual_step()

    # Method should end close to lambda*
    np.testing.assert_allclose(logger.lambda_k_iterates[-1],
                               np.array([1., 0.]),
                               atol=0.1)
    # with value close to the dual optimum
    np.testing.assert_allclose(logger.d_k_iterates[-1], -1, rtol=1e-1, atol=0)