def test_linprog(self):
    """Forward LP solves on a small fixed instance, checked against known vertices."""
    # Sample problem instance (column-vector convention for c and b)
    cost = io.tensor([[-1.0], [0.0]])
    A_ub = io.tensor([[-2.0, -5.0], [-2.0, 3.0], [2.0, 1.0]])
    b_ub = io.tensor([[-10.0], [6.0], [10.0]])
    A_eq = io.tensor([[0.0, 1.0]])
    b_eq = io.tensor([[1.0]])

    # Check INEQUALITIES only
    x = io.linprog(cost, A_ub, b_ub)
    self.assertVectorEqual(x, [5.0, 0.0])

    # Check INEQUALITIES and EQUALITIES
    x = io.linprog(cost, A_ub, b_ub, A_eq, b_eq)
    self.assertVectorEqual(x, [4.5, 1.0])

    # Check that linprog doesn't blow up when c is normal to constraints
    # (result is deliberately unused; we only care that the solve completes)
    for row in A_ub:
        x = io.linprog(-row.view(-1, 1), A_ub, b_ub)
def test_inverse_linprog(self):
    """Inverse-LP parameter recovery: learned parameters must reproduce targets."""
    # Sample problem instance
    c = io.tensor([[1.0], [1.0]])
    A_ub = io.tensor([[-2.0, -5.0], [-2.0, 3.0], [2.0, 1.0]])
    b_ub = io.tensor([[-10.0], [6.0], [10.0]])
    A_eq = io.tensor([[0.0, 1.0]])
    b_eq = io.tensor([[1.0]])
    x_target = io.tensor([[5.0], [0.0]])

    # Check that solution before training is (0.0, 2.0)
    x_sol = io.linprog(c, A_ub, b_ub)
    self.assertVectorEqual(x_sol, [0.0, 2.0])

    # Check INEQUALITIES only
    learned = io.inverse_linprog(x_target, c, A_ub, b_ub)
    x_sol = io.linprog(*learned)
    self.assertVectorEqual(x_sol, [5.0, 0.0])

    # Check INEQUALITIES and EQUALITIES
    # This one needs eps_decay to work
    learned = io.inverse_linprog(
        x_target, c, A_ub, b_ub, A_eq, b_eq, eps_decay=True)
    x_sol = io.linprog(*learned)
    self.assertVectorEqual(x_sol, [4.5, 1.0])

    # Check ABS DUALITY GAP with INEQUALITIES and EQUALITIES
    learned = io.inverse_linprog(
        x_target, c, A_ub, b_ub, A_eq, b_eq, loss=io.abs_duality_gap)
    x_sol = io.linprog(*learned)
    # Currently linprog doesn't get 3 decimals correct, but ok
    self.assertVectorEqual(x_sol, [4.5, 1.0])

    # A zero duality gap means c_inv is orthogonal to (x - x_target).
    c_inv, *rest = io.inverse_linprog(
        x_target, c, A_ub, b_ub, A_eq, b_eq, loss=io.abs_duality_gap)
    x_sol = io.linprog(c_inv, *rest)
    self.assertAlmostEqual((c_inv.t() @ (x_sol - x_target)).item(), 0)

    # Check MULTIPOINT finds solution with minimal average error,
    # regardless if initial c has support of one target point
    # (Note this doesn't work for abs_duality_gap yet; reasons unknown)
    x_target = io.tensor([[0.0, 5.0, 4.9, 4.8], [2.0, 0.0, 0.1, 0.2]])
    learned = io.inverse_linprog(x_target, c, A_ub, b_ub)
    x_sol = io.linprog(*learned)
    # linprog's line-search manages to get IPM to generate a non-vertex
    self.assertVectorEqual(x_sol, [4.85, 0.06], places=2)
return c, A_ub, b_ub, A_eq, b_eq # Evaluate the parametric LP at specific values of u plp_true = ExamplePLP(weights=[float(sys.argv[4])]) x_train = io.tensor( np.array(sys.argv[1].split(',')).astype(np.float).reshape( (-1, 1))) # targets given as empirical rates from participants u_train = io.tensor( np.array(sys.argv[2].split(',')).astype(np.float).reshape( (-1, 1))) # the Pb/Pf value declared as u on this model u_train_sal_model = torch.cat( [io.linprog(*plp_true(ui)).detach().t() for ui in u_train]) # Plot it xylim = ((0, 2), (0, 2)) cxy = (5, 5) plt.figure(figsize=(16, 4)) for i, w in enumerate([7, 6, 5, 1]): plt.subplot(141 + i) plt.title('w=%.1f' % w) iop.plot_parametric_linprog(ExamplePLP(weights=[w]), u_train, xylim=xylim, cxy=cxy, show_solutions=True)