def exp_cone(self):
    """Test exponential cone problems.

    For each candidate solver: build a problem with an ``exp`` constraint
    and an equivalent problem with an explicit ``ExpCone`` constraint,
    push the latter through ``ConeMatrixStuffing``, solve both, and check
    that the stuffed and inverted solutions match the reference solve.
    """
    for solver in self.solvers:
        # Basic.
        p = Problem(Minimize(self.b),
                    [exp(self.a) <= self.b, self.a >= 1])
        pmod = Problem(Minimize(self.b),
                       [ExpCone(self.a, Constant(1), self.b),
                        self.a >= 1])
        self.assertTrue(ConeMatrixStuffing().accepts(pmod))
        p_new = ConeMatrixStuffing().apply(pmod)
        if not solver.accepts(p_new[0]):
            # BUG FIX: was `return`, which silently aborted the test and
            # skipped every remaining solver; `continue` only skips the
            # solver that cannot handle the stuffed problem.
            continue
        result = p.solve(solver.name())
        sltn = solve_wrapper(solver, p_new[0])
        self.assertAlmostEqual(sltn.opt_val, result, places=1)
        inv_sltn = ConeMatrixStuffing().invert(sltn, p_new[1])
        self.assertAlmostEqual(inv_sltn.opt_val, result, places=1)
        for var in pmod.variables():
            self.assertItemsAlmostEqual(inv_sltn.primal_vars[var.id],
                                        var.value, places=1)

        # More complex.
        p = Problem(Minimize(self.b),
                    [exp(self.a/2 + self.c) <= self.b+5,
                     self.a >= 1, self.c >= 5])
        pmod = Problem(Minimize(self.b),
                       [ExpCone(self.a/2 + self.c, Constant(1), self.b+5),
                        self.a >= 1, self.c >= 5])
        self.assertTrue(ConeMatrixStuffing().accepts(pmod))
        result = p.solve(solver.name())
        p_new = ConeMatrixStuffing().apply(pmod)
        sltn = solve_wrapper(solver, p_new[0])
        # Looser tolerance (places=0) here — presumably because the
        # composite exp argument solves less accurately; TODO confirm.
        self.assertAlmostEqual(sltn.opt_val, result, places=0)
        inv_sltn = ConeMatrixStuffing().invert(sltn, p_new[1])
        self.assertAlmostEqual(inv_sltn.opt_val, result, places=0)
        for var in pmod.variables():
            self.assertItemsAlmostEqual(inv_sltn.primal_vars[var.id],
                                        var.value, places=0)
def inverse(expr):
    """Return a function inverting the constraint ``expr(x) <= t``.

    The returned callable maps a bound ``t`` on the atom's value to the
    corresponding bound on the atom's argument.

    Raises
    ------
    ValueError
        If ``expr`` is not one of the atom types handled here.
    """
    if type(expr) == atoms.ceil:
        return lambda t: atoms.floor(t)
    elif type(expr) == atoms.floor:
        return lambda t: atoms.ceil(t)
    elif type(expr) == NegExpression:
        return lambda t: -t
    elif type(expr) == atoms.exp:
        # exp(x) <= t is infeasible unless t >= 0.
        return lambda t: atoms.log(t) if t.is_nonneg() else -np.inf
    elif type(expr) == atoms.log:
        return lambda t: atoms.exp(t)
    elif type(expr) == atoms.log1p:
        return lambda t: atoms.exp(t) - 1
    elif type(expr) == atoms.logistic:
        # logistic(x) = log(1 + exp(x)) >= 0, so t must be nonnegative.
        return lambda t: atoms.log(atoms.exp(t) - 1) if t.is_nonneg() else -np.inf
    elif type(expr) == atoms.power:
        def power_inv(t):
            # BUG FIX: expr.p is a Constant expression; compare and
            # divide by its numeric .value, not the Constant itself.
            if expr.p.value == 1:
                return t
            return atoms.power(t, 1 / expr.p.value) if t.is_nonneg() else np.inf
        return power_inv
    elif type(expr) == atoms.multiply:
        if expr.args[0].is_constant():
            const = expr.args[0]
        else:
            const = expr.args[1]
        return lambda t: t / const
    elif type(expr) == DivExpression:
        # Either const / x <= t or x / const <= t.
        if expr.args[0].is_constant():
            # BUG FIX: a constant numerator inverts to const / t;
            # the old code returned t * const for both cases.
            const = expr.args[0]
            return lambda t: const / t
        else:
            # Constant denominator: x / const <= t  =>  x <= const * t.
            const = expr.args[1]
            return lambda t: const * t
    else:
        raise ValueError
def inverse(expr):
    """Return a callable inverting the constraint ``expr(x) <= t``.

    The callable maps a bound ``t`` on the atom's value back to the
    corresponding bound on the atom's argument.

    Raises
    ------
    ValueError
        If ``expr`` is not an atom type handled here, or the sign of the
        argument of ``abs`` is unknown.
    """
    kind = type(expr)
    if kind == atoms.ceil:
        return atoms.floor
    if kind == atoms.floor:
        return atoms.ceil
    if kind == NegExpression:
        def neg_inv(t):
            return -t
        return neg_inv
    if kind == atoms.exp:
        def exp_inv(t):
            # exp(x) <= t is infeasible unless t >= 0.
            return atoms.log(t) if t.is_nonneg() else -np.inf
        return exp_inv
    if kind == atoms.log:
        def log_inv(t):
            return atoms.exp(t)
        return log_inv
    if kind == atoms.log1p:
        def log1p_inv(t):
            return atoms.exp(t) - 1
        return log1p_inv
    if kind == atoms.logistic:
        def logistic_inv(t):
            # logistic(x) = log(1 + exp(x)) >= 0, so t must be nonnegative.
            return atoms.log(atoms.exp(t) - 1) if t.is_nonneg() else -np.inf
        return logistic_inv
    if kind == atoms.power:
        def power_inv(t):
            exponent = expr.p.value
            if exponent == 1:
                return t
            return atoms.power(t, 1 / exponent) if t.is_nonneg() else np.inf
        return power_inv
    if kind == atoms.multiply:
        lhs, rhs = expr.args
        scale = lhs if lhs.is_constant() else rhs
        def multiply_inv(t):
            return t / scale
        return multiply_inv
    if kind == DivExpression:
        # Either const / x <= t or x / const <= t.
        numerator, denominator = expr.args
        if numerator.is_constant():
            # Constant numerator: x >= const / t.
            def div_inv(t):
                return numerator / t
        else:
            # Constant denominator: x <= const * t.
            def div_inv(t):
                return denominator * t
        return div_inv
    if kind == AddExpression:
        lhs, rhs = expr.args
        offset = lhs if lhs.is_constant() else rhs
        def add_inv(t):
            return t - offset
        return add_inv
    if kind == atoms.abs:
        inner = expr.args[0]
        if inner.is_nonneg():
            def abs_inv(t):
                return t
            return abs_inv
        if inner.is_nonpos():
            def abs_inv(t):
                return -t
            return abs_inv
        raise ValueError("Sign of argument must be known.")
    if kind in (Sum, atoms.cumsum):
        # Reductions that preserve entries elementwise invert to identity.
        def identity(t):
            return t
        return identity
    raise ValueError
def log_sum_exp_canon(expr, args):
    """Canonicalize ``log_sum_exp`` via its epigraph formulation.

    Uses the equivalence
        log(sum(exp(x))) <= t  <=>  sum(exp(x - t)) <= 1
    and returns the epigraph variable ``t`` together with the exp-cone
    constraints that enforce the bound.
    """
    x = args[0]
    axis = expr.axis
    t = Variable(expr.shape)

    # Broadcast t up to x's shape so that x - t is elementwise.
    if axis is None:
        # Reduction over all entries; result shape (1, 1).
        t_full = promote(t, x.shape)
    elif axis == 0:
        # Reduction down columns; result shape (1, n): tile t over rows.
        ones_col = Constant(np.ones((x.shape[0], 1)))
        t_full = ones_col * reshape(t, (1,) + x.shape[1:])
    else:
        # Reduction across rows; result shape (m, 1): tile t over columns.
        ones_row = Constant(np.ones((1, x.shape[1])))
        t_full = reshape(t, x.shape[:-1] + (1,)) * ones_row

    shifted = exp(x - t_full)
    obj, constraints = exp_canon(shifted, shifted.args)
    total = sum(obj, axis=axis)
    constraints.append(total <= Constant(np.ones(expr.shape)))
    return t, constraints