def uncons_opt(obj, cons, Vars, x0):
    """nlinprog

    Unconstrained reformulation of a constrained feasibility problem:
    each constraint ``c.lhs <= 0`` is folded into the objective as a
    squared penalty and the resulting sum is minimized with IPOPT.

    NOTE(review): the function raises ``NotImplementedError`` immediately,
    so everything below the raise is unreachable draft code; it also
    references a ``grad`` helper that is not visible in this file.

    Parameters
    ----------
    obj :
        Symbolic objective; asserted to be 0 on entry, then rebuilt from
        the squared constraint penalties.
    cons :
        Iterable of sympy ``LessThan`` relations, each with ``rhs == 0``.
    Vars :
        Symbols to optimize over (argument order for the lambdified funcs).
    x0 :
        Initial point handed to the solver.

    Returns
    -------
    ``spec.OPTRES(objective value, minimizer, 'OK', feasibility flag)``.

    Notes
    ------
    Not implemented yet.
    """
    raise NotImplementedError
    # ---- unreachable draft implementation below ----
    assert(obj == 0)
    for c in cons:
        assert(isinstance(c, sym.LessThan))
        assert(c.rhs == 0)
        obj += c.lhs**2
    eval_f = ft.partial(eval_expr, sym.lambdify(Vars, obj))
    eval_grad_f = ft.partial(eval_grad_obj, sym.lambdify(Vars, grad(Vars, obj)))
    eval_hessian_f = ft.partial(eval_expr, sym.lambdify(Vars, sym.hessian(obj, Vars)))
    # Return codes in IpReturnCodes_inc.h
    res_x, mL, mU, Lambda, res_obj, status = pyipopt.fmin_unconstrained(
        eval_f,
        x0,
        fprime=eval_grad_f,
        fhess=eval_hessian_f,
    )
    return spec.OPTRES(res_obj, res_x, 'OK', res_obj <= 0)
def _find_assignment(variable_handler, atoms, max_num_resets, tol, verbose=False):
    """Search for a variable assignment driving the total atom norm below tol.

    Runs IPOPT unconstrained minimization from the handler's current vector,
    restarting from a random point up to ``max_num_resets`` times.

    Parameters
    ----------
    variable_handler : object with dict_to_vector / vector_to_dict
    atoms : iterable passed to ``evaluate``; objective is the sum of norms
    max_num_resets : maximum number of solver restarts
    tol : accept an assignment once the objective drops below this value
    verbose : print per-iteration diagnostics when True

    Returns
    -------
    dict assignment for the successful vector, or None if no restart
    reached ``tol``.
    """
    init = np.array(variable_handler.dict_to_vector())

    def func(vector):
        # BUGFIX: this was a Python-2-only `print` statement that fired on
        # every objective evaluation even when verbose=False; gate it and
        # use call syntax like the rest of this function.
        if verbose:
            print("dim:", np.shape(vector), vector)
        d = variable_handler.vector_to_dict(vector)
        return sum(evaluate(atom, d).norm for atom in atoms)

    def grad(f, theta):
        # Forward-mode AD Jacobian via algopy.
        theta = algopy.UTPM.init_jacobian(theta)
        return algopy.UTPM.extract_jacobian(f(theta))

    def hess(f, theta):
        # Hessian via algopy (defined but not passed to the solver below).
        theta = algopy.UTPM.init_hessian(theta)
        return algopy.UTPM.extract_hessian(len(theta), f(theta))

    x = None
    for i in range(max_num_resets):
        results = pyipopt.fmin_unconstrained(func, init, functools.partial(grad, func))
        if verbose:
            print("iteration %d:" % (i + 1))
            print(results)
        # Result tuple: (x, lower mults, upper mults, constraint mults,
        # objective, status).
        val, zl, zu, constraint_multipliers, obj, status = results
        if obj < tol:
            x = val
            break
        # Restart from a fresh random point of the same dimension.
        init = np.random.rand(len(init))
    if x is None:
        return None
    return variable_handler.vector_to_dict(x)
def main():
    """Minimize the Rosenbrock function with IPOPT from the classic (-1.2, 1) start."""
    pyipopt.set_loglevel(2)
    start = numpy.array([-1.2, 1], dtype=float)
    # scipy supplies the analytic gradient and Hessian of rosen.
    outcome = pyipopt.fmin_unconstrained(
        scipy.optimize.rosen,
        start,
        fprime=scipy.optimize.rosen_der,
        fhess=scipy.optimize.rosen_hess,
    )
    print(outcome)
def main():
    """Minimize the Himmelblau function with IPOPT and print the result tuple."""
    pyipopt.set_loglevel(2)
    x0 = numpy.array([-0.27, -0.9], dtype=float)
    results = pyipopt.fmin_unconstrained(
        himmelblau,
        x0,
        fprime=functools.partial(eval_grad, himmelblau),
        fhess=functools.partial(eval_hess, himmelblau),
    )
    # BUGFIX: was the Python-2-only statement `print results` (SyntaxError
    # under Python 3); the sibling himmelblau main already uses print(...).
    print(results)
def main():
    """Minimize the Wood function with IPOPT and print the result tuple."""
    pyipopt.set_loglevel(2)
    x0 = numpy.array([-3, -1, -3, -1], dtype=float)
    results = pyipopt.fmin_unconstrained(
        wood,
        x0,
        fprime=functools.partial(eval_grad, wood),
        fhess=functools.partial(eval_hess, wood),
    )
    # BUGFIX: was the Python-2-only statement `print results` (SyntaxError
    # under Python 3); the sibling wood main already uses print(...).
    print(results)
def main():
    """Minimize the Wood test function with IPOPT from (-3, -1, -3, -1)."""
    pyipopt.set_loglevel(2)
    start = numpy.array([-3, -1, -3, -1], dtype=float)
    # Bind the generic gradient/Hessian evaluators to the wood objective.
    grad_fn = functools.partial(eval_grad, wood)
    hess_fn = functools.partial(eval_hess, wood)
    outcome = pyipopt.fmin_unconstrained(wood, start, fprime=grad_fn, fhess=hess_fn)
    print(outcome)
def main():
    """Minimize the Rosenbrock function with IPOPT and print the result tuple."""
    pyipopt.set_loglevel(2)
    x0 = numpy.array([-1.2, 1], dtype=float)
    results = pyipopt.fmin_unconstrained(
        scipy.optimize.rosen,
        x0,
        fprime=scipy.optimize.rosen_der,
        fhess=scipy.optimize.rosen_hess,
    )
    # BUGFIX: was the Python-2-only statement `print results` (SyntaxError
    # under Python 3); the sibling rosen main already uses print(...).
    print(results)
def solve_unconstr(theta0):
    """Minimize ``eval_f`` starting from ``theta0`` with IPOPT.

    Parameters
    ----------
    theta0 : initial parameter vector.

    Returns
    -------
    The minimizer found by ``pyipopt.fmin_unconstrained``.
    """
    pyipopt.set_loglevel(1)
    # fmin_unconstrained returns (x, lower mults, upper mults, constraint
    # mults, objective, status); only the minimizer is used here.
    # BUGFIX: the sixth element was misleadingly named `fval` — per the
    # unpackings elsewhere in this file it is the solver status, and it
    # was unused anyway.
    thetahat, _, _, _, _, _status = pyipopt.fmin_unconstrained(
        eval_f,
        theta0,
        fprime=eval_grad,
        fhess=eval_hess,
    )
    return thetahat
def _find_assignment(variable_handler, atoms, max_num_resets, tol, verbose=False):
    """Search for a variable assignment driving the total atom norm below tol.

    Runs IPOPT unconstrained minimization from the handler's current vector,
    restarting from a random point up to ``max_num_resets`` times.

    Parameters
    ----------
    variable_handler : object with dict_to_vector / vector_to_dict
    atoms : iterable passed to ``evaluate``; objective is the sum of norms
    max_num_resets : maximum number of solver restarts
    tol : accept an assignment once the objective drops below this value
    verbose : print per-iteration diagnostics when True

    Returns
    -------
    dict assignment for the successful vector, or None if no restart
    reached ``tol``.
    """
    init = np.array(variable_handler.dict_to_vector())

    def func(vector):
        # BUGFIX: this was a Python-2-only `print` statement that fired on
        # every objective evaluation even when verbose=False; gate it and
        # use call syntax like the rest of this function.
        if verbose:
            print("dim:", np.shape(vector), vector)
        d = variable_handler.vector_to_dict(vector)
        return sum(evaluate(atom, d).norm for atom in atoms)

    def grad(f, theta):
        # Forward-mode AD Jacobian via algopy.
        theta = algopy.UTPM.init_jacobian(theta)
        return algopy.UTPM.extract_jacobian(f(theta))

    def hess(f, theta):
        # Hessian via algopy (defined but not passed to the solver below).
        theta = algopy.UTPM.init_hessian(theta)
        return algopy.UTPM.extract_hessian(len(theta), f(theta))

    x = None
    for i in range(max_num_resets):
        results = pyipopt.fmin_unconstrained(func, init, functools.partial(grad, func))
        if verbose:
            print("iteration %d:" % (i + 1))
            print(results)
        # Result tuple: (x, lower mults, upper mults, constraint mults,
        # objective, status).
        val, zl, zu, constraint_multipliers, obj, status = results
        if obj < tol:
            x = val
            break
        # Restart from a fresh random point of the same dimension.
        init = np.random.rand(len(init))
    if x is None:
        return None
    return variable_handler.vector_to_dict(x)
def do_opt(args, f, theta):
    """Minimize f from initial guess theta using the solver named by args.fmin.

    @param args: directly parsed from the command line (args.fmin picks the solver)
    @param f: function to minimize
    @param theta: initial guess of parameter values

    Raises ValueError when args.fmin names no known solver.
    """
    #FIXME: remove fmin_args
    #@param fmin_args: data and other precomputed things independent of theta
    fmin_args = tuple()
    g = functools.partial(eval_grad, f)
    h = functools.partial(eval_hess, f)
    if args.fmin == 'simplex':
        results = scipy.optimize.fmin(
            f,
            theta,
            args=fmin_args,
            maxfun=10000,
            maxiter=10000,
            xtol=1e-8,
            ftol=1e-8,
            full_output=True,
        )
    elif args.fmin == 'bfgs':
        results = scipy.optimize.fmin_bfgs(
            f,
            theta,
            args=fmin_args,
            #fprime=g,
            #epsilon=1e-7,
            maxiter=10000,
            full_output=True,
            disp=True,
            retall=True,
        )
    elif args.fmin == 'jeffopt':
        results = jeffopt.fmin_jeff_unconstrained(
            f,
            theta,
            args=fmin_args,
            #abstol=1e-8,
        )
    elif args.fmin == 'ncg':
        results = scipy.optimize.fmin_ncg(
            f,
            theta,
            args=fmin_args,
            fprime=g,
            fhess=h,
            avextol=1e-6,
            maxiter=10000,
            full_output=True,
            disp=True,
            retall=True,
        )
    elif args.fmin == 'slsqp':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='SLSQP',
            jac=g,
        )
    elif args.fmin == 'powell':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='Powell',
        )
    elif args.fmin == 'cg':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='CG',
            jac=g,
        )
    elif args.fmin == 'anneal':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='Anneal',
        )
    elif args.fmin == 'ipopt':
        results = pyipopt.fmin_unconstrained(
            f,
            theta,
            fprime=g,
            fhess=h,
        )
    else:
        # BUGFIX: was a bare `raise Exception` with no message; name the
        # unrecognized solver (ValueError subclasses Exception, so existing
        # callers that catch Exception still work).
        raise ValueError('unrecognized fmin solver: %r' % (args.fmin,))
    # BUGFIX: the reporting used Python-2-only print statements; use
    # %-formatted single-argument print() calls that behave identically
    # under both Python 2 and 3.
    print('results: %s' % (results,))
    xopt = results[0]
    print('optimal solution vector: %s' % (xopt,))
    print('exp optimal solution vector: %s' % (numpy.exp(xopt),))
    print('')
def do_opt(args, f, theta):
    """Minimize f from initial guess theta using the solver named by args.fmin.

    @param args: directly parsed from the command line (args.fmin picks the solver)
    @param f: function to minimize
    @param theta: initial guess of parameter values

    Raises ValueError when args.fmin names no known solver.
    """
    #FIXME: remove fmin_args
    #@param fmin_args: data and other precomputed things independent of theta
    fmin_args = tuple()
    g = functools.partial(eval_grad, f)
    h = functools.partial(eval_hess, f)
    if args.fmin == 'simplex':
        results = scipy.optimize.fmin(
            f,
            theta,
            args=fmin_args,
            maxfun=10000,
            maxiter=10000,
            xtol=1e-8,
            ftol=1e-8,
            full_output=True,
        )
    elif args.fmin == 'bfgs':
        results = scipy.optimize.fmin_bfgs(
            f,
            theta,
            args=fmin_args,
            #fprime=g,
            #epsilon=1e-7,
            maxiter=10000,
            full_output=True,
            disp=True,
            retall=True,
        )
    elif args.fmin == 'jeffopt':
        results = jeffopt.fmin_jeff_unconstrained(
            f,
            theta,
            args=fmin_args,
            #abstol=1e-8,
        )
    elif args.fmin == 'ncg':
        results = scipy.optimize.fmin_ncg(
            f,
            theta,
            args=fmin_args,
            fprime=g,
            fhess=h,
            avextol=1e-6,
            maxiter=10000,
            full_output=True,
            disp=True,
            retall=True,
        )
    elif args.fmin == 'slsqp':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='SLSQP',
            jac=g,
        )
    elif args.fmin == 'powell':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='Powell',
        )
    elif args.fmin == 'cg':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='CG',
            jac=g,
        )
    elif args.fmin == 'anneal':
        results = scipy.optimize.minimize(
            f,
            theta,
            args=fmin_args,
            method='Anneal',
        )
    elif args.fmin == 'ipopt':
        results = pyipopt.fmin_unconstrained(
            f,
            theta,
            fprime=g,
            fhess=h,
        )
    else:
        # BUGFIX: was a bare `raise Exception` with no message; name the
        # unrecognized solver (ValueError subclasses Exception, so existing
        # callers that catch Exception still work).
        raise ValueError('unrecognized fmin solver: %r' % (args.fmin,))
    # BUGFIX: the reporting used Python-2-only print statements; use
    # %-formatted single-argument print() calls that behave identically
    # under both Python 2 and 3.
    print('results: %s' % (results,))
    xopt = results[0]
    print('optimal solution vector: %s' % (xopt,))
    print('exp optimal solution vector: %s' % (numpy.exp(xopt),))
    print('')
# Multi-start IPOPT estimation: draw `multistart` random restart points,
# pick the row for this job index, minimize, and compute standard errors.
multistart = 1000
np.random.seed(1234)  # reproducible restart points across jobs
r = np.random.rand(multistart, len(thetalower))
# Keep only the restart row assigned to this job (multistartidx from context).
r = r[multistartidx, :]
# BUGFIX: replaced Python-2-only print statements with %-formatted print()
# calls that behave identically under Python 2 and 3.
print('** invoke ipopt')
start = time()
pyipopt.set_loglevel(1)
# Starting point: elementwise convex combination of the parameter bounds.
thetahatnew, _, _, _, _, fval = pyipopt.fmin_unconstrained(
    eval_f,
    thetalower * r + thetaupper * (1 - r),
    fprime=eval_grad,
    fhess=eval_hess,
)
print('** calculating nlogl and se')
nllnew = eval_f(thetahatnew)
try:
    # Standard errors from the inverse Hessian diagonal at the optimum.
    sehatnew = np.diag(np.linalg.inv(eval_hess(thetahatnew)))**0.5
except Exception:
    # BUGFIX: `except Exception, e` is Python-2-only syntax (SyntaxError on
    # Python 3) and `e` was unused. Hessian may be singular; report NaN
    # standard errors instead of crashing.
    sehatnew = np.nan * np.ones(thetahatnew.shape)
print('-- Time = %s s' % (time() - start))
# Same result as the original implicit-literal-concatenation expression:
# 'result' + '_' + specname + '_' + zero-padded job index.
outname = 'result_%s_%03d' % (specname, int(multistartidx))
def main():
    """Minimize the Himmelblau test function with IPOPT from (-0.27, -0.9)."""
    pyipopt.set_loglevel(2)
    start = numpy.array([-0.27, -0.9], dtype=float)
    # Bind the generic gradient/Hessian evaluators to the himmelblau objective.
    grad_fn = functools.partial(eval_grad, himmelblau)
    hess_fn = functools.partial(eval_hess, himmelblau)
    outcome = pyipopt.fmin_unconstrained(himmelblau, start, fprime=grad_fn, fhess=hess_fn)
    print(outcome)