def solve(self, otol=1e-5, zguess=None, separate=False):

        if separate:
            # solve each segment independently, in parallel
            self.segments = self.pool.map(lambda seg: seg.solve(),
                                          self.segments)
            # serial alternative: [seg.solve() for seg in self.segments]

        else:
            # set optimisation params
            self.otol = otol

            # instantiate optimisation problem
            prob = pg.problem(self)

            # instantiate algorithm
            algo = pg.ipopt()
            algo.set_numeric_option("tol", self.otol)
            algo = pg.algorithm(algo)
            algo.set_verbosity(1)

            # instantiate and evolve population
            if zguess is None:
                pop = pg.population(prob, 1)
            else:
                pop = pg.population(prob, 0)
                pop.push_back(zguess)
            pop = algo.evolve(pop)

            # extract solution
            self.zopt = pop.champion_x
            self.fitness(self.zopt)

        # combine records
        self.process_records()
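The parallel branch above maps a lambda over self.pool; a standard multiprocessing.Pool cannot pickle lambdas, so a thread-based pool is one workable choice. A minimal sketch of the assumed setup (the class name and constructor are hypothetical):

from multiprocessing.dummy import Pool  # thread pool: lambdas need no pickling


class Mission:  # hypothetical owner of the segments above
    def __init__(self, segments, workers=4):
        self.segments = segments
        self.pool = Pool(workers)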
Example #2
    def solve(self, otol=1e-5):

        # set optimisation params
        self.otol = otol

        # instantiate optimisation problem
        prob = pg.problem(self)

        # instantiate algorithm
        algo = pg.ipopt()
        algo.set_numeric_option("tol", self.otol)
        algo = pg.algorithm(algo)
        algo.set_verbosity(1)

        # instantiate and evolve population
        pop = pg.population(prob, 1)
        pop = algo.evolve(pop)

        # extract solution
        self.zopt = pop.champion_x
        self.fitness(self.zopt)

        # process records
        self.process_records()

        return self
Example #3
def solve(x0,
          xf,
          alpha,
          dv=None,
          otol=1e-5,
          max_iter=200,
          Tlb=1,
          Tub=25,
          lb=100,
          atol=1e-12,
          rtol=1e-12):

    # initialise dynamics
    dyn = dynamics(x0,
                   xf,
                   alpha,
                   Tlb=Tlb,
                   Tub=Tub,
                   lb=lb,
                   atol=atol,
                   rtol=rtol)

    # optimisation problem
    prob = pg.problem(dyn)
    prob.c_tol = 1e-5

    # algorithm
    algo = pg.ipopt()
    algo.set_numeric_option("acceptable_tol", otol)
    algo.set_integer_option("max_iter", iter)
    algo = pg.algorithm(algo)
    algo.set_verbosity(1)

    # guess
    if dv is None:
        pop = pg.population(prob, 1)
    else:
        pop = pg.population(prob, 0)
        pop.push_back(dv)

    # solve
    dv = algo.evolve(pop).champion_x

    # feasibility
    feas = prob.feasibility_x(dv)

    T = dv[0]
    l0 = dv[1:]
    t, y, s, f = dyn.propagate(T, l0)
    return dv, feas, t, y, dyn.pmp(y.T, alpha)
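A hedged usage sketch of the shooting solver above; the boundary states and their dimension are illustrative placeholders, not values from the original source:

import numpy as np

# hypothetical boundary states for the transfer
x0 = np.array([1.0, 0.0, 0.0, 1.0, 1.0])
xf = np.array([1.5, 0.0, 0.0, 0.8, 0.7])

# solve the smoothed problem first, then warm-start a harder homotopy step
dv, feas, t, y, pmp = solve(x0, xf, alpha=0.0)
if feas:
    dv, feas, t, y, pmp = solve(x0, xf, alpha=0.5, dv=dv)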
Example #4
    def solve(self, inp, Tlb=2, Tub=30, obj='time'):

        self.Tlb = Tlb
        self.Tub = Tub

        # integer input: number of nodes, start from a random guess
        if isinstance(inp, int):
            self.N = inp
            guess = False
        # vector input: decode a previous decision vector as the guess
        elif inp is not None:
            times, h, states, controls = self.decode(inp)
            self.N = len(states)
            guess = True
        else:
            raise ValueError("inp must be an int or a decision vector.")

        if obj in ('energy', 'time'):
            self.obj = obj
        else:
            raise ValueError("obj must be 'energy' or 'time'.")

        # problem
        prob = pg.problem(self)

        # population
        if guess:
            pop = pg.population(prob, 0)
            pop.push_back(inp)
        else:
            pop = pg.population(prob, 1)

        # algorithm
        algo = pg.ipopt()
        algo.set_numeric_option("acceptable_tol", 1e-5)
        algo.set_integer_option("max_iter", 500)
        algo = pg.algorithm(algo)
        algo.set_verbosity(1)

        # evolve population
        pop = algo.evolve(pop)

        # return decision vector
        return pop.champion_x
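A hedged two-stage call of the method above (traj stands in for an instance of the surrounding class):

# solve from a random population on 20 nodes, then reuse the
# time-optimal decision vector as the guess for the energy objective
z = traj.solve(20, obj='time')
z = traj.solve(z, obj='energy')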
Example #5
def algo_factory(name, original_screen_output=True):
    if name == "slsqp":
        uda = pg.nlopt('slsqp')
        uda.xtol_rel = 1e-5
        uda.ftol_rel = 0
        algo = pg.algorithm(uda)
        algo.set_verbosity(1)
        return algo
    elif name == "ipopt":
        if original_screen_output:
            pl = 5
        else:
            pl = 0
        # Disable lint check on next line. Known issue (pagmo2/issues/261)
        uda = pg.ipopt()  # pylint: disable=no-member
        uda.set_integer_option("print_level", pl)
        uda.set_integer_option("acceptable_iter", 4)
        uda.set_integer_option("max_iter", 150)

        uda.set_numeric_option("tol", 1e-8)
        uda.set_numeric_option("dual_inf_tol", 1e-8)
        uda.set_numeric_option("constr_viol_tol", 1e-8)
        uda.set_numeric_option("compl_inf_tol", 1e-8)

        uda.set_numeric_option("acceptable_tol", 1e-3)
        uda.set_numeric_option("acceptable_dual_inf_tol", 1e-2)
        uda.set_numeric_option("acceptable_constr_viol_tol", 1e-6)
        uda.set_numeric_option("acceptable_compl_inf_tol", 1e-6)

        algo = pg.algorithm(uda)
        return algo
    elif name == "snopt7":
        import pygmo_plugins_nonfree as pg7
        uda = pg7.snopt7(original_screen_output,
                         "/usr/local/lib/libsnopt7_c.so")
        uda.set_integer_option("Major iterations limit", 2000)
        uda.set_integer_option("Iterations limit", 200000)
        uda.set_numeric_option("Major optimality tolerance", 1e-2)
        uda.set_numeric_option("Major feasibility tolerance", 1e-9)

        algo = pg.algorithm(uda)
        return algo
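A hedged smoke test for the factory (assumes pygmo is importable as pg); Rosenbrock is chosen only because pygmo ships it with an analytical gradient, which the gradient-based solvers configured here require:

import pygmo as pg

algo = algo_factory("slsqp")
prob = pg.problem(pg.rosenbrock(dim=10))
pop = pg.population(prob, size=1, seed=0)
pop = algo.evolve(pop)
print(pop.champion_f)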
Example #6
    def solve(self, otol=1e-5, atol=1e-14, rtol=1e-14):

        # set optimisation params
        self.otol = otol
        self.atol = atol
        self.rtol = rtol

        # nondimensionalise problem
        self.nondimensionalise()

        # instantiate optimisation problem
        prob = pg.problem(self)

        # instantiate algorithm
        algo = pg.ipopt()
        algo.set_numeric_option("tol", self.otol)
        algo = pg.algorithm(algo)
        algo.set_verbosity(1)

        # instantiate and evolve population
        pop = pg.population(prob, 1)
        pop = algo.evolve(pop)

        # extract solution
        self.zopt = pop.champion_x
        self.fitness(self.zopt)

        # stack the per-segment states and times into single records
        self.states = np.vstack(
            [self.segments[i].states for i in range(self.nseg)])
        self.times = np.hstack(
            [self.segments[i].times for i in range(self.nseg)])
        self.controls = np.apply_along_axis(self.dynamics.control, 1,
                                            self.states)

        # redimensionalise problem
        self.dimensionalise()

        # redimensionalise records
        self.times *= self.dynamics.T
        self.states[:, :self.dynamics.sdim] = np.apply_along_axis(
            self.dynamics.dim_state, 1, self.states[:, :self.dynamics.sdim])
Example #7
def algo_factory(name, original_screen_output=True):
    if name is "slsqp":
        uda = pg.nlopt('slsqp')
        uda.xtol_rel = 1e-5
        uda.ftol_rel = 0
        algo = pg.algorithm(uda)
        algo.set_verbosity(1)
        return algo
    elif name is "ipopt":
        if original_screen_output:
            pl = 5
        else:
            pl = 0
        uda = pg.ipopt()
        uda.set_integer_option("print_level", pl)
        uda.set_integer_option("acceptable_iter", 4)
        uda.set_integer_option("max_iter", 150)

        uda.set_numeric_option("tol", 1e-8)
        uda.set_numeric_option("dual_inf_tol", 1e-8)
        uda.set_numeric_option("constr_viol_tol", 1e-8)
        uda.set_numeric_option("compl_inf_tol", 1e-8)

        uda.set_numeric_option("acceptable_tol", 1e-3)
        uda.set_numeric_option("acceptable_dual_inf_tol", 1e-2)
        uda.set_numeric_option("acceptable_constr_viol_tol", 1e-6)
        uda.set_numeric_option("acceptable_compl_inf_tol", 1e-6)

        algo = pg.algorithm(uda)
        return algo
    elif name is "snopt7":
        import pygmo_plugins_nonfree as pg7
        uda = pg7.snopt7(original_screen_output,
                         "/usr/local/lib/libsnopt7_c.so")
        uda.set_integer_option("Major iterations limit", 2000)
        uda.set_integer_option("Iterations limit", 200000)
        uda.set_numeric_option("Major optimality tolerance", 1e-2)
        uda.set_numeric_option("Major feasibility tolerance", 1e-9)

        algo = pg.algorithm(uda)
        return algo
Example #8
    def solve(self, otol=1e-5, atol=1e-10, rtol=1e-10):

        # integration params
        self.otol = otol

        # random initialisation
        self.fitness(pg.random_decision_vector(*self.get_bounds()),
                     rtol=rtol,
                     atol=atol)

        # nondimensionalise problem
        self.nondimensionalise()

        # instantiate optimisation problem
        prob = pg.problem(self)

        # instantiate algorithm
        algo = pg.ipopt()
        algo.set_numeric_option("tol", self.otol)
        algo = pg.algorithm(algo)
        algo.set_verbosity(1)

        # instantiate and evolve population
        pop = pg.population(prob, 1)
        pop = algo.evolve(pop)

        # extract solution
        self.zopt = pop.champion_x
        self.fitness(self.zopt)

        # compute controls
        self.controls = np.apply_along_axis(self.dynamics.control, 1,
                                            self.states)

        # redimensionalise problem
        self.dimensionalise()

        # redimensionalise records
        self.dimensionalise_record()
Example #9
 def _setup_algorithm(self, parameters):
     alg = pg.ipopt()
     return alg
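The stub above ignores parameters; a hedged sketch of forwarding them to IPOPT, assuming parameters is a dict of native IPOPT options (the type-based dispatch is an assumption, not part of the original):

def _setup_algorithm(self, parameters):
    alg = pg.ipopt()
    # hypothetical: route each option to the setter matching its type
    for name, value in parameters.items():
        if isinstance(value, str):
            alg.set_string_option(name, value)
        elif isinstance(value, bool):
            alg.set_string_option(name, "yes" if value else "no")
        elif isinstance(value, int):
            alg.set_integer_option(name, value)
        else:
            alg.set_numeric_option(name, value)
    return alg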
Example #10
    def solve_direct(self, states, controls, T, homotopy, boundaries):

        # sanity
        assert states.shape[0] == controls.shape[0]
        assert states.shape[1] == self.state_dim
        assert controls.shape[1] == self.control_dim

        # system parameters
        params = self.params.values()

        # number of collocation nodes
        n = states.shape[0]

        # decision vector bounds
        @jit
        def get_bounds():
            zl = np.hstack((self.state_lb, self.control_lb))
            zl = np.tile(zl, n)
            zl = np.hstack(([0.0], zl))
            zu = np.hstack((self.state_ub, self.control_ub))
            zu = np.tile(zu, n)
            zu = np.hstack(([np.inf], zu))
            return zl, zu

        # decision vector maker
        @jit
        def flatten(states, controls, T):
            z = np.hstack((states, controls)).flatten()
            z = np.hstack(([T], z))
            return z

        # decision vector translator
        @jit
        def unflatten(z):
            T = z[0]
            z = z[1:].reshape(n, self.state_dim + self.control_dim)
            states = z[:, :self.state_dim]
            controls = z[:, self.state_dim:]
            return states, controls, T

        # fitness vector
        print('Compiling fitness...')

        @jit
        def fitness(z):

            # translate decision vector
            states, controls, T = unflatten(z)

            # time grid
            n = states.shape[0]
            times = np.linspace(0, T, n)

            # objective
            L = vmap(lambda state, control: self.lagrangian(
                state, control, homotopy, *params))
            L = L(states, controls)
            J = np.trapz(L, dx=T / (n - 1))

            # Lagrangian state dynamics constraints, and boundary constraints
            # e0 = self.collocate_lagrangian(states, controls, times, costs, homotopy, *params)
            e1 = self.collocate_state(states, controls, times, *params)
            e2, e3 = boundaries(states[0, :], states[-1, :])
            e = np.hstack((e1.flatten(), e2, e3))**2

            # fitness vector
            return np.hstack((J, e))

        # z = flatten(states, controls, T)
        # fitness(z)

        # sparse Jacobian
        print('Compiling Jacobian and its sparsity...')
        gradient = jit(jacfwd(fitness))
        z = flatten(states, controls, T)
        sparse_id = np.vstack((np.nonzero(gradient(z)))).T
        sparse_gradient = jit(lambda z: gradient(z)[tuple(sparse_id.T)])
        gradient_sparsity = jit(lambda: sparse_id)
        print('Jacobian has {} elements.'.format(sparse_id.shape[0]))

        # assign PyGMO problem methods
        self.fitness = fitness
        self.gradient = sparse_gradient
        self.gradient_sparsity = gradient_sparsity
        self.get_bounds = get_bounds
        self.get_nobj = jit(lambda: 1)
        nec = fitness(z).shape[0] - 1
        self.get_nec = jit(lambda: nec)

        # plot before
        states, controls, T = unflatten(z)
        self.plot('../img/direct_before.png', states, dpi=1000)

        # solve NLP with IPOPT
        print('Solving...')
        prob = pg.problem(udp=self)
        algo = pg.ipopt()
        algo.set_integer_option('max_iter', 1000)
        algo = pg.algorithm(algo)
        algo.set_verbosity(1)
        pop = pg.population(prob=prob, size=0)
        pop.push_back(z)
        pop = algo.evolve(pop)

        # save and plot solution
        z = pop.champion_x
        np.save('decision.npy', z)
        states, controls, T = unflatten(z)
        self.plot('../img/direct_after.png', states, dpi=1000)
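The sparsity detection above evaluates the dense jacfwd once at the initial guess and keeps only the nonzero pattern; any Jacobian entry that happens to be zero at that particular point is silently dropped, so the guess should be generic. A minimal standalone sketch of the same pattern (assumes JAX; the toy fitness is illustrative):

import jax.numpy as jnp
from jax import jit, jacfwd

def fitness(z):
    # toy fitness: one objective followed by two equality constraints
    return jnp.hstack((jnp.sum(z ** 2), z[0] * z[1], z[2] - 1.0))

gradient = jit(jacfwd(fitness))
z = jnp.array([1.0, 2.0, 3.0])
sparse_id = jnp.vstack(jnp.nonzero(gradient(z))).T   # (row, col) pairs
sparse_gradient = jit(lambda z: gradient(z)[tuple(sparse_id.T)])
print(sparse_gradient(z))  # only the structurally nonzero entries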
Example #11
    def solve(self,
              alpha,
              Tlb=1,
              Tub=30,
              lb=1,
              atol=1e-10,
              rtol=1e-10,
              otol=1e-5,
              max_iter=200,
              dv=None,
              verbose=False):

        # set homotopy parameter
        self.alpha = alpha

        # set time bounds
        self.Tlb = Tlb
        self.Tub = Tub

        # set costate magnitude tolerances
        self.lb = lb

        # set tolerances
        self.atol = atol
        self.rtol = rtol

        # problem
        prob = pg.problem(self)
        prob.c_tol = otol

        # algorithm
        algo = pg.ipopt()
        algo.set_numeric_option("acceptable_tol", otol)
        algo.set_integer_option("max_iter", iter)
        algo = pg.algorithm(algo)
        algo.set_verbosity(1)

        # supplied population
        if dv is not None:
            if verbose:
                print("Testing supplied decision vector: {}".format(dv))
            pop = pg.population(prob, 0)
            pop.push_back(dv)
            pop = algo.evolve(pop)
            feas = prob.feasibility_x(pop.champion_x)
            if verbose:
                print("Supplied decision vector was {}.".format(
                    "succesfull" if feas else "unsuccesfull"))
            return pop.champion_x, feas

        # random population
        else:
            pop = pg.population(prob, 1)
            if verbose:
                print("Trying random decision vector {}.".format(
                    pop.champion_x))
            pop = algo.evolve(pop)
            if verbose:
                print("Optimised decision vector now {}".format(
                    pop.champion_x))
            feas = prob.feasibility_x(pop.champion_x)
            if verbose:
                print("Supplied decision vector was {}.".format(
                    "succesfull" if feas else "unsuccesfull"))
            return pop.champion_x, feas
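A hedged continuation sketch for the method above (leg stands in for an instance of the surrounding class; the alpha grid is illustrative):

import numpy as np

# homotopy continuation: solve the easy (alpha = 0) problem first,
# then track the solution as alpha is pushed towards 1
dv, feas = leg.solve(alpha=0.0, verbose=True)
for alpha in np.linspace(0.1, 1.0, 10):
    if not feas:
        break
    dv, feas = leg.solve(alpha=alpha, dv=dv)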
Example #12
            x[4] - x[5])**2 + x[3]**2 - x[2] + x[5] + x[2]**2 - x[1]
        ci2 = -(8 * x[5] * (x[5]**2 - x[4]) - 2 *
                (1 - x[5]) + x[4]**2 - x[3] + x[3]**2 - x[4])
        return [obj, ce1, ce2, ce3, ce4, ci1, ci2]


def _gradient(self, x):
    return pg.estimate_gradient_h(lambda x: self.fitness(x), x)


if __name__ == "__main__":
    my_minlp.gradient = _gradient
    # We need to reconstruct the problem as we changed its definition (adding the gradient)
    prob = pg.problem(my_minlp())
    prob.c_tol = [1e-8] * 6
    print(prob)

    # We run 20 instances of the optimization in parallel via a default archipelago setup
    archi = pg.archipelago(n=20, algo=pg.ipopt(), prob=prob, pop_size=1)
    archi.evolve(2)
    archi.wait()
    # We get the best of the parallel runs
    a = archi.get_champions_f()
    a2 = sorted(archi.get_champions_f(), key=lambda x: x[0])[0]
    best_isl_idx = [(el == a2).all() for el in a].index(True)
    x_best = archi.get_champions_x()[best_isl_idx]
    f_best = archi.get_champions_f()[best_isl_idx]
    print("Best relaxed solution, x: {}".format(x_best))
    print("Best relaxed solution, f: {}".format(f_best))
Example #13
    def pygmo(self, x0, bnds, options):
        class pygmo_objective_fcn:
            def __init__(self, obj_fcn, bnds):
                self.obj_fcn = obj_fcn
                self.bnds = bnds

            def fitness(self, x):
                return [self.obj_fcn(x)]

            def get_bounds(self):
                return self.bnds

            def gradient(self, x):
                return pygmo.estimate_gradient_h(lambda x: self.fitness(x), x)

        timer_start = timer()

        pop_size = int(np.max([35, 5 * (len(x0) + 1)]))
        if options['stop_criteria_type'] == 'Iteration Maximum':
            num_gen = int(np.ceil(options['stop_criteria_val'] / pop_size))
        elif options['stop_criteria_type'] == 'Maximum Time [min]':
            # generation count effectively unbounded for time-based stopping
            num_gen = int(np.ceil(1E20 / pop_size))
        else:
            raise ValueError("Unknown stop_criteria_type: {}".format(
                options['stop_criteria_type']))

        prob = pygmo.problem(pygmo_objective_fcn(self.obj_fcn, tuple(bnds)))
        pop = pygmo.population(prob, pop_size)
        pop.push_back(x=x0)  # puts initial guess into the initial population

        # all coefficients/rules should be optimized if they're to be used
        if options['algorithm'] == 'pygmo_DE':
            #F = (0.107 - 0.141)/(1 + (num_gen/225)**7.75)
            F = 0.2
            CR = 0.8032 * np.exp(-1.165E-3 * num_gen)
            algo = pygmo.algorithm(pygmo.de(gen=num_gen, F=F, CR=CR,
                                            variant=6))
        elif options['algorithm'] == 'pygmo_SaDE':
            algo = pygmo.algorithm(pygmo.sade(gen=num_gen, variant=6))
        elif options['algorithm'] == 'pygmo_PSO':  # using generational version
            algo = pygmo.algorithm(pygmo.pso_gen(gen=num_gen))
        elif options['algorithm'] == 'pygmo_GWO':
            algo = pygmo.algorithm(pygmo.gwo(gen=num_gen))
        elif options['algorithm'] == 'pygmo_IPOPT':
            algo = pygmo.algorithm(pygmo.ipopt())
        else:
            raise ValueError("Unknown algorithm: {}".format(
                options['algorithm']))

        pop = algo.evolve(pop)

        x = pop.champion_x

        obj_fcn, x, shock_output = self.Scaled_Fit_Fun(x, optimizing=False)

        msg = 'Optimization terminated successfully.'
        success = True

        res = {
            'x': x,
            'shock': shock_output,
            'fval': obj_fcn,
            'nfev': pop.problem.get_fevals(),
            'success': success,
            'message': msg,
            'time': timer() - timer_start
        }

        return res
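A hedged call sketch; the option values are illustrative, and opt, x0, and bnds stand in for objects from the surrounding class, which are assumptions:

options = {
    'algorithm': 'pygmo_DE',
    'stop_criteria_type': 'Iteration Maximum',
    'stop_criteria_val': 5000,
}
res = opt.pygmo(x0, bnds, options)
print(res['fval'], res['nfev'], res['time'])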