Example #1
    def create_problem(self, task_list):
        # prepare problem data (TODO: move computation of task_positions into the cost function class)
        self.task_list = task_list
        self.n_tasks = self.guidance.n_agents
        task_positions = np.array(
            [np.array(t.coordinates) for t in task_list.tasks])
        task_indices = [t.id for t in task_list.tasks]
        starting_position = self.guidance.current_pose.position[:-1]

        # create problem matrices
        c = self._generate_cost(task_positions, starting_position)
        A, b = self._generate_constraints(task_indices)

        # create problem object
        x = Variable(len(self.task_list.tasks))
        obj = c @ x
        constr = A.transpose() @ x == b
        problem = LinearProblem(objective_function=obj, constraints=constr)
        self.agent.problem = problem

        # set communicator label
        self.guidance.communicator.current_label = int(task_list.label)

        # create algorithm object
        self.algorithm = DistributedSimplex(
            self.agent, stop_iterations=self.stop_iterations)
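
    # hypothetical companion method (an assumption, not part of the original
    # class): drive the optimizer created above, mirroring Example #10's run call
    def optimize(self):
        self.algorithm.run(iterations=100, verbose=False)
        return self.algorithm.get_result()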
Example #2
    def initialize_scenario(self,
                            robot_id: int,
                            prediction_horizon: int,
                            system_matrices: dict,
                            cost_matrices: dict,
                            coupling_constraints: dict,
                            local_constraints: dict):
        # save information
        self.prediction_horizon = prediction_horizon
        self.system_matrices = system_matrices
        self.coupling_constraints = coupling_constraints
        self.robot_id = robot_id

        # shorthands
        A = system_matrices['A']
        B = system_matrices['B']
        T = prediction_horizon
        E = local_constraints['x_matrix']
        f = local_constraints['x_vector']
        G = local_constraints['u_matrix']
        h = local_constraints['u_vector']
        Q = cost_matrices['state']
        R = cost_matrices['input']
        n, m = self._sizes = (A.shape[0], B.shape[1])

        # create optimization variables
        z = Variable((T + 1) * n + T * m)  # complete optimization variable
        self._x = np.vstack((np.eye((T + 1) * n), np.zeros(
            (T * m,
             (T + 1) * n)))) @ z  # state portion of optimization variable
        self._u = np.vstack((np.zeros(((T + 1) * n, T * m)), np.eye(
            T * m))) @ z  # input portion of optimization variable

        # initialize objective function and constraints
        self._obj_func = 0
        self._basic_constraints = []

        for t in range(T):
            # extract optimization variables corresponding to x(t), u(t), x(t+1)
            x_t = np.eye(n, (T + 1) * n, t * n).T @ self._x
            u_t = np.eye(m, T * m, t * m).T @ self._u
            x_tp = np.eye(n, (T + 1) * n, (t + 1) * n).T @ self._x

            # build local constraints
            self._basic_constraints.append(x_tp == A.T @ x_t +
                                           B.T @ u_t)  # dynamics
            self._basic_constraints.append(E.T @ x_t <= f)  # state
            self._basic_constraints.append(G.T @ u_t <= h)  # input

            # add terms to objective function
            self._obj_func += QuadraticForm(x_t, Q) + QuadraticForm(u_t, R)
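
        # hypothetical continuation (assumption, not the original code): pin the
        # initial state and assemble the local problem from the pieces above
        x_0 = np.eye(n, (T + 1) * n, 0).T @ self._x  # x(0) portion of the variable
        x_init = np.zeros((n, 1))  # placeholder initial state
        constraints = self._basic_constraints + [x_0 == x_init]
        problem = Problem(objective_function=self._obj_func, constraints=constraints)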
Example #3
# Generate problem data
#####################

# points
points = np.zeros((dim, nsamp[0] + nsamp[1]))
points[:, 0:nsamp[0]] = np.random.multivariate_normal(mu[0], sigma[0],
                                                      nsamp[0]).transpose()
points[:, nsamp[0]:] = np.random.multivariate_normal(mu[1], sigma[1],
                                                     nsamp[1]).transpose()

# labels
labels = np.ones((sum(nsamp), 1))
labels[nsamp[0]:] = -labels[nsamp[0]:]

# cost function
z = Variable(dim + 1 + NN)
A = np.zeros((dim + 1 + NN, dim))
A[0:dim, :] = np.eye(dim)  # w = A @ z
B = np.zeros((dim + 1 + NN, 1))
B[dim + 1:dim + NN + 1] = np.ones((NN, 1))  # xi_1 + ... + xi_N = B @ z
D = np.zeros((dim + 1 + NN, 1))
D[dim] = 1  # b = D @ z
E = np.zeros((dim + 1 + NN, 1))
E[dim + 1 + agent_id] = 1  # xi_i = E @ z

obj_func = (1 / 2) * (A @ z) @ (A @ z) + C * (B @ z)

# constraints
F = np.zeros((dim + 1 + NN, NN))
F[dim + 1:dim + NN + 1, :] = np.eye(NN)  # xi = F @ z
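
# hypothetical completion (an assumption, not the original code): the snippet
# stops before the constraints are assembled; with the selectors above, the
# soft-margin conditions  y_i (p_i' w + b) >= 1 - xi_i  and  xi >= 0  read
# (recall that in disropt notation A @ z denotes the linear function A' z)
p_i = points[:, agent_id].reshape(-1, 1)  # assumes one point per agent
g_i = A @ p_i + D  # g_i @ z  equals  p_i' w + b
constr = [labels[agent_id, 0] * (g_i @ z) + E @ z >= 1,  # margin with slack
          F @ z >= 0]  # all slacks nonnegative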
Example #4
import numpy as np
from disropt.agents import Agent
from disropt.algorithms import GradientTracking
from disropt.functions import QuadraticForm, Variable
from disropt.utils.graph_constructor import MPIgraph
from disropt.problems import Problem

# generate communication graph (everyone uses the same seed)
comm_graph = MPIgraph('random_binomial', 'metropolis')
agent_id, in_nbrs, out_nbrs, in_weights, _ = comm_graph.get_local_info()

# size of optimization variable
n = 5

# generate quadratic cost function
np.random.seed()
Q = np.random.randn(n, n)
Q = Q.transpose() @ Q
x = Variable(n)
func = QuadraticForm(x - np.random.randn(n, 1), Q)

# create Problem and Agent
agent = Agent(in_nbrs, out_nbrs, in_weights=in_weights)
agent.set_problem(Problem(func))

# run the algorithm
x0 = np.random.rand(n, 1)
algorithm = GradientTracking(agent, x0)
algorithm.run(iterations=1000, stepsize=0.01)

print("Agent {} - solution estimate: {}".format(
    agent_id,
    algorithm.get_result().flatten()))
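
# note: disropt relies on MPI, so a script like this is launched with one
# process per agent, e.g.:  mpirun -np 8 python launcher.py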
Example #5
# Generate problem data
#####################

# points
points = np.zeros((dim, nsamp[0] + nsamp[1]))
points[:, 0:nsamp[0]] = np.random.multivariate_normal(mu[0], sigma[0],
                                                      nsamp[0]).transpose()
points[:, nsamp[0]:] = np.random.multivariate_normal(mu[1], sigma[1],
                                                     nsamp[1]).transpose()

# labels
labels = np.ones((sum(nsamp), 1))
labels[nsamp[0]:] = -labels[nsamp[0]:]

# cost function
z = Variable(dim + 1)
A = np.eye(dim + 1)
A[-1, -1] = 0  # zero out the bias entry so that A @ z selects w only
obj_func = (C / (2 * NN)) * SquaredNorm(A @ z)  # (C/2N) ||w||^2

for j in range(sum(nsamp)):
    e_j = np.zeros((sum(nsamp), 1))
    e_j[j] = 1
    A_j = np.vstack((points @ e_j, 1))
    obj_func += Logistic(-labels[j] * A_j @ z)

#####################
# Distributed algorithms
#####################

# local agent and problem
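# hypothetical continuation (assumption): mirror the agent/problem setup of
# Examples #4 and #11, with Adj, W and agent_id as in the other snippets
agent = Agent(in_neighbors=np.nonzero(Adj[agent_id, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, agent_id])[0].tolist(),
              in_weights=W[agent_id, :].tolist())
agent.set_problem(Problem(obj_func))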
Example #6
EE_min = 1  # kWh
EE_max = rnd(8, 16)  # kWh
EE_init = rnd(0.2, 0.5) * EE_max  # kWh
EE_ref = rnd(0.55, 0.8) * EE_max  # kWh
zeta_u = 1 - rnd(0.015, 0.075)  # pure number

#####################
# Generate problem object
#####################

# normalize unit measures
DeltaT = DeltaT / 60  # minutes  -> hours
CC_u = CC_u / 1e3  # Euro/MWh -> Euro/kWh

# optimization variables
z = Variable(2 * TT +
             1)  # stack of e (state of charge) and u (input charging power)
e = np.vstack((np.eye(TT + 1), np.zeros(
    (TT, TT + 1)))) @ z  # T+1 components (from 0 to T)
u = np.vstack((np.zeros(
    (TT + 1, TT)), np.eye(TT))) @ z  # T components (from 0 to T-1)

# objective function
obj_func = PP * (CC_u @ u)

# coupling function
coupling_func = PP * u - (PP_max / NN)

# local constraints
e_0 = np.zeros((TT + 1, 1))
e_T = np.zeros((TT + 1, 1))
e_0[0] = 1
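
# hypothetical continuation (assumption, not the original code): complete the
# final-state selector, then add boundary conditions, battery dynamics and
# state-of-charge bounds as local constraints
e_T[TT] = 1
constr = [e_0 @ e == EE_init,  # initial state of charge
          e_T @ e >= EE_ref]   # required final state of charge
for t in range(TT):
    s_t = np.zeros((TT + 1, 1))
    s_t[t] = 1  # selects e(t)
    s_tp = np.zeros((TT + 1, 1))
    s_tp[t + 1] = 1  # selects e(t+1)
    r_t = np.zeros((TT, 1))
    r_t[t] = 1  # selects u(t)
    constr.append(s_tp @ e == s_t @ e + DeltaT * zeta_u * (r_t @ u))  # dynamics
    constr.append(s_t @ e >= EE_min)
    constr.append(s_t @ e <= EE_max)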
Example #7
# create agent
agent = Agent(in_neighbors=np.nonzero(Adj[local_rank, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, local_rank])[0].tolist(),
              in_weights=W[local_rank, :].tolist())

# local variable dimension - random in [2,5]
n_i = np.random.randint(2, 6)

# number of coupling constraints
S = 3

# generate a positive definite matrix
P = np.random.randn(n_i, n_i)
while not is_pos_def(P):
    P = np.random.randn(n_i, n_i)
bias = np.random.randn(n_i, 1)

# declare a variable
x = Variable(n_i)

# define the local objective function
fn = QuadraticForm(x - bias, P)

# define the local constraint set
constr = [x >= -2, x <= 2]

# define the local contribution to the coupling constraints
A = np.random.randn(S, n_i)
coupling_fn = A.transpose() @ x

# create local problem and assign to agent
pb = ConstraintCoupledProblem(objective_function=fn,
                              constraints=constr,
                              coupling_function=coupling_fn)
agent.set_problem(pb)
Example #8
def generate_LP(n_var: int,
                n_constr: int,
                radius: float,
                direction: str = 'min',
                constr_form: str = 'ineq'):
    """Generate a feasible and not unbounded Linear Program and return problem data (cost, constraints, solution)
    TODO add reference
    
    Args:
        n_var (int): number of optimization variables
        n_constr (int): number of constraints
        radius (float): size of feasible set (inequality form), size of dual feasible set (equality form)
        direction (str): optimization direction - either 'max' (for maximization) or 'min' (for minimization, default)
        constr_form (str): form of constraints - either 'ineq' (for inequality: Ax <= b, default) or 'eq' (for standard form: Ax = b, x >= 0)

    Returns:
        c (np.ndarray): cost vector
        A (np.ndarray): constraint matrix
        b (np.ndarray): constraint vector (right-hand side)
        solution (np.ndarray): optimal solution of problem
    """

    size_1 = n_constr
    size_2 = n_var
    x = Variable(n_var)

    # swap dimensions if in dual form (equality constraints)
    if constr_form == 'eq':
        size_1, size_2 = size_2, size_1

    count = 0

    while True:
        count += 1

        # generate a problem in the form min_y c'y s.t. Ay <= b
        A_gen = np.random.randn(size_1, size_2)
        b_gen = np.random.randn(size_1, 1)
        c_gen = A_gen.transpose() @ np.random.randn(size_1, 1)

        # prepare problem data
        if constr_form == 'ineq':
            # primal problem
            A = A_gen
            b = b_gen
            c = c_gen
            constr = A_gen.transpose() @ x <= b_gen
        else:
            # dual problem: min_x b'x s.t. A'x = -c, x >= 0
            A = A_gen.transpose()
            b = -c_gen
            c = b_gen
            constr = [A_gen @ x == -c_gen, x >= 0]

        # form optimization problem object
        if direction == 'min':
            obj = c @ x
        else:
            obj = -c @ x

        prob = LinearProblem(objective_function=obj, constraints=constr)

        # solve problem to check whether an optimal solution exists
        try:
            solution = prob.solve()
            break
        except Exception:
            # infeasible or unbounded: discard and generate a new problem
            continue

    return c, A, b, solution
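
# example usage (cf. Example #10; parameter values are illustrative):
# draw a feasible LP in standard equality form
c, A, b, x_opt = generate_LP(n_var=10, n_constr=5, radius=50, constr_form='eq')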
Example #9
# get MPI info
NN = MPI.COMM_WORLD.Get_size()
agent_id = MPI.COMM_WORLD.Get_rank()

# Generate a common graph (everyone uses the same seed)
Adj = binomial_random_graph(NN, p=0.03, seed=1)

#####################
# Problem parameters
#####################
np.random.seed(10*agent_id)

# linear objective function
dim = 2
z = Variable(dim)
c = np.ones((dim, 1))
obj_func = c @ z

# constraints are circles of the form (z - p)^T (z - p) <= r^2,
# equivalently z^T z - 2 p^T z + p^T p - r^2 <= 0
I = np.eye(dim)
p = np.random.rand(dim, 1)
r = 1  # unit radius

constr = []
ff = QuadraticForm(z, I, -2 * (I @ p), (p.transpose() @ I @ p) - r**2)
constr.append(ff <= 0)

#####################
# Distributed algorithms
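#####################

# hypothetical continuation (assumption, not the original code): this
# linear-objective, local-constraint-set problem fits disropt's
# ConstraintsConsensus algorithm
agent = Agent(in_neighbors=np.nonzero(Adj[agent_id, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, agent_id])[0].tolist())
agent.set_problem(Problem(objective_function=obj_func, constraints=constr))

algorithm = ConstraintsConsensus(agent=agent, enable_log=True)
algorithm.run(iterations=100)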
Example #10
# number of columns for each processor
k = 2

# generate a feasible optimization problem of size k * nproc
c_glob, A_glob, b_glob, x_glob = generate_LP(k * nproc, n_constr, 50, constr_form='eq')

# extract the columns assigned to this agent
local_indices = list(np.arange(k*local_rank, k*(local_rank+1)))

c_loc = c_glob[local_indices, :]
A_loc = A_glob[:, local_indices]
b_loc = b_glob

# define the local problem data
x = Variable(k)
obj = c_loc @ x
constr = A_loc.transpose() @ x == b_loc
problem = LinearProblem(objective_function=obj, constraints=constr)

# create agent
agent = Agent(in_neighbors=np.nonzero(Adj[local_rank, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, local_rank])[0].tolist())
agent.problem = problem

# instantiate the algorithm
algorithm = DistributedSimplex(agent, enable_log=True, problem_size=nproc * k,
                               local_indices=local_indices,
                               stop_iterations=2 * graph_diam + 1)

# run the algorithm
x_sequence, J_sequence = algorithm.run(iterations=100, verbose=True)
Example #11
np.random.seed()

agent = Agent(in_neighbors=np.nonzero(Adj[local_rank, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, local_rank])[0].tolist(),
              in_weights=W[local_rank, :].tolist())

# variable dimension
d = 4

# generate a positive definite matrix
P = np.random.randn(d, d)
while not is_pos_def(P):
    P = np.random.randn(d, d)
bias = np.random.randn(d, 1)
# declare a variable
x = Variable(d)

# define the local objective function
fun = QuadraticForm(x - bias, P)

# local problem
pb = Problem(fun)
agent.set_problem(pb)

# instantiate the algorithms
initial_condition = np.random.rand(d, 1)

algorithm = GradientTracking(agent=agent,
                             initial_condition=initial_condition,
                             enable_log=True)
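
# assumed continuation (the snippet is cut here): run the algorithm with the
# same arguments as in Example #4 and print the local estimate
algorithm.run(iterations=1000, stepsize=0.01)
print("Agent {} - solution estimate: {}".format(
    local_rank, algorithm.get_result().flatten()))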