Example 1
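# Preamble assumed by the snippet (a minimal sketch, not part of the original):
# MPI provides the agent index and the common graph is drawn with a shared
# seed; the data F, z, labels, points, A, D, E and obj_func come from the
# omitted problem-construction code.
import numpy as np
from mpi4py import MPI
from disropt.agents import Agent
from disropt.algorithms import ConstraintsConsensus
from disropt.problems import Problem
from disropt.utils.graph_constructor import binomial_random_graph

NN = MPI.COMM_WORLD.Get_size()      # one MPI process per agent
agent_id = MPI.COMM_WORLD.Get_rank()
Adj = binomial_random_graph(NN, p=0.3, seed=1)  # same graph on all agents

constr = []  # list of local constraints, filled below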
# linear constraints on z, one per column of F
for idx in range(F.shape[1]):
    constr.append(F[:, idx][:, None] @ z >= 0)

for j in range(sum(nsamp)):
    constr.append(
        float(labels[j]) * (points[:, j].reshape(2, 1) @ (A @ z) + D @ z) >=
        1 - E @ z)  # margin constraint for the j-th sample point

#####################
# Distributed algorithms
#####################

# local agent and problem
agent = Agent(in_neighbors=np.nonzero(Adj[agent_id, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, agent_id])[0].tolist())
pb = Problem(obj_func, constr)
agent.set_problem(pb)
# instantiate the algorithm
constrcons = ConstraintsConsensus(agent=agent, enable_log=True)

n_iter = NN * 3

# run the algorithm
constrcons_seq = constrcons.run(iterations=n_iter, verbose=True)

# retrieve and print the local solution estimate
constrcons_x = constrcons.get_result()

print("Agent {}: {}".format(agent_id, constrcons_x.flatten()))
Example 2
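# Preamble assumed by the snippet (a minimal sketch, not from the original):
# imports, the variable size n, and an Agent named agent built as in Example 1
import numpy as np
from disropt.agents import Agent
from disropt.algorithms import BlockSubgradientMethod
from disropt.functions import QuadraticForm, Variable
from disropt.problems import Problem

n = 4  # size of the optimization variable (assumed value)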
# generate a positive definite matrix
P = np.random.randn(n, n)
P = P.transpose() @ P
bias = np.random.randn(n, 1)
# declare a variable
x = Variable(n)

# define the local objective function
fn = QuadraticForm(x - bias, P)

# define a (common) constraint set
constr = [x <= 1, x >= -1]

# local problem
pb = Problem(fn, constr)
agent.set_problem(pb)

# instantiate the algorithms
initial_condition = np.random.rand(n, 1)

algorithm = BlockSubgradientMethod(agent=agent,
                                   initial_condition=initial_condition,
                                   enable_log=True)


def step_gen(k):  # define a stepsize generator
    return 0.1 / np.sqrt(k + 1)


# run the algorithm
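# (plausible completion of the truncated snippet; the iteration count is an
# assumption)
algorithm.run(iterations=1000, stepsize=step_gen, verbose=True)
print("Agent {}: {}".format(agent.id, algorithm.get_result().flatten()))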
Example 3
import numpy as np
from disropt.agents import Agent
from disropt.algorithms import GradientTracking
from disropt.functions import QuadraticForm, Variable
from disropt.utils.graph_constructor import MPIgraph
from disropt.problems import Problem

# generate communication graph (everyone uses the same seed)
comm_graph = MPIgraph('random_binomial', 'metropolis')
agent_id, in_nbrs, out_nbrs, in_weights, _ = comm_graph.get_local_info()

# size of optimization variable
n = 5

# generate quadratic cost function (re-seeding gives each agent a different cost)
np.random.seed()
Q = np.random.randn(n, n)
Q = Q.transpose() @ Q
x = Variable(n)
func = QuadraticForm(x - np.random.randn(n, 1), Q)

# create Problem and Agent
agent = Agent(in_nbrs, out_nbrs, in_weights=in_weights)
agent.set_problem(Problem(func))

# run the algorithm
x0 = np.random.rand(n, 1)
algorithm = GradientTracking(agent, x0)
algorithm.run(iterations=1000, stepsize=0.01)

print("Agent {} - solution estimate: {}".format(
    agent_id,
    algorithm.get_result().flatten()))
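Since the graph is built with MPIgraph, this example is meant to be launched with one MPI process per agent, e.g. mpirun -np 8 python example.py (the script name is illustrative).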
Example 4
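# Preamble assumed by the snippet (a sketch, not part of the original): the
# loading code below starts inside a loop over the N agents; the value of N
# and the dict initializations are assumptions based on the loaded file names
import pickle
import numpy as np
import matplotlib.pyplot as plt
from disropt.problems import Problem

N = 10  # number of agents (assumed value)
lambda_sequence = {}
x_sequence = {}
z_sequence = {}
local_obj_function = {}
for i in range(N):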
    with open('agent_{}_dual_sequence.pkl'.format(i), 'rb') as inp:
        lambda_sequence[i] = pickle.load(inp)
    x_sequence[i] = np.load("agent_{}_primal_sequence.npy".format(i))
    z_sequence[i] = np.load("agent_{}_auxiliary_primal_sequence.npy".format(i))
    with open('agent_{}_function.pkl'.format(i), 'rb') as inp:
        local_obj_function[i] = pickle.load(inp)
with open('constraints.pkl', 'rb') as inp:
    constr = pickle.load(inp)
iters = x_sequence[0].shape[0]

# solve centralized problem
global_obj_func = 0
for i in range(N):
    global_obj_func += local_obj_function[i]

global_pb = Problem(global_obj_func, constr)
x_centr = global_pb.solve()
cost_centr = global_obj_func.eval(x_centr)
x_centr = x_centr.flatten()

# compute cost errors: local cost along the sequence minus centralized optimal cost
cost_err = np.zeros((N, iters)) - cost_centr  # start at -cost_centr, add local costs below

for i in range(N):
    for t in range(iters):
        # add i-th cost
        cost_err[i, t] += local_obj_function[i].eval(x_sequence[i][t, :])

# plot maximum cost error
plt.figure()
plt.title('Maximum cost error (among agents)')
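# (plausible completion of the truncated snippet: plot the largest cost error
# among agents on a logarithmic scale)
plt.semilogy(np.arange(iters), np.amax(np.abs(cost_err), axis=0))
plt.show()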
Example 5
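# Preamble assumed by the snippet (a sketch, not from the original): the
# dataset (points, labels, nsamp), the weight matrix W, Adj, agent_id and dim
# come from the omitted setup code; obj_func is accumulated starting from zero
import numpy as np
from disropt.agents import Agent
from disropt.algorithms import SubgradientMethod, GradientTracking
from disropt.functions import Logistic, Variable
from disropt.problems import Problem

z = Variable(dim + 1)  # classifier weights plus bias term
obj_func = 0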
# build the local logistic cost: sum of log(1 + exp(-label_j * a_j^T z)) terms
for j in range(sum(nsamp)):
    e_j = np.zeros((sum(nsamp), 1))
    e_j[j] = 1
    A_j = np.vstack((points @ e_j, 1))  # j-th sample with appended bias entry
    obj_func += Logistic(-labels[j] * A_j @ z)

#####################
# Distributed algorithms
#####################

# local agent and problem
agent = Agent(in_neighbors=np.nonzero(Adj[agent_id, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, agent_id])[0].tolist(),
              in_weights=W[agent_id, :].tolist())
pb = Problem(obj_func)
agent.set_problem(pb)

# instantiate the algorithms
x0 = 5 * np.random.rand(dim + 1, 1)

subgr = SubgradientMethod(agent=agent, initial_condition=x0, enable_log=True)

gradtr = GradientTracking(agent=agent, initial_condition=x0, enable_log=True)


def step_gen(k):  # diminishing stepsize generator for the subgradient method
    return 1 / ((k + 1)**0.6)


constant_stepsize = 0.001  # constant stepsize for gradient tracking
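
# (plausible completion of the truncated snippet: run both algorithms, with an
# assumed iteration count, and save the sequences under the file names that
# Example 7 loads)
subgr_seq = subgr.run(iterations=10000, stepsize=step_gen)
gradtr_seq = gradtr.run(iterations=10000, stepsize=constant_stepsize)
np.save("agent_{}_seq_subgr.npy".format(agent_id), subgr_seq)
np.save("agent_{}_seq_gradtr.npy".format(agent_id), gradtr_seq)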
Example 6
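# Preamble assumed by the snippet (a minimal sketch; every value below is an
# illustrative assumption): a common unknown point x_true, a local anchor c,
# the communication-graph diameter, and an Agent named agent built as in the
# other examples
import numpy as np
from disropt.agents import Agent
from disropt.algorithms import ASYMM
from disropt.functions import Variable
from disropt.problems import Problem

n = 2
np.random.seed(1)   # common seed: every agent draws the same x_true
x_true = np.random.randn(n, 1)
np.random.seed()    # re-seed: each agent draws its own anchor point
c = np.random.randn(n, 1)
graph_diameter = 4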
distance = np.linalg.norm(x_true - c, ord=2)

# declare a variable
x = Variable(n)

# define the local objective function (the quadratic form x^T x)
objective = x @ x

# local constraints: the equality SquaredNorm(x - c) == distance**2 is
# relaxed into two inequalities with a small random slack
upper_bound = (x - c) @ (x - c) <= (distance**2 + 0.0001 * np.random.rand())
lower_bound = (x - c) @ (x - c) >= (distance**2 - 0.0001 * np.random.rand())

constraints = [upper_bound, lower_bound]
# define local problem
pb = Problem(objective_function=objective, constraints=constraints)
agent.set_problem(pb)

# instantiate and run the algorithm
x0 = np.random.randn(n, 1)
algorithm = ASYMM(agent=agent,
                  graph_diameter=graph_diameter,
                  initial_condition=x0,
                  enable_log=True)

timestamp_sequence_awake, timestamp_sequence_sleep, sequence = algorithm.run(
    running_time=5.0)

# print the true point and the local solution estimate
print(x_true)
print("Agent {}: {}".format(agent.id, algorithm.get_result()))
Example 7
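# Preamble assumed by the snippet (a sketch): imports plus the constants NN,
# iters and size; the 'info.pkl' file name and its keys are assumptions, not
# from the source
import pickle
import numpy as np
import matplotlib.pyplot as plt
from disropt.problems import Problem

with open('info.pkl', 'rb') as inp:
    info = pickle.load(inp)
NN = info['N']             # number of agents
iters = info['iterations']
size = info['size']        # dimension of the optimization variable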
# load agent data
seq_subgr = np.zeros((NN, iters, size))
seq_gradtr = np.zeros((NN, iters, size))
local_function = {}
for i in range(NN):
    seq_subgr[i, :, :] = np.load("agent_{}_seq_subgr.npy".format(i))
    seq_gradtr[i, :, :] = np.load("agent_{}_seq_gradtr.npy".format(i))
    with open('agent_{}_func.pkl'.format(i), 'rb') as inp:
        local_function[i] = pickle.load(inp)

# solve centralized problem
global_obj_func = 0
for i in range(NN):
    global_obj_func += local_function[i]

global_pb = Problem(global_obj_func)
x_centr = global_pb.solve()
cost_centr = global_obj_func.eval(x_centr)
x_centr = x_centr.flatten()

# compute cost errors
cost_err_subgr = np.zeros((NN, iters))
cost_err_gradtr = np.zeros((NN, iters))

for i in range(NN):
    for t in range(iters):
        # first compute global function value at local point
        cost_ii_tt_subgr = 0
        cost_ii_tt_gradtr = 0
        for j in range(NN):
            # (plausible completion of the truncated line, following the
            # reshape pattern of Example 8)
            cost_ii_tt_subgr += local_function[j].eval(
                seq_subgr[i, t, :].reshape((size, 1)))
            cost_ii_tt_gradtr += local_function[j].eval(
                seq_gradtr[i, t, :].reshape((size, 1)))
        # cost error w.r.t. the centralized optimal cost
        cost_err_subgr[i, t] = abs(cost_ii_tt_subgr - cost_centr)
        cost_err_gradtr[i, t] = abs(cost_ii_tt_gradtr - cost_centr)
Example 8
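# (assumes the same preamble as Example 7: pickle/numpy/matplotlib imports,
# the Problem class, and the constants NN, iters, size)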
# load agent data
sequence = np.zeros((NN, iters, size))
local_constr = {}
for i in range(NN):
    sequence[i, :, :] = np.load("agent_{}_seq.npy".format(i),
                                allow_pickle=True).reshape((iters, size))
    with open('agent_{}_constr.pkl'.format(i), 'rb') as inp:
        local_constr[i] = pickle.load(inp)
with open('objective_function.pkl', 'rb') as inp:
    obj_func = pickle.load(inp)

# solve centralized problem
global_constr = []
for i in range(NN):
    global_constr.extend(local_constr[i])
global_pb = Problem(obj_func, global_constr)
x_centr = global_pb.solve()
cost_centr = obj_func.eval(x_centr)

# compute cost errors
cost_err = np.zeros((NN, iters))

for i in range(NN):
    for t in range(iters):
        cost_err[i, t] = abs(
            obj_func.eval(sequence[i, t, :].reshape((size, 1))) - cost_centr)

# compute max violation
vio_err = np.zeros((NN, iters))
for i in range(NN):
    for t in range(iters):
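        # plausible completion (a sketch): accumulate the positive part of
        # each global constraint residual; this assumes the constraints are
        # stored in canonical form f(x) <= 0, with the left-hand side exposed
        # as c.function (an assumption about the constraint objects)
        x_it = sequence[i, t, :].reshape((size, 1))
        vio = 0
        for c in global_constr:
            vio += np.linalg.norm(np.maximum(c.function.eval(x_it), 0))
        vio_err[i, t] = vio

# plot maximum cost error and violation among agents (a sketch, mirroring the
# plotting pattern of Example 4)
plt.figure()
plt.title('Maximum cost error (among agents)')
plt.semilogy(np.arange(iters), np.amax(cost_err, axis=0))
plt.figure()
plt.title('Maximum constraint violation (among agents)')
plt.semilogy(np.arange(iters), np.amax(vio_err, axis=0))
plt.show()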