Code Example #1
def update_graph(k):
    # Adjacency matrix of the time-varying graph at iteration k
    # (Adj_seq, agent and agent_id are assumed to be defined elsewhere in the script)
    Adj_k = Adj_seq[k]
    # Corresponding Metropolis-Hastings weight matrix
    W_k = metropolis_hastings(Adj_k)
    # Update the local agent's neighbor sets and consensus weights
    agent.set_neighbors(
        in_neighbors=np.nonzero(Adj_k[agent_id, :])[0].tolist(),
        out_neighbors=np.nonzero(Adj_k[:, agent_id])[0].tolist())
    agent.set_weights(in_weights=W_k[agent_id, :].tolist())
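The snippet above relies on a pre-built sequence of adjacency matrices Adj_seq and an already constructed agent. A minimal sketch of how such a context could be set up, using only the calls that appear in the other examples on this page (binomial_random_graph, metropolis_hastings, Agent); the names Adj_seq, agent_id and n_graphs are placeholders for illustration, not part of the original example.

import numpy as np
from mpi4py import MPI
from disropt.agents import Agent
from disropt.utils.graph_constructor import binomial_random_graph, metropolis_hastings

# This process' MPI rank doubles as the agent identifier (assumption of this sketch)
comm = MPI.COMM_WORLD
agent_id = comm.Get_rank()
nproc = comm.Get_size()

# Pre-generate a common sequence of random graphs; seeding with the index keeps
# the sequence identical on every process (n_graphs is a placeholder)
n_graphs = 50
Adj_seq = [binomial_random_graph(nproc, p=0.3, seed=k) for k in range(n_graphs)]

# Agent initialized from the first graph of the sequence
W0 = metropolis_hastings(Adj_seq[0])
agent = Agent(in_neighbors=np.nonzero(Adj_seq[0][agent_id, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj_seq[0][:, agent_id])[0].tolist(),
              in_weights=W0[agent_id, :].tolist())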
Code Example #2
        def run(
                self, epochs=1, self_learning_epochs=5, review_epochs=1, review_intervals={0: 100},
                weights_computation_interval=100, test_interval=1000, batch_size=10, gamma_weight=100,
                verbose=False):
            self.accuracies = []
            # self learning
            self.self_learning(epochs=self_learning_epochs)
            self.eval_accuracy()
            self.change_weights(gamma_weight=gamma_weight)

            # sample-index thresholds at which the review interval changes (reverse insertion order)
            review_changes = np.asarray(list(review_intervals.keys()))[::-1]
            Xs_len = len(self.X_shared)
            total_iters = epochs*Xs_len
            for ep in range(epochs):
                # collective learning
                for k in range(Xs_len//batch_size):
                    if verbose:
                        if self.agent.id == 0:
                            print("Remaining {}".format(total_iters-(ep*Xs_len+k*batch_size)), end="\r")
                    # produce collective proxy label
                    samples = self.X_shared[k*batch_size:(k+1)*batch_size]
                    proxy_labels = self.collective_prediction(samples, batch_size)
                    # train on proxy data
                    loss_value, grads = self.grad(tf.reshape(samples, (batch_size, 28, 28, 1)), proxy_labels)
                    self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables), self.stepsize)

                    # pick the review interval in force: argmax selects the first
                    # threshold (in reversed key order) not exceeding k*batch_size
                    current_interval = review_changes[np.argmax(review_changes <= k*batch_size)]
                    if (k*batch_size) % review_intervals[current_interval] == 0:
                        self.self_learning(epochs=review_epochs)

                    if (k*batch_size) % 100 == 0:
                        # Generate a common graph (everyone uses the same seed)
                        Adj = binomial_random_graph(self.agent.communicator.size, p=0.5, seed=(k*batch_size))
                        W = metropolis_hastings(Adj)

                        # update the local agent's neighbors and consensus weights
                        self.agent.set_neighbors(
                            in_neighbors=np.nonzero(Adj[self.agent.id, :])[0].tolist(),
                            out_neighbors=np.nonzero(Adj[:, self.agent.id])[0].tolist())
                        self.agent.set_weights(in_weights=W[self.agent.id, :].tolist())

                    if (k*batch_size) % weights_computation_interval == 0:
                        self.eval_accuracy()
                        self.change_weights(gamma_weight=gamma_weight)

                    if (k*batch_size) % test_interval == 0:
                        self.test()
                        self.accuracies.append(float(self.test_accuracy.numpy()))

            if self.enable_log:
                return self.accuracies
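Both examples above rebuild the Metropolis-Hastings weight matrix whenever the graph changes; the averaging steps rely on its rows summing to one (and, when the graph is undirected, on the matrix being doubly stochastic). A quick standalone check, a sketch reusing only the two graph utilities already imported in these examples:

import numpy as np
from disropt.utils.graph_constructor import binomial_random_graph, metropolis_hastings

# same calls as in the examples, here for 8 agents
Adj = binomial_random_graph(8, p=0.5, seed=1)
W = metropolis_hastings(Adj)

# each row sums to one; if Adj is symmetric (undirected graph), W is symmetric
# and therefore doubly stochastic as well
print("rows sum to 1:", np.allclose(W.sum(axis=1), 1.0))
print("symmetric:", np.allclose(W, W.T))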
Code Example #3
File: launcher.py  Project: YeTian-93/disropt
import numpy as np
from mpi4py import MPI

from disropt.agents import Agent
from disropt.algorithms.subgradient import BlockSubgradientMethod
from disropt.functions import QuadraticForm, Variable, SquaredNorm
from disropt.utils.utilities import is_pos_def
from disropt.constraints.projection_sets import Box
from disropt.utils.graph_constructor import binomial_random_graph, metropolis_hastings
from disropt.problems import Problem

# get MPI info
comm = MPI.COMM_WORLD
nproc = comm.Get_size()
local_rank = comm.Get_rank()

# Generate a common graph (everyone uses the same seed)
Adj = binomial_random_graph(nproc, p=0.3, seed=1)
W = metropolis_hastings(Adj)

# reset local seed
np.random.seed(local_rank)

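# Row local_rank of Adj gives the in-neighbors, column local_rank the out-neighbors,
# and the corresponding row of W the weights applied to incoming information.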
agent = Agent(in_neighbors=np.nonzero(Adj[local_rank, :])[0].tolist(),
              out_neighbors=np.nonzero(Adj[:, local_rank])[0].tolist(),
              in_weights=W[local_rank, :].tolist())

# variable dimension
n = 6

# generate a positive definite matrix
P = np.random.randn(n, n)
P = P.transpose() @ P
bias = np.random.randn(n, 1)
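The excerpt ends right after building the problem data; is_pos_def is imported above presumably to validate this construction. A minimal pure-NumPy check of the same property (no additional disropt API assumed):

# R.T @ R with a square random R is symmetric positive semidefinite, and with
# probability one it is positive definite, so all eigenvalues should be > 0.
eigvals = np.linalg.eigvalsh(P)
print("P is positive definite:", bool(eigvals.min() > 0))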