Example #1
def initializeDataClient(client_id, stub, X, y):
    # logging.debug(f"To send {client_id} got {X.shape}, {y.shape}, \n {X[0:5, 0:5]}")
    data = functions_pb2.Data(x=serialize(X), y=serialize(y))
    res = stub.InitializeData(data)
    logging.info(
        f"Node {client_id} initialized with #datapoints: {res.numeric_reply} "
        f"from client {res.str_reply}")
    return client_id
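
The serialize and deserialize helpers used throughout these examples are not shown on this page. A minimal sketch, assuming the payloads are NumPy arrays carried in the bytes fields of the protobuf messages (pickle is one simple choice; writing with np.save into a buffer would avoid pickle's trust caveats):

import pickle

import numpy as np


def serialize(array: np.ndarray) -> bytes:
    # Pack a NumPy array into bytes for a protobuf bytes field.
    return pickle.dumps(array)


def deserialize(payload: bytes) -> np.ndarray:
    # Recover the NumPy array from the wire payload.
    return pickle.loads(payload)
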
Example #2
    def Train(self, request, context):
        self._increase_global_counts()
        self.Xtheta = deserialize(request.model.xtheta)
        Q = int(request.q)
        lambduh = float(request.lambduh)
        logging.info(
            f"Client {self.device_index}: Xtheta shape {self.Xtheta.shape}, "
            f"theta shape {self.theta.shape}")
        # Isolate the H_-k from other datacenters for the same label space
        # Obtained in the last iteration
        Xtheta_from_other_DC = self.Xtheta - self.X @ self.theta  # Assuming label space is same
        for rounds in range(Q):
            logging.info("Starting local round ", rounds)
            # Batch gradient descent for the time being.

            # If NO partial gradient information from outside is used:
            # grad = 1/len(device.X) * device.X.T @ (device.X @ device.theta - device.y)

            # If partial gradient information from outside is used:
            grad = (1 / len(self.X)) * self.X.T @ (
                (Xtheta_from_other_DC + self.X @ self.theta) - self.y
            ) + lambduh * self.theta
            if self.decreasing_step:
                # Decreasing step size: alpha / sqrt(t + 1)
                step = self.alpha / np.sqrt(self.global_rounds_counter + 1)
                self.theta = self.theta - step * grad
            else:
                self.theta = self.theta - self.alpha * grad
        self.Xtheta = self.X @ self.theta  # Update the value of the predicted y (probably unnecessary and not used)
        response_model = functions_pb2.Model(model=serialize(self.theta))
        return response_model
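
The update inside Train above is one step of gradient descent on a ridge-regularized least-squares objective, with the residual shifted by the predictions contributed from other data centers. A standalone sketch of the same step on synthetic data (all names here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
n, d = 100, 5
X = rng.normal(size=(n, d))
y = rng.normal(size=n)
theta = np.zeros(d)
xtheta_from_other_dc = rng.normal(size=n)  # peer contributions H_-k
alpha, lambduh = 0.01, 0.01

# One batch step, matching the grad expression in Train.
residual = (xtheta_from_other_dc + X @ theta) - y
grad = (1 / len(X)) * X.T @ residual + lambduh * theta
theta = theta - alpha * grad
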
Example #3
    def Train(self, request, context):
        """
        HFL training. Does not need to maintain the Xw vector.
        :param request:
        :param context:
        :return:
        """
        self._increase_global_counts()
        Q = int(request.q)
        lambduh = float(request.lambduh)
        logging.info(
            f"Client {self.device_index}: theta shape {self.theta.shape}")
        for rounds in range(Q):
            logging.info(msg=f"Starting local round {rounds}")
            # Batch gradient descent for the time being.
            # HFL uses only local data, so no partial gradient
            # information from outside enters the update:
            grad = (1 / len(self.X)) * self.X.T @ (
                (self.X @ self.theta) - self.y
            ) + lambduh * self.theta
            if self.decreasing_step:
                # Decreasing step size: alpha / sqrt(t + 1)
                step = self.alpha / np.sqrt(self.global_rounds_counter + 1)
                self.theta = self.theta - step * grad
            else:
                self.theta = self.theta - self.alpha * grad
        response_model = functions_pb2.Model(model=serialize(self.theta))
        return response_model
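
This page does not show how the server combines the thetas that Train returns. In HFL the usual choice is a (weighted) average of the client models; a minimal sketch, assuming equal client weights:

import numpy as np


def aggregate(models):
    # FedAvg-style aggregation: element-wise mean of the parameter vectors.
    return np.mean(np.stack(models), axis=0)


# e.g. global_theta = aggregate([theta_client_0, theta_client_1])
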
Example #4
def sendModel(client_id, stub, firstInitFlag, global_model):
    model = functions_pb2.Model(model=serialize(global_model))
    res = stub.UpdateLocalModels(model)
    xtheta = deserialize(res.xtheta)
    logging.info(msg=f"Client {res.id} sends back Xtheta {xtheta[0:10]} ")
    assert res.id == client_id
    return client_id, xtheta
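
sendModel is naturally issued to every client in parallel. A sketch of broadcasting with a thread pool and summing the returned Xtheta vectors into the global prediction (the stubs dict mapping client id to an open gRPC stub is an assumption):

from concurrent.futures import ThreadPoolExecutor

import numpy as np


def broadcast_model(stubs, global_model):
    # Send the current global model to every client and sum the Xtheta replies.
    with ThreadPoolExecutor(max_workers=len(stubs)) as pool:
        futures = [
            pool.submit(sendModel, cid, stub, False, global_model)
            for cid, stub in stubs.items()
        ]
        results = dict(f.result() for f in futures)
    return np.sum(list(results.values()), axis=0)
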
Example #5
def initializeClient(i, stub, model):
    initialString = functions_pb2.InitialParams(index=float(i),
                                                dc_index=int(i),
                                                device_index=int(i),
                                                model=serialize(model),
                                                alpha=0.01,
                                                lambduh=0.01)
    # print(f"Sending {initialString} to client {i}")
    res = stub.InitializeParams(initialString)
    logging.debug(f"Node {i} initialized as client {i}, result: {res}")
    return i, res
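
A usage sketch for bringing up several clients, assuming each one serves on a local port and that the generated stub class is functions_pb2_grpc.FederatedStub (the real class name depends on the service declared in the .proto file):

import grpc

import functions_pb2_grpc  # generated gRPC module; name assumed
import numpy as np

model = np.zeros(10)  # illustrative initial global model
stubs = {}
for i, address in enumerate(["localhost:50051", "localhost:50052"]):
    channel = grpc.insecure_channel(address)
    stubs[i] = functions_pb2_grpc.FederatedStub(channel)  # assumed stub name
    initializeClient(i, stubs[i], model)
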
Example #6
def trainFunc(client_id, stub, q, lambduh, xtheta, model=None):
    trainconfig = functions_pb2.TrainConfig()
    trainconfig.q = q
    trainconfig.lambduh = lambduh
    trainconfig.model.xtheta = serialize(xtheta)
    # trainconfig.model.model = serialize(model)

    # if xtheta:
    #     trainconfig.model.xtheta = serialize(xtheta)
    # if model:
    #     trainconfig.model.model = serialize(model)
    # if not xtheta and not model:
    #     raise Exception
    res = stub.Train(trainconfig)
    model = deserialize(res.model)
    logging.info(
        f"Server received model from Client {client_id} ==> {model.shape}")
    return client_id, model
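
Putting examples 4 and 6 together, one global round of the Xtheta variant could look like the following sketch (global_round, q_local, and lam are illustrative names):

def global_round(stubs, global_xtheta, q_local=5, lam=0.01):
    # Each client runs q_local local steps against the shared Xtheta,
    # then returns its refreshed parameter block.
    models = {}
    for cid, stub in stubs.items():
        _, model = trainFunc(cid, stub, q_local, lam, global_xtheta)
        models[cid] = model
    return models
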
Example #7
    def UpdateLocalModels(self, request, context):
        self.theta = deserialize(request.model)
        self.Xtheta = self.X @ self.theta
        response = functions_pb2.Model(xtheta=serialize(self.Xtheta),
                                       id=self.identifier)
        return response
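
Caching Xtheta = X @ theta per client is what makes the vertical split add up: when the feature columns (and the matching slices of theta) are partitioned across clients, the full prediction is the sum of the per-client products. A quick NumPy check of that identity:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(8, 6))
theta = rng.normal(size=6)

# Split features and the matching parameter block across two clients.
X1, X2 = X[:, :3], X[:, 3:]
t1, t2 = theta[:3], theta[3:]

assert np.allclose(X @ theta, X1 @ t1 + X2 @ t2)
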
Example #8
    def SendModel(self, request, context):
        response = functions_pb2.Model()
        response.model = serialize(self.theta)
        response.id = self.identifier
        return response
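
On the server side, SendModel lets the coordinator pull a client's current parameters without triggering training. A usage sketch, assuming the RPC takes an empty request message named functions_pb2.Empty (the actual request type depends on the .proto definition):

def fetch_model(client_id, stub):
    # Pull the client's current theta; no local training happens here.
    res = stub.SendModel(functions_pb2.Empty())
    assert res.id == client_id
    return deserialize(res.model)
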