Пример #1
0
    def cl_fit(self, T, X, Y, params):
        """Run gradient descent for CL training, starting from weights T.

        Gradients come from self.cl_propagate; the cost is sampled every
        100 epochs into self.costs_.  Returns the final weight vector.
        """
        self.w = T
        self.costs_ = []
        for epoch in range(self.epochs):
            # Forward/backward pass: gradients and current cost.
            grads, cost = self.cl_propagate(X, Y, p=params)
            # Plain gradient-descent step (non in-place, so T is untouched).
            self.w = self.w - self.lr * grads["dw"]
            self.b_ = self.b_ - self.lr * grads["db"]
            i = epoch
            if i % 100 == 0:
                self.costs_.append(cost)
                if self.debug:
                    print(f"Cost after {i} epochs: {cost}")

        # Accuracy on the training data itself.
        Y_pred = self.predict(X)
        log(
            'success',
            f"{params['i']}:: CL :: Train accuracy: {100 - np.mean(np.abs(Y_pred - Y)) * 100} %"
        )
        return self.w
Пример #2
0
    def fit(self, X_, Y, node_name="", optimizer="", data=None):
        """Fit the model to data matrix X_ and target(s) Y.

        Parameters
        ----------
        X_ : array-like or sparse matrix, shape (n_samples, n_features)
            The input data; a bias column of ones is prepended internally.
        Y : array-like, shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).
        node_name : str
            Unused here; kept for interface compatibility with callers.
        optimizer : optimizer type
        data : cl parameters

        Returns
        -------
        self : returns a trained LR model.
        """
        # Prepend a bias column of ones so the first weight is the intercept.
        X = np.hstack((np.ones((X_.shape[0], 1)), X_.copy()))
        # Was: m = X.shape[0]; n = X.shape[1] — m was never used.
        self._init_params(X.shape[1])
        self._optimize(X, Y, optimizer, data)
        # predict expects the raw (un-augmented) feature matrix.
        Y_pred = self.predict(X_)
        log("Train accuracy: {} %".format(100 -
                                          np.mean(np.abs(Y_pred - Y)) * 100))

        return self
Пример #3
0
 def process_tuple(self, tuple_):
     """Build the node configuration from a (number_of_nodes, init_port) tuple.

     Populates self.config with a socket timeout and one node entry per
     worker, assigning consecutive ports starting at init_port.
     """
     if len(tuple_) != 2:
         log('exception', f"Enter a config file or a tuple in the form of (Number of nodes, INIT_PORT)")
         # Bail out: without this return the unpack below raised ValueError
         # on any malformed input, right after logging it.
         return
     n, port = tuple_
     self.config['timeout_ms'] = SOCK_TIMEOUT
     self.config['nodes'] = []
     # One worker entry per node; hosts are filled in elsewhere.
     for i in range(n):
         self.config['nodes'].append({'name': f"w{i}", 'host': '', 'port': port + i})
Пример #4
0
def mp_logistic_regression(node: Node):
    """
    Training a Logistic Regression Neural Network model in a P2P architecture
    :param node: Node
    :return: None
    """

    df = node.ldata

    # Prepare and split data
    X = df.iloc[:, :-1].values
    y = df.iloc[:, 60].values
    y = np.where(y == 'R', 0, 1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
    # The LR model consumes features-by-samples matrices, hence the transpose.
    X_train = X_train.reshape(X_train.shape[0], -1).T
    X_test = X_test.reshape(X_test.shape[0], -1).T

    # Training the solidarity model
    solitary_model = LogisticRegressionNN()
    solitary_model.fit(X_train, y_train)
    accuracy = solitary_model.metrics(X_test, y_test)
    log("success", f"{node.pname}: Solidarity model {accuracy}")

    # Initiate node for network training
    node.set_model(solitary_model)
    node.alpha = 0.5
    stop_condition = 10  # fixed number of smoothing rounds (was `stop_conditin`)

    # Model smoothing over network
    for i in range(stop_condition):
        # Request a model from a random peer
        peer = node.get_random_peer()
        req = request_model(node)
        node.send(peer, req)
        res = node.wait_response(peer, RESPONSE_MODEL)
        peer_model = res['payload']['model']
        if peer_model is None:
            log(f'{node.pname}: {node.peers[i].get("Name", i)} has no model!')
            continue
        node.update_models(peer["name"])

        # Ensemble the current node model with the peer model.
        # Fix: the original referenced `peer_classifier`, an undefined name
        # (the peer's model is bound as `peer_model` above) -> NameError.
        ens_classifier = node.model.ensemble(peer_model)

        # Retrain and recalculate the accuracy of the ensemble model using local data
        ens_classifier.fit(X_train, y_train)
        node.model = ens_classifier
        accuracy = ens_classifier.metrics(X_test, y_test, metric='accuracy')
        log(
            "success",
            f"{node.pname}: P[{node.peers[i].get('Name', i)}] Ensemble model {accuracy}"
        )

        log('exception', f"END Range({i}) +++++++")

    log('success', f"{node.pname}: Final model accuracy: {accuracy}")
Пример #5
0
def hello(node):
    """
    Send a hello request to one randomly chosen peer from this node's peers
    list and block until that peer's hello response arrives.
    :type node: Node
    :rtype: None
    """
    peer = node.get_random_peer()
    if peer is None:
        log('error', "No peer selected !")
        return
    req = request_hello(node)
    node.send(peer, req)
    res = node.wait_response(peer, RESPONSE_HELLO)
    # TODO look for future objects to handle waiting
    log(f"{node.pname}: Received -> {res['payload']}")
Пример #6
0
    def fit(self, X, Y):
        """Train the LR model on input matrix X against targets Y.

        Parameters are sized from X.shape[0], so X is assumed feature-major
        here — TODO confirm against callers.  Logs the training accuracy and
        returns self, fitted.
        """
        self._init_params(X.shape[0])
        self._optimize(X, Y)
        # Mean absolute error on the training set, reported as accuracy.
        train_error = np.mean(np.abs(self.predict(X) - Y))
        log(
            'success',
            "Train accuracy: {} %".format(100 - train_error * 100))

        return self
Пример #7
0
def lp_knn(node: Node, k: int):
    """
    Training a solidarity model
    :param node: Node
    :param k: int
    :return: None
    """
    frame = node.ldata

    # Feature/label split of the local data, then train/test partition.
    features = frame.iloc[:, :-1].values
    labels = frame.iloc[:, 4].values
    X_train, X_test, y_train, y_test = train_test_split(features, labels,
                                                        test_size=0.20)

    # Solidarity model: a plain KNN classifier trained on local data only.
    local_clf = KNeighborsClassifier(n_neighbors=k)
    local_clf.fit(X_train, y_train)
    accuracy = accuracy_score(y_test, local_clf.predict(X_test))

    node.model = local_clf
    log("success", f"{node.pname}: Solidarity model accuracy : {accuracy}")

    for i in range(len(node.peers)):
        log(f'{node.pname}: Range({i}): {node.peers[i].get("Name", i)}')

        # Ask a randomly selected peer for its model.
        peer = node.get_random_peer()
        node.send(peer, request_model(node))
        reply = node.wait_response(peer, RESPONSE_MODEL)
        remote_clf = reply['payload']['model']
        if remote_clf is None:
            log(f'{node.pname}: {node.peers[i].get("Name", i)} has no model!')
            continue

        # Soft-voting ensemble of the current model and the peer's model.
        combined = VotingClassifier(
            estimators=[('model', node.model), ('peer', remote_clf)],
            weights=[1, 1],
            voting='soft')

        # Refit the ensemble on local data and measure it on the held-out set.
        combined.fit(X_train, y_train)
        predicted = combined.predict(X_test)
        node.model = combined
        accuracy = accuracy_score(y_test, predicted)

        log(
            "success",
            f"{node.pname}: P[{node.peers[i].get('Name', i)}] Ensemble model accuracy : {accuracy}"
        )
        log('exception', f"END Range({i}) +++++++")
    log('success', f"{node.pname}: Final model accuracy: {accuracy}")
Пример #8
0
def mprob_perceptron(node: Node):
    """
    Training a perceptron model in a P2P architecture
    :param node: Node
    :return: None
    """

    frame = node.ldata

    # Drop the last and first columns, map diagnosis labels to {-1, +1},
    # and take the first ten feature-mean columns as inputs.
    frame.drop(frame.columns[[-1, 0]], axis=1, inplace=True)
    feature_means = list(frame.columns[1:11])
    frame.diagnosis = frame.diagnosis.map({'M': -1, 'B': 1})
    X = frame.loc[:, feature_means].values.astype(float)
    y = frame.loc[:, 'diagnosis'].values
    # Train/test partition of the local data.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

    # Solitary model: a perceptron trained on local data alone.
    base_model = Perceptron(eta=0.01, n_iter=100, random_state=0)
    base_model.fit(X_train, y_train)
    accuracy = base_model.metrics(X_test, y_test, metric='accuracy')

    node.model = base_model
    log("success", f"{node.pname}: Solidarity model {accuracy}")

    for i in range(len(node.peers)):

        log(f'{node.pname}: Range({i}): {node.peers[i].get("Name", i)}')

        # Fetch a model from one randomly chosen peer.
        peer = node.get_random_peer()
        node.send(peer, request_model(node))
        reply = node.wait_response(peer, RESPONSE_MODEL)
        remote_model = reply['payload']['model']
        if remote_model is None:
            log(f'{node.pname}: {node.peers[i].get("Name", i)} has no model!')
            continue

        # Merge local and remote models, refit on local data, re-evaluate.
        merged = node.model.ensemble(remote_model)
        merged.fit(X_train, y_train)
        node.model = merged
        accuracy = merged.metrics(X_test, y_test, metric='accuracy')
        log(
            "success",
            f"{node.pname}: P[{node.peers[i].get('Name', i)}] Ensemble model {accuracy}"
        )

        log('exception', f"END Range({i}) +++++++")

    log('success', f"{node.pname}: Final model accuracy: {accuracy}")