Example #1
    def validation_croisee(self,
                           x_tab,
                           t_tab,
                           k=10,
                           est_ech_poids=False,
                           *args):
        # List of lambda values to explore
        lamb_min = 0.000000001
        lamb_max = 2.
        liste_lamb = np.logspace(np.log(lamb_min),
                                 np.log(lamb_max),
                                 num=25,
                                 base=np.e)

        nb_donnees = len(x_tab)
        # 20% of the data in D_valid and 80% in D_ent
        nb_D_valid = int(np.floor(0.20 * nb_donnees))

        liste_erreur = np.zeros((len(liste_lamb)))
        for i in tqdm(range(len(liste_lamb))):
            self.modele = skPerceptron(penalty='l2',
                                       alpha=liste_lamb[i],
                                       max_iter=self.max_iter,
                                       tol=self.tol,
                                       shuffle=False)
            for j in range(k):
                # Boolean mask (True/False) used to split the data
                # into D_ent and D_valid
                liste_ind = np.ones(nb_donnees, dtype=bool)
                liste_ind[0:nb_D_valid] = 0
                np.random.shuffle(liste_ind)
                # D_valid corresponds to the False entries
                # D is split into two randomly formed groups:
                # D_ent and D_valid
                x_entr = x_tab[liste_ind]
                x_valid = x_tab[np.invert(liste_ind)]
                t_entr = t_tab[liste_ind]
                t_valid = t_tab[np.invert(liste_ind)]
                # Train on x_entr and t_entr
                self.entrainement(x_entr, t_entr, est_ech_poids, args[0])
                pred_valid = self.prediction(x_valid)
                liste_erreur[i] += self.erreur(t_valid, pred_valid)
            # Average the errors for this lambda
            liste_erreur[i] /= k

        meilleur_lambda = liste_lamb[np.argmin(liste_erreur)]
        self.modele = skPerceptron(penalty='l2',
                                   alpha=meilleur_lambda,
                                   max_iter=self.max_iter,
                                   tol=self.tol,
                                   shuffle=False)
        self.entrainement(x_tab, t_tab, est_ech_poids, args[0])
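The method above sweeps 25 regularization strengths spaced evenly on a natural-log scale and, for each one, averages the validation error over k random 80/20 splits. A self-contained sketch of that grid-and-split logic, with synthetic data and variable names chosen here purely for illustration:

import numpy as np

# Synthetic stand-in data
rng = np.random.default_rng(0)
x_tab = rng.normal(size=(100, 3))
t_tab = (x_tab[:, 0] > 0).astype(int)

# 25 lambdas spaced evenly on a natural-log scale between 1e-9 and 2
liste_lamb = np.logspace(np.log(1e-9), np.log(2.0), num=25, base=np.e)

# Random 80/20 boolean mask: False marks the validation rows
nb_donnees = len(x_tab)
nb_valid = int(np.floor(0.20 * nb_donnees))
masque = np.ones(nb_donnees, dtype=bool)
masque[:nb_valid] = False
rng.shuffle(masque)

x_entr, x_valid = x_tab[masque], x_tab[~masque]
t_entr, t_valid = t_tab[masque], t_tab[~masque]
print(x_entr.shape, x_valid.shape)  # (80, 3) (20, 3)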
Example #2
    def __init__(self, max_iter=1000, tol=1e-3):
        self.max_iter = max_iter
        self.tol = tol
        self.modele = skPerceptron(penalty='l2',
                                   max_iter=max_iter,
                                   tol=tol,
                                   shuffle=False)
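For reference, the constructor above is equivalent to building the scikit-learn estimator directly; a minimal sketch with the defaults spelled out:

from sklearn.linear_model import Perceptron as skPerceptron

# What the wrapper's __init__ produces with its defaults: an L2-penalized
# perceptron, a 1000-iteration budget, 1e-3 stopping tolerance, no shuffling.
modele = skPerceptron(penalty='l2', max_iter=1000, tol=1e-3, shuffle=False)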
Example #3
def train_classifier(data, probs):
    # Other classifiers previously tried here:
    # Perceptron(data.X.shape[1], epochs=30), KNeighborsClassifier(n_neighbors=10),
    # LogisticRegression(), SVC()
    classifier = skPerceptron(max_iter=1000, tol=1e-4)
    # Weighted fit: each training row counts according to probs
    classifier.fit(data.X_train, data.Y_train, sample_weight=probs)
    # print('Accuracy:', classifier.score(data.X_test, data.Y_test))
    return classifier
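The only non-default detail above is the sample_weight argument, which makes each training row count proportionally to probs during fitting. A runnable sketch on synthetic data (the array shapes and names here are assumptions, not the original data container):

import numpy as np
from sklearn.linear_model import Perceptron as skPerceptron

rng = np.random.default_rng(0)
X_train = rng.random((200, 5))
Y_train = (X_train[:, 0] > 0.5).astype(int)
probs = np.full(len(X_train), 1.0 / len(X_train))  # uniform placeholder weights

clf = skPerceptron(max_iter=1000, tol=1e-4)
clf.fit(X_train, Y_train, sample_weight=probs)  # weighted fit, as above
print('Training accuracy:', clf.score(X_train, Y_train))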
Example #4
def run():
    iris = load_iris()

    X = iris.data[:, 2:4]  # petal length, petal width
    y = (iris.target == 0).astype(int)  # iris-setosa

    percept = Perceptron(learning_rate=1)
    w, misclassified_, epochs, w_d = percept.train(X, y)

    score = percept.score(X, y, w)
    print('Our Perceptron Score:', score)

    clf = skPerceptron()
    clf.fit(X, y)

    score = clf.score(X, y)
    print('SKLearn Perceptron Score:', score)

    plotResults(X, y, w_d)
Example #5
X_train = data.iloc[:int(len(data) * 0.7), :-1]  # object-feature matrix
y_train = data.iloc[:int(len(data) * 0.7), -1]  # true sex labels (male/female)

X_test = data.iloc[int(len(data) * 0.7):, :-1]  # object-feature matrix
y_test = data.iloc[int(len(data) * 0.7):, -1]  # true sex labels (male/female)

# Train our perceptron and the `sklearn` perceptron on this data:


RANDOM_SEED = 42

perceptron = Perceptron()
perceptron.fit(X_train.values, y_train.values)  # train the neuron

sk_perceptron = skPerceptron(random_state=RANDOM_SEED)
sk_perceptron.fit(X_train.values, y_train.values)

# Compare the shares of correct answers (on the test data):


print('Accuracy (share of correct answers, out of 100%) of our perceptron: '
      '{:.3f} %'.format(
          accuracy_score(y_test.values, perceptron.forward_pass(X_test.values))
          * 100))
print('Accuracy (share of correct answers) of the sklearn perceptron: '
      '{:.3f} %'.format(
          accuracy_score(y_test.values, sk_perceptron.predict(X_test.values))
          * 100))
Example #6
    y_ = -(x_points * perceptron.w[0] + perceptron.b) / perceptron.w[1]
    plt.plot(x_points, y_)

    plt.plot(X_train[:50, 0], X_train[:50, 1], 'bo', label='0')
    plt.plot(X_train[50:, 0], X_train[50:, 1], 'ro', label='1')
    plt.xlabel('sepal length')
    plt.ylabel('sepal width')
    plt.show()


if __name__ == '__main__':
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = [
        'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
    ]
    data, X, y = preprocess(df)
    # train(data, X, y)
    model = skPerceptron(fit_intercept=False, max_iter=1000, shuffle=False)
    model.fit(X, y)
    # Visualization
    x_points = np.linspace(4, 7, 10)
    y = -(model.coef_[0][0] * x_points + model.intercept_) / model.coef_[0][1]
    plt.plot(x_points, y)
    plt.plot(X[:50, 0], X[:50, 1], 'bo', label='0')
    plt.plot(X[50:, 0], X[50:, 1], 'go', label='1')
    plt.xlabel('sepal length')
    plt.ylabel('sepal width')
    plt.show()
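The line plotted in both halves of this example follows from the decision rule itself: the fitted perceptron predicts sign(w0*x0 + w1*x1 + b), so the boundary is the set of points where w0*x0 + w1*x1 + b = 0, and solving for x1 gives x1 = -(w0*x0 + b) / w1. A tiny sketch with placeholder coefficients:

import numpy as np

# Placeholder weights; in the example above these come from
# model.coef_[0] and model.intercept_.
w0, w1, b = 2.0, -1.0, 0.5
x0 = np.linspace(4, 7, 10)
x1 = -(w0 * x0 + b) / w1  # points satisfying w0*x0 + w1*x1 + b == 0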
Example #7
        y = np.array(y).reshape(-1, 1)
        delta_w = learning_rate * (X.T @ (y_pred - y) / n)
        delta_b = np.mean(y_pred - y)
        self.W -= delta_w
        self.b -= delta_b

    def fit(self, X, y, num_iterac=10000):
        self.W = np.zeros((X.shape[1], 1))
        self.b = 0
        losses = []
        for i in range(num_iterac):
            y_pred = self.forward_pass(X)
            losses.append(loss(y_pred, y))
            self.backward_pass(X, y, y_pred)
        return losses
data = pd.read_csv('voice.csv')
data['label'] = data['label'].apply(lambda x: 1 if x == 'male' else 0)
data.head()
data = data.sample(frac=1)  # shuffle the rows
X_train = data.iloc[:int(len(data) * 0.7), :-1]
y_train = data.iloc[:int(len(data) * 0.7), -1]
X_test = data.iloc[int(len(data) * 0.7):, :-1]
y_test = data.iloc[int(len(data) * 0.7):, -1]
sk_perceptron = skPerceptron(random_state=42)
sk_perceptron.fit(X_train, y_train)
perceptron = Perceptron()
perceptron.fit(X_train.values, y_train.values)
print('Accuracy (share of correct answers, out of 100%) of my perceptron: {:.1f} %'
      .format(accuracy_score(y_test, perceptron.forward_pass(X_test)) * 100))
print('Accuracy (share of correct answers) of the sklearn perceptron: {:.1f} %'
      .format(accuracy_score(y_test, sk_perceptron.predict(X_test)) * 100))
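fit() above records losses via a loss() helper that the snippet never defines. Since the weight update X.T @ (y_pred - y) / n is exactly the gradient of binary cross-entropy for a sigmoid neuron, a plausible stand-in (an assumption, not the original code) is:

import numpy as np

def loss(y_pred, y):
    # Binary cross-entropy; its gradient with respect to the weights of a
    # sigmoid neuron is X.T @ (y_pred - y) / n, matching backward_pass above.
    y = np.array(y).reshape(-1, 1)
    eps = 1e-12  # guard against log(0)
    return -np.mean(y * np.log(y_pred + eps) +
                    (1 - y) * np.log(1 - y_pred + eps))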