Example #1
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

# NOTE: `dsets` below is assumed to be an mglearn-style helper module that
# provides plot_2d_separator and discrete_scatter.

X, y = make_moons(n_samples=100, noise=0.25, random_state=3)

X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)

mlp = MLPClassifier(solver='lbfgs', random_state=0, hidden_layer_sizes=[10,10]).fit(X_train, y_train)
plt.figure("[10,10]")
dsets.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
dsets.discrete_scatter(X_train[:,0], X_train[:,1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")

print("hidden_layer_sizes : [10] : 1 hidden avec 10 nœuds \n \t \t [10,10] : 2 hidden 10 nœuds chacune")

mlp = MLPClassifier(solver='lbfgs', random_state=0, hidden_layer_sizes=[10,10,10]).fit(X_train, y_train)
plt.figure("{}".format(mlp.__getattribute__("hidden_layer_sizes"    )))
dsets.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
dsets.discrete_scatter(X_train[:,0], X_train[:,1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")


mlp = MLPClassifier(solver='lbfgs', activation='tanh', random_state=0, hidden_layer_sizes=[10,10,10]).fit(X_train, y_train)
plt.figure("{}, {}".format(mlp.hidden_layer_sizes, mlp.activation))
dsets.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
dsets.discrete_scatter(X_train[:,0], X_train[:,1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")


fig, axes = plt.subplots(2,4,figsize=(20,8)) 
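# The 2x4 grid above is left unfilled in the original snippet. A plausible
# continuation (a sketch; the swept values are assumptions, and the dsets
# helpers are assumed to accept an ax argument like their mglearn
# counterparts) compares hidden-layer width against the L2 penalty alpha:
for axx, n_hidden_nodes in zip(axes, [10, 100]):
    for ax, alpha in zip(axx, [0.0001, 0.01, 0.1, 1]):
        mlp = MLPClassifier(solver='lbfgs', random_state=0,
                            hidden_layer_sizes=[n_hidden_nodes, n_hidden_nodes],
                            alpha=alpha).fit(X_train, y_train)
        dsets.plot_2d_separator(mlp, X_train, fill=True, alpha=.3, ax=ax)
        dsets.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train, ax=ax)
        ax.set_title("n_hidden=[{}, {}]\nalpha={:.4f}".format(
            n_hidden_nodes, n_hidden_nodes, alpha))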
Example #2
#Accuracy on the test set: 0.881
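
#### The accuracy quoted above comes from an unscaled fit that the snippet
#### omits. A minimal setup sketch (assumptions: the breast-cancer dataset,
#### since `cancer.feature_names` is used below, and random_state=0 for the split):
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, random_state=0)
mlp = MLPClassifier(random_state=42).fit(X_train, y_train)
print("Accuracy on the test set: {:.3f}".format(mlp.score(X_test, y_test)))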

#### The data must be rescaled to meet the MLP's requirements (mean 0 and
#### std 1), which also improves how well the MLP trains


X_train_scaled = (X_train-X_train.mean(axis=0))/X_train.std(axis=0)
#### The test set is likewise rescaled with X_train.mean (and X_train.std):
#### always use the training-set statistics, never the test set's own

X_test_scaled = (X_test-X_train.mean(axis=0))/X_train.std(axis=0)
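
#### Equivalent alternative (a sketch, not in the original snippet):
#### sklearn's StandardScaler applies the same (x - mean) / std transform,
#### with the statistics fit on the training set only.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)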

mlp = MLPClassifier(random_state=42, max_iter=1000).fit(X_train_scaled, y_train)

print("\nAccuracy on the Scaled training set: {:.3f}".format(mlp.score(X_train_scaled,y_train))) 
print("Accuracy on the Scaled test set: {:.3f}".format(mlp.score(X_test_scaled, y_test)))

print("\nOn passe alpha de {:.4f}, à {:.4f}".format(mlp.__getattribute__("alpha"), 1))
mlp = MLPClassifier(random_state=42, max_iter=1000, alpha=1).fit(X_train_scaled, y_train)

print("Accuracy on the Scaled training set: {:.3f}".format(mlp.score(X_train_scaled,y_train))) 
print("Accuracy on the Scaled test set: {:.3f}".format(mlp.score(X_test_scaled, y_test)))

fig, axes = plt.subplots(2,1, figsize=(15,8))

for ax, alpha in zip(axes, [1, 10]):
    mlp = MLPClassifier(random_state=42, max_iter=1000,alpha=alpha).fit(X_train_scaled, y_train)
    ax.set_title("alpha = {:.4f}".format(alpha), loc='right')
    ax.imshow(mlp.coefs_[0], cmap='viridis', interpolation='none')
    ax.set_xlabel("Columns in the weight matrix")
    ax.set_ylabel("Input features")
plt.sca(axes[0])
plt.yticks(range(X_train.shape[1]), cancer.feature_names)
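
#### Render the heatmaps with a shared colorbar for the weight scale
#### (a sketch, not in the original snippet):
plt.colorbar(axes[0].images[0], ax=axes.ravel().tolist())
plt.show()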
Example #3
from sklearn.base import BaseEstimator
from sklearn.neural_network import MLPClassifier


class _MLPClassifier(BaseEstimator):
    """Wrap MLPClassifier so a runner object can record per-iteration state.

    The runner is expected to provide _save_state, _log_current_argument,
    _start_run_timing, and a generate_curves flag.
    """

    def __init__(self, runner, **kwargs):
        self.runner = runner
        self.mlp = MLPClassifier(**kwargs)
        self.state_callback = self.runner._save_state
        self.user_info = [(k, v) for k, v in kwargs.items()]

        for k, v in kwargs.items():
            self.runner._log_current_argument(k, v)
        # Need to intercept the classifier's internals so we can track
        # statistics: _update_no_improvement_count fires once per iteration
        # for the sgd/adam solvers, _loss_grad_lbfgs once per lbfgs call.
        if runner.generate_curves:
            if hasattr(self.mlp, '_update_no_improvement_count'):
                self._mlp_update_no_improvement_count = self.mlp._update_no_improvement_count
                self.mlp._update_no_improvement_count = self._update_no_improvement_count_intercept
            if hasattr(self.mlp, '_loss_grad_lbfgs'):
                self._mlp_loss_grad_lbfgs = self.mlp._loss_grad_lbfgs
                self.mlp._loss_grad_lbfgs = self._loss_grad_lbfgs_intercept

    def __getattr__(self, item):
        # Delegate any attribute we do not define to the wrapped classifier.
        return getattr(self.mlp, item)

    def get_params(self, deep=True):
        out = super().get_params(deep=deep)
        out.update(self.mlp.get_params(deep=deep))
        return out

    def fit(self, x_train, y_train=None):
        self.runner._start_run_timing()
        # Make the initial callback before training starts.
        self._invoke_runner_callback()
        return self.mlp.fit(x_train, y_train)

    def predict(self, x_test):
        return self.mlp.predict(x_test)

    def _update_no_improvement_count_intercept(self, early_stopping, x_val,
                                               y_val):
        self._invoke_runner_callback()
        return self._mlp_update_no_improvement_count(
            early_stopping, x_val, y_val)

    def _loss_grad_lbfgs_intercept(self, packed_coef_inter, x, y,
                                   activations, deltas, coef_grads,
                                   intercept_grads):
        self._invoke_runner_callback()
        return self._mlp_loss_grad_lbfgs(packed_coef_inter, x, y,
                                         activations, deltas, coef_grads,
                                         intercept_grads)

    def _invoke_runner_callback(self):
        # Fitted attributes such as n_iter_ only exist once fit() has made
        # progress, so fall back to safe defaults on the first callback.
        iterations = self.mlp.n_iter_ if hasattr(self.mlp, 'n_iter_') else 0
        no_improvement_count = self.mlp._no_improvement_count if hasattr(
            self.mlp, '_no_improvement_count') else 0

        done = ((self.mlp.early_stopping and
                 no_improvement_count > self.mlp.n_iter_no_change)
                or iterations == self.mlp.max_iter)

        state = self.mlp.coefs_ if hasattr(self.mlp, 'coefs_') else []
        fitness = self.mlp.loss_ if hasattr(self.mlp, 'loss_') else 0
        curve = self.mlp.loss_curve_ if hasattr(self.mlp, 'loss_curve_') else [0]
        return self.state_callback(iteration=iterations,
                                   state=state,
                                   fitness=fitness,
                                   user_data=self.user_info,
                                   attempt=no_improvement_count,
                                   done=done,
                                   curve=curve)
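
# A minimal usage sketch: the stub runner below is a hypothetical stand-in,
# not part of the original; any object exposing _save_state,
# _log_current_argument, _start_run_timing and a generate_curves flag works.
class _StubRunner:
    generate_curves = True

    def _save_state(self, **kwargs):
        print("iteration={iteration} fitness={fitness:.4f} done={done}".format(**kwargs))

    def _log_current_argument(self, k, v):
        pass

    def _start_run_timing(self):
        pass


# Assuming some (X_train, y_train) is in scope:
# clf = _MLPClassifier(_StubRunner(), hidden_layer_sizes=[10], max_iter=200)
# clf.fit(X_train, y_train)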