Example #1
    def fit(self, x, Y):
        self.classes = np.sort(list(set(Y)))
        x = np.array(x).copy()

        # Fit a reference network once so that coefs_ and intercepts_
        # have the right layer shapes.
        clf = MLPClassifier(hidden_layer_sizes=self.shape,
                            alpha=self.beta,
                            learning_rate_init=self.eta0)
        clf.partial_fit(x, Y, classes=self.classes)
        coefs = clf.coefs_
        intercepts = clf.intercepts_
        coefs_tot = [np.zeros(np.shape(c)) for c in coefs]
        intercepts_tot = [np.zeros(np.shape(c)) for c in intercepts]

        # Train self.agg networks and accumulate their weights layer by layer.
        for f in range(self.agg):
            if self.verbose and (self.agg > 1):
                print('Pass ' + str(f))
            clfi = self.fit1(x, Y, clf, coefs, intercepts)
            for i in range(len(coefs)):
                coefs_tot[i] += clfi.coefs_[i]
                intercepts_tot[i] += clfi.intercepts_[i]

        # The final model uses the averaged weights.
        coefs = [c / self.agg for c in coefs_tot]
        intercepts = [c / self.agg for c in intercepts_tot]
        clf.coefs_ = coefs
        clf.intercepts_ = intercepts
        self.clf = clf
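Since coefs_ and intercepts_ are plain lists of NumPy arrays, the averaging above can be reproduced in a few lines. A minimal, self-contained sketch of the same mechanics with two independently trained networks (the data and all names below are illustrative; naively averaging independently trained networks is not guaranteed to help, because hidden units can be permuted, so this only demonstrates the mechanics):

from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=200, random_state=0)

# Two networks with identical architecture but different initializations.
clfs = [MLPClassifier(hidden_layer_sizes=(10,), max_iter=500,
                      random_state=seed).fit(X, y) for seed in (0, 1)]

# Average the parameters layer by layer and load them into one model.
avg = clfs[0]
avg.coefs_ = [(a + b) / 2 for a, b in zip(clfs[0].coefs_, clfs[1].coefs_)]
avg.intercepts_ = [(a + b) / 2
                   for a, b in zip(clfs[0].intercepts_, clfs[1].intercepts_)]
print(avg.score(X, y))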
Example #2

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler


def generate_NN():
    # hidden_layers_size, input_layer_size, output_layer_size and
    # get_zeroes_biases_vectors are defined elsewhere in the project.
    data_set = load_breast_cancer()
    X = data_set['data']
    y = data_set['target']
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    mlp = MLPClassifier(hidden_layer_sizes=hidden_layers_size)
    mlp.fit(X_train, y_train)
    # Zero the learned biases while keeping the learned connection weights.
    mlp.intercepts_ = get_zeroes_biases_vectors(input_layer_size,
                                                hidden_layers_size,
                                                output_layer_size)
    return X_test, mlp, y_test
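get_zeroes_biases_vectors is not shown in the listing. A plausible implementation, assuming it returns one zero bias vector per non-input layer in the layout MLPClassifier expects (the signature mirrors the call above; input_layer_size is unused because input units have no biases):

import numpy as np

def get_zeroes_biases_vectors(input_layer_size, hidden_layers_size,
                              output_layer_size):
    # intercepts_ holds one bias vector per non-input layer:
    # [hidden_1, ..., hidden_n, output].
    sizes = list(hidden_layers_size) + [output_layer_size]
    return [np.zeros(size) for size in sizes]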
Example #3
import numpy as np
from numpy.testing import assert_almost_equal
from sklearn.neural_network import MLPClassifier


def test_fit():
    # Test that the algorithm solution is equal to a worked out example.
    X = np.array([[0.6, 0.8, 0.7]])
    y = np.array([0])
    mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
                        activation='logistic', random_state=1, max_iter=1,
                        hidden_layer_sizes=2, momentum=0)
    # set weights
    mlp.coefs_ = [0] * 2
    mlp.intercepts_ = [0] * 2
    mlp.n_outputs_ = 1
    mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
    mlp.coefs_[1] = np.array([[0.1], [0.2]])
    mlp.intercepts_[0] = np.array([0.1, 0.1])
    mlp.intercepts_[1] = np.array([1.0])

    # Initialize parameters
    mlp.n_iter_ = 0
    mlp.learning_rate_ = 0.1

    # Compute the number of layers
    mlp.n_layers_ = 3

    # Pre-allocate gradient matrices
    mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
    mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)

    mlp.out_activation_ = 'logistic'
    mlp.t_ = 0
    mlp.best_loss_ = np.inf
    mlp.loss_curve_ = []
    mlp._no_improvement_count = 0
    mlp._intercept_velocity = [np.zeros_like(intercepts) for
                               intercepts in
                               mlp.intercepts_]
    mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
                          mlp.coefs_]

    mlp.partial_fit(X, y, classes=[0, 1])
    # Manually worked out example
    # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
    #       =  0.679178699175393
    # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
    #         = 0.574442516811659
    # o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
    #       = 0.7654329236196236
    # d21 = -(0 - 0.765) = 0.765
    # d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
    # d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
    # W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
    # W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
    # W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
    # W1grad31 = X3 * d11 + alpha * W15 = 0.7 * 0.01667 + 0.1 * 0.5 = 0.061669
    # W1grad32 = X3 * d12 + alpha * W16 = 0.7 * 0.0374 + 0.1 * 0 = 0.02618
    # W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
    # W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
    # b1grad1 = d11 = 0.01667
    # b1grad2 = d12 = 0.0374
    # b2grad = d21 = 0.765
    # W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
    #          [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
    #          [0.061669, 0.02618]] = [[0.098, 0.195756], [0.2956664,
    #          0.096008], [0.4938331, -0.002618]]
    # W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
    #        [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
    # b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
    #         = [0.098333, 0.09626]
    # b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
    assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
                                                 [0.2956664, 0.096008],
                                                 [0.4938331, -0.002618]]),
                        decimal=3)
    assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
                        decimal=3)
    assert_almost_equal(mlp.intercepts_[0],
                        np.array([0.098333, 0.09626]), decimal=3)
    assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
    # Testing output
    #  h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
    #               0.7 * 0.4938331 + 0.098333) = 0.677
    #  h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
    #            0.7 * -0.002618 + 0.09626) = 0.572
    #  o1 = h * W2 + b21 = 0.677 * 0.04706 +
    #             0.572 * 0.154089 + 0.9235 = 1.043
    #  prob = sigmoid(o1) = 0.739
    assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
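As a sanity check, the asserted probability can be reproduced with a plain NumPy forward pass over the post-update weights from the comments above; a short verification sketch:

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

x = np.array([0.6, 0.8, 0.7])
W1 = np.array([[0.098, 0.195756],
               [0.2956664, 0.096008],
               [0.4938331, -0.002618]])
b1 = np.array([0.098333, 0.09626])
W2 = np.array([[0.04706], [0.154089]])
b2 = np.array([0.9235])

h = sigmoid(x @ W1 + b1)  # hidden activations, ~[0.677, 0.572]
p = sigmoid(h @ W2 + b2)  # output probability, ~0.739
print(p)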
Example #4

import os

import numpy as np
import pandas as pd
from sklearn.neural_network import MLPClassifier

os.chdir('../data')

# Load data
X = pd.read_csv('X.csv', header=None)
#with open('X.csv') as datafile:
#  X = [[float(val) for val in line.split(',')] for line in datafile.read().split('\n') if line != '']
y = pd.read_csv('y.csv', header=None).transpose().values[0]
#with open('y.csv') as datafile:
#  y = [float(line) for line in datafile.read().split('\n') if line != '']
Theta1 = pd.read_csv('Theta1.csv', header=None)
#with open('Theta1.csv') as datafile:
#  Theta1 = [[float(val) for val in line.split(',')] for line in datafile.read().split('\n') if line != '']
Theta2 = pd.read_csv('Theta2.csv', header=None)
#with open('Theta2.csv') as datafile:
#  Theta2 = [[float(val) for val in line.split(',')] for line in datafile.read().split('\n') if line != '']

# 'algorithm' was renamed to 'solver' (and 'l-bfgs' to 'lbfgs') in scikit-learn 0.18.
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(25,), random_state=1)
clf.fit(X, y)
#clf.n_outputs_ = 10
#clf.n_layers_ = 3
clf.out_activation_ = 'logistic'
# Replace the fitted parameters with the pre-trained weights loaded above:
# column 0 of each Theta matrix holds the biases, the remaining columns the weights.
clf.intercepts_ = [Theta1[0].values, Theta2[0].values]
clf.coefs_ = [Theta1.transpose()[1:].values, Theta2.transpose()[1:].values]

p = clf.predict(X)
accuracy = (p == y).mean()
print(accuracy)
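Note the order of operations above: predict relies on attributes such as classes_, n_layers_ and the fitted label binarizer that only exist after a fit call, which is why the snippet fits on the real data first and only then overwrites coefs_ and intercepts_. A minimal sketch of the same transplant pattern on synthetic data (shapes and names are illustrative):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=100, n_features=4, random_state=0)

clf = MLPClassifier(hidden_layer_sizes=(3,), max_iter=200)
clf.fit(X, y)  # initializes classes_, n_layers_, out_activation_, ...

# Transplant external parameters; shapes must match the architecture:
# coefs_[i] is (fan_in, fan_out), intercepts_[i] is (fan_out,).
clf.coefs_ = [np.zeros((4, 3)), np.zeros((3, 1))]
clf.intercepts_ = [np.zeros(3), np.zeros(1)]
print(clf.predict(X[:5]))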
Example #5
    def update(self):
        if not World.executions:
            raise RuntimeError('Cannot update. No executions left to work on.')

        # Define workspace (data set of interest).
        params = World.executions.pop(0)
        World.params = params

        t_time = time()

        print('Data set %s %f' % (params['name'], t_time))

        ds = params['dataset']()

        X, y = ds.data, ds.target

        if params['settings'].get('pc_decomposing', False):
            X = decomposition.PCA(whiten=params['settings'].get(
                'whiten', False),
                                  random_state=0).fit_transform(X)

        if params['settings']['plotting']:
            plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
            plt.tight_layout()
            plt.savefig('report/ds-%s.png' % params['name'])
            plt.close()

        # Separate train (80%) and test data (20%).
        X, X_test, y, y_test = model_selection.train_test_split(
            X, y, train_size=.8, random_state=random_state)

        # Set class attributes. We need this for the genetic algorithm.
        World.X, World.X_test, World.y, World.y_test = X, X_test, y, y_test

        # Build a regularly trained neural network once.
        # We'll use it as the baseline for our benchmarks.
        mlp = MLPClassifier(**params['nn_params'])

        print('Regular training ongoing...')
        t = time()
        mlp.fit(X, y)
        print('Training complete (elapsed: %f s)' % (time() - t))
        self.evaluate(mlp, label='NN')

        best_i, best_model, best_score = -1, None, -np.inf

        for i, search_params in enumerate(params['searches']):
            trainer = art.agents.ResponderAgent(
                search=art.searches.genetic.GeneticAlgorithm,
                environment=self,
                search_params=search_params)

            # Ask agent to find a trained net for us.
            print('Genetic training has started. Parameters: \n%s' %
                  search_params)
            t = time()
            training = trainer.act()
            print(
                'Evolution complete (%i cycles, %f s elapsed, '
                'candidate utility: %f)' %
                (trainer.search.cycle_, time() - t, trainer.utility(training)))

            if (params['settings']['plotting']
                    and search_params.get('debug', False)):
                # Plotting generations' utilities.
                plt.plot(trainer.search.lowest_utility_,
                         color='blue',
                         linewidth=4,
                         label='Lowest')
                plt.plot(trainer.search.average_utility_,
                         color='orange',
                         linewidth=4,
                         label='Average')
                plt.plot(trainer.search.highest_utility_,
                         color='red',
                         linewidth=4,
                         label='Highest')
                plt.legend()

                plt.xlabel('generation')
                plt.ylabel('utility')

                plt.tight_layout()
                plt.savefig('report/ut-%s-%i.png' % (params['name'], i))
                plt.close()

            # Let's cross our fingers! Build a neural network with the
            # weights selected by the evolutionary process.
            gmlp = MLPClassifier(**params['nn_params'])
            gmlp.coefs_ = training.coefs_
            gmlp.intercepts_ = training.intercepts_

            score = self.evaluate(gmlp, label='GA')
            if score > best_score:
                best_i, best_model, best_score = i, gmlp, score

            # Retrain from scratch as an additional benchmark.
            gmlp.fit(X, y)
            self.evaluate(gmlp, label='trained-gnn')

        print('%s\'s report:\n'
              '\tBest estimator id: %i\n'
              '\tScore: %.2f\n'
              '\tTotal time elapsed: %f s\n'
              '---\n' % (params['name'], best_i, best_score,
                         (time() - t_time)))
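One detail worth flagging in the 'trained-gnn' benchmark: a plain fit call reinitializes coefs_ and intercepts_, so the final training run starts from fresh random weights rather than fine-tuning the GA solution. If fine-tuning were the goal, warm_start=True makes fit resume from the current parameters; a small sketch on synthetic data (all names illustrative):

from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=200, random_state=0)

clf = MLPClassifier(hidden_layer_sizes=(10,), max_iter=200,
                    warm_start=True, random_state=0)
clf.fit(X, y)          # initial training
clf.coefs_[0] += 0.01  # stand-in for weights produced by a search procedure
clf.fit(X, y)          # resumes from the current weights instead of reinitializing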
Example #6
        # Store the transition: columns 0-1 hold the current state,
        # column 2 the action, and columns 3-4 the next state.
        memory[index % memory_size, 0:2] = observation
        memory[index % memory_size, 2] = action_0
        memory[index % memory_size, 3:5] = observation_1
        index += 1
        observation = observation_1

        # Sample a random mini-batch from the replay memory.
        batch_index = np.random.choice(memory_size, size=mini_batch)
        batch_memory = memory[batch_index, :]
        states_actions = batch_memory[:, 0:3]  # state plus action taken
        next_states = batch_memory[:, 3:5]
        q_target = np.zeros(mini_batch)

        # Bellman target: reward plus the discounted best Q-value of the
        # next state, estimated by the frozen target network old_nn.
        for i, state in enumerate(next_states):
            q_target[i] = r_ + GAMMA * max(
                old_nn.predict([np.append(state, a)])[0] for a in range(3))
        new_nn.fit(states_actions, q_target)

        # Periodically sync the frozen target network with the online network.
        if step % learning_inter == 0:
            old_nn.coefs_ = new_nn.coefs_
            old_nn.intercepts_ = new_nn.intercepts_

        # Stop once the cart reaches the right hilltop (maximum position).
        if observation[0] == env.observation_space.high[0]:
            break
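One subtlety in the weight sync above: assigning new_nn.coefs_ directly makes both models reference the same arrays. With a plain fit that is harmless, because fit builds fresh arrays on each call, but under partial_fit or warm_start=True scikit-learn updates the arrays in place, and the supposedly frozen target network would silently track the online one. Copying is the defensive choice; a sketch:

def sync_target(old_nn, new_nn):
    # Give the target network its own copies of the weights, so later
    # in-place updates to new_nn cannot leak into old_nn.
    old_nn.coefs_ = [w.copy() for w in new_nn.coefs_]
    old_nn.intercepts_ = [b.copy() for b in new_nn.intercepts_]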
Example #7
File: nn.py Project: arvindr9/ml
# NN, process, all_data, hid_layers and the *_frac split fractions are
# defined elsewhere in the project.
clf = NN(hid_layers, activation='relu')
features, encodings, ((x_train, y_train), (x_val, y_val),
                      (x_test, y_test)) = process(all_data,
                                                  train_frac,
                                                  val_frac,
                                                  test_frac,
                                                  modify=True)
print(x_train.shape, y_train.shape)
clf.fit(x_train, y_train)

print("Simulated annealing:")
acc_anneal = []
test_anneal = []

clf.coefs_ = []
clf.intercepts_ = []
#anneal(clf, hid_layers, x_train, y_train) #uses simulated annealing to find the optimal weights
# NNAnneal runs simulated annealing over the network weights and returns
# the best (coefs_, intercepts_) state together with its energy.
annealer = NNAnneal(clf, hid_layers, x_train, y_train)
(clf.coefs_, clf.intercepts_), e = annealer.anneal()

print(accuracy_score(y_train, clf.predict(x_train)))
print(accuracy_score(y_val, clf.predict(x_val)))
print(accuracy_score(y_test, clf.predict(x_test)))
# acc_anneal.append(accuracy_score(clf.predict(x_val), y_val))
# test_anneal.append(accuracy_score(clf.predict(x_test), y_test))

# print(acc_anneal)
# print(test_anneal)

print("Hill climbing:")
clf.coefs_ = []
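The listing is truncated here, just after the weights are reset for the hill-climbing run. For reference, a minimal sketch of greedy hill climbing over a fitted MLPClassifier's weights; everything below (function name, step size, acceptance rule) is an illustrative assumption, not the project's actual code:

import numpy as np
from sklearn.metrics import accuracy_score

def hill_climb(clf, X, y, iters=100, step=0.1, seed=0):
    # Greedily perturb the weights of an already-fitted MLPClassifier,
    # keeping a move only when training accuracy does not drop.
    rng = np.random.default_rng(seed)
    best = accuracy_score(y, clf.predict(X))
    for _ in range(iters):
        old_coefs = [w.copy() for w in clf.coefs_]
        old_intercepts = [b.copy() for b in clf.intercepts_]
        clf.coefs_ = [w + rng.normal(scale=step, size=w.shape)
                      for w in clf.coefs_]
        clf.intercepts_ = [b + rng.normal(scale=step, size=b.shape)
                           for b in clf.intercepts_]
        score = accuracy_score(y, clf.predict(X))
        if score >= best:
            best = score  # keep the perturbation
        else:
            clf.coefs_, clf.intercepts_ = old_coefs, old_intercepts  # revert
    return best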