Example #1
def train(
        self,
        x: np.ndarray,
        y: np.ndarray,
        learning_rate: float = 0.001,
        n_epochs: int = 1000
) -> None:
    # append a bias column of ones to the inputs
    x_with_1s = np.hstack((x, np.ones((x.shape[0], 1))))

    for i in range(n_epochs):
        # forward pass; predict is assumed to cache self.a_2, self.z and
        # the hidden pre-activation self.z_2 (see the sketch below)
        y_hat = self.predict(x)

        error = y_hat - y

        # backward pass

        # output layer: with L = mean((y_hat - y)^2) and y_hat = g(z),
        # dL/dz = 2 * error * g'(z), so dL/dw_1 = a_2^T (2 * error * g'(z))
        a_2_with_1s = np.hstack((self.a_2, np.ones((x.shape[0], 1))))
        g_prime_at_z = self.g.derivative(self.z)
        delta_out = g_prime_at_z * 2 * error
        p_l_p_w_1 = np.matmul(a_2_with_1s.T, delta_out)

        # hidden layer: push delta_out back through w_1 (dropping its
        # bias row) and through the hidden nonlinearity
        delta_hidden = (np.matmul(delta_out, self.w_1[:-1].T)
                        * self.g.derivative(self.z_2))
        p_l_p_w_2 = np.matmul(x_with_1s.T, delta_hidden)

        # gradient-descent step, averaged over the batch
        self.w_1 -= learning_rate * p_l_p_w_1 / x.shape[0]
        self.w_2 -= learning_rate * p_l_p_w_2 / x.shape[0]

        if i % 100 == 0:
            pl.cla()
            common.plot_preds(x, y, self)
            pl.pause(0.01)
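
# Not part of the snippet above: the train loop assumes a predict method
# that runs the forward pass and caches its intermediates. A minimal
# sketch under those assumptions (self.g is callable with a .derivative,
# w_2 holds the hidden weights, w_1 the output weights, each with a
# trailing bias row):
def predict(self, x):
    x_with_1s = np.hstack((x, np.ones((x.shape[0], 1))))
    self.z_2 = np.matmul(x_with_1s, self.w_2)    # hidden pre-activation
    self.a_2 = self.g(self.z_2)                  # hidden activations
    a_2_with_1s = np.hstack((self.a_2, np.ones((x.shape[0], 1))))
    self.z = np.matmul(a_2_with_1s, self.w_1)    # output pre-activation
    return self.g(self.z)
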
Example #2
                           n_samples=n_samples)

y = np.expand_dims(y, -1)

# can we just least-square it? closed-form normal equations:
# beta = (x^T x)^(-1) x^T y
beta = np.matmul(np.linalg.inv(np.matmul(x.T, x)), np.matmul(x.T, y))

lsq_predict = lambda x: np.matmul(x, beta)


class LSQ:
    def predict(self, x):
        return lsq_predict(x)


plot_preds(x, y, LSQ())
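
# Not in the original: the same fit via np.linalg.lstsq, which solves the
# least-squares problem directly instead of inverting x.T @ x (numerically
# safer when the features are nearly collinear).
beta_lstsq, *_ = np.linalg.lstsq(x, y, rcond=None)
assert np.allclose(beta, beta_lstsq)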

#%%

sigmoid = lambda x: 1 / (1 + np.exp(-x))


def layer_op(input_, w, g):
    return g(np.matmul(input_, w))


sparsity_threshold = 0.5
n_hidden = 30

# random hidden weights, with entries dropped independently at random
# (each weight is zeroed with probability sparsity_threshold)
w_hidden_1 = np.random.randn(n_features, n_hidden)
w_hidden_1[np.random.rand(*w_hidden_1.shape) < sparsity_threshold] = 0
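
# How the pieces compose (a sketch; x and n_features come from the
# truncated data-generation code at the top of this example):
h = layer_op(x, w_hidden_1, sigmoid)  # hidden activations, (n_samples, n_hidden)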
Example #3
import matplotlib.pyplot as pl
import numpy as np

from sklearn.datasets import make_classification

n_samples = 1000
n_input_features = 2

# generate data
x, y = make_classification(n_samples=n_samples,
                           n_features=n_input_features,
                           n_classes=2,
                           n_redundant=0,
                           n_informative=2,
                           n_clusters_per_class=2,
                           class_sep=0.5)

y = np.expand_dims(y, 1)

# closed-form least-squares fit (normal equations), as in Example #2
beta = np.matmul(np.linalg.inv(np.matmul(x.T, x)), np.matmul(x.T, y))


class Model:
    def predict(self, x):
        return np.matmul(x, beta)


from common import plot_preds

plot_preds(x, y, Model())
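
# Not in the original: a rough sanity check of the linear fit, thresholding
# the raw least-squares outputs at 0.5 since the targets are 0/1 labels.
accuracy = np.mean((np.matmul(x, beta) > 0.5) == y)
print("train accuracy:", accuracy)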
                        feed_dict={
                            input_layer: x,
                            output_layer: y
                        })

    losses.append(loss_)

    if i % 100 == 0:
        pl.subplot(1, 2, 1)
        pl.cla()
        pl.plot(losses)
        pl.pause(0.01)

        pl.subplot(1, 2, 2)
        pl.cla()
        common.plot_preds(x, y, model)

#%% multilayer perceptron
import matplotlib.pyplot as pl
import numpy as np
import tensorflow as tf

tf.reset_default_graph()

n_input_features = 1
n_output_features = 1
n_samples = 200

x = np.random.rand(n_samples, 1) * 2.0 - 1.0
x.sort(axis=0)
# nonlinear target: a parabola plus a fast sine ripple, mean-centered
y = x**2 + np.sin(x * 15) * 0.25
y -= np.mean(y)
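
# Not in the original (the snippet is truncated here): a minimal TF1-style
# graph of the kind the cell title and the earlier training fragment
# suggest. The placeholder names input_layer / output_layer match the
# feed_dict used above; the layer size, activation and optimizer are
# assumptions, not the author's values.
input_layer = tf.placeholder(tf.float32, (None, n_input_features))
output_layer = tf.placeholder(tf.float32, (None, n_output_features))
hidden = tf.layers.dense(input_layer, 30, activation=tf.nn.tanh)
prediction = tf.layers.dense(hidden, n_output_features)
loss = tf.reduce_mean(tf.square(prediction - output_layer))
train_op = tf.train.AdamOptimizer(1e-2).minimize(loss)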