Example #1
        # Get corrected cache (bias correction by the current step count)
        weight_cache_corrected = layer.weight_cache / (1 - self.beta_2 **
                                                       (self.iterations + 1))
        bias_cache_corrected = layer.bias_cache / (1 - self.beta_2 **
                                                   (self.iterations + 1))

        # Vanilla SGD parameter update + normalization
        # with square-rooted cache
        layer.weights += -self.current_learning_rate * weight_momentums_corrected / (
            np.sqrt(weight_cache_corrected) + self.epsilon)
        layer.biases += -self.current_learning_rate * bias_momentums_corrected / (
            np.sqrt(bias_cache_corrected) + self.epsilon)

    # Call once after any parameter updates
    def post_update_params(self):
        self.iterations += 1
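
# A minimal usage sketch (not part of the original listing) of how these optimizer
# methods are typically invoked once per training step, assuming the NNFS-style
# Optimizer_Adam class with a learning_rate/decay constructor and the Dense layers
# constructed below; the hyperparameter values here are illustrative only.
optimizer = Optimizer_Adam(learning_rate=0.005, decay=1e-3)

# After the backward pass has filled each layer's dweights/dbiases:
optimizer.pre_update_params()      # apply learning-rate decay, if configured
optimizer.update_params(dense1)    # Adam update shown above, per trainable layer
optimizer.update_params(dense2)
optimizer.update_params(dense3)
optimizer.post_update_params()     # advance self.iterations for bias correction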


# Create dataset
X, y = sine_data()

# Create Dense layer with 1 input feature and 64 output values
# (regularization terms could also be added to the hidden layers)
dense1 = Layer_Dense(1, 64)
# Create ReLU activation (to be used with the Dense layer)
activation1 = Activation_ReLU()
# Create dropout layer (left commented out in this example)
# dropout1 = Layer_Dropout(0.1)
# Create second Dense layer with 64 input features (as we take the output
# of the previous layer here) and 64 output values
dense2 = Layer_Dense(64, 64)
# Create ReLU activation (to be used with the second Dense layer)
activation2 = Activation_ReLU()
# Create third Dense layer with 64 input features and 1 output value
dense3 = Layer_Dense(64, 1)
# Create Linear activation for the regression output
activation3 = Activation_Linear()
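
# A sketch (not from the original listing) of how a forward pass could chain these
# objects, assuming the NNFS convention that each layer/activation stores its result
# in an .output attribute (the Layer_Dense and Activation_ReLU classes are not shown
# in this excerpt); loss calculation and the training loop are omitted.
dense1.forward(X)                    # (samples, 1) -> (samples, 64)
activation1.forward(dense1.output)
dense2.forward(activation1.output)   # (samples, 64) -> (samples, 64)
activation2.forward(dense2.output)
dense3.forward(activation2.output)   # (samples, 64) -> (samples, 1)
activation3.forward(dense3.output)   # linear output = the model's predictions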
Example #2
import matplotlib.pyplot as plt
import numpy as np
import nnfs
from nnfs.datasets import sine_data

nnfs.init()

# Create and visualize the sine dataset
X, y = sine_data()

plt.plot(X, y)
plt.show()


# Linear activation: f(x) = x, used for the regression model's output layer
class Activation_Linear:
    def forward(self, inputs):
        # Remember the inputs and pass them through unchanged
        self.inputs = inputs
        self.outputs = inputs

    def backward(self, dvals):
        # Derivative of f(x) = x is 1, so the upstream gradient passes through as-is
        self.dinputs = dvals.copy()
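
# A small illustrative check (not from the original listing): because f(x) = x,
# forward returns the inputs unchanged and backward passes gradients through untouched.
lin = Activation_Linear()
lin.forward(np.array([[1.5, -2.0]]))
print(lin.outputs)    # [[ 1.5 -2. ]] -- identical to the inputs

lin.backward(np.array([[0.3, 0.7]]))
print(lin.dinputs)    # [[0.3 0.7]] -- upstream gradient passed through unchanged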


# Mean squared error loss (the Loss base class is not shown in this excerpt)
class Loss_MeanSquaredError(Loss):
    def forward(self, y, y_hat):
        # Per-sample loss: mean of squared differences across the outputs
        return np.mean((y - y_hat)**2, axis=1)

    def backward(self, dvals, y):
        # Number of samples and of outputs per sample
        n_samples = len(dvals)
        n_outputs = len(dvals[0])

        # Gradient of MSE with respect to the predicted values,
        # normalized by the number of outputs and then by the number of samples
        self.dinputs = -2 * (y - dvals) / n_outputs
        self.dinputs = self.dinputs / n_samples
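
# A small numeric check (illustrative only, assumes the Loss base class is available):
# with y = [[1.0, 2.0]] and predictions [[1.5, 1.0]], the per-sample loss is
# (0.25 + 1.0) / 2 = 0.625, and the gradient is -2 * (y - y_hat) / (outputs * samples).
y_true = np.array([[1.0, 2.0]])
y_pred = np.array([[1.5, 1.0]])

mse = Loss_MeanSquaredError()
print(mse.forward(y_true, y_pred))   # [0.625]

mse.backward(y_pred, y_true)
print(mse.dinputs)                   # [[ 0.5 -1. ]]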