Example #1
def test_softmax_and_layers():
    """
  Creates a two layer neural network using the ReLU activation function for the hidden layer
  and the Softmax activation function for the output one.

  Returns the output of the 
  """
    # Data.
    batch, y = spiral_data(100, 3)  # 100 feature sets of 3 classes.

    # Creating layers.
    # The value 2 is because spiral data has 2 input features. The 3 (number of neurons) is arbitrary.
    dense1 = DenseLayer(2, 3)
    activation1 = ActivationRelu()

    # Because the previous layer outputs 3 values, the first parameter is 3.
    # Because the data has 3 classes, the second parameter is 3.
    dense2 = DenseLayer(3, 3)
    activation2 = ActivationSoftmax()

    dense1.forward(batch)
    activation1.forward(dense1.output)

    dense2.forward(activation1.output)
    activation2.forward(dense2.output)

    # Print the first 5 of the 300 results.
    print(activation2.output[:5])

    return activation2.output, y
Example #2
File: chap_3.py Project: szhongren/nnfs
def spiral_data_uncolored(show=False):
    nnfs.init()
    X, y = spiral_data(samples=100, classes=3)

    if show:
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap="brg")
        plt.show()
Example #3
File: chap_5.py Project: szhongren/nnfs
def categorical_cross_entropy_loss_example_with_class_full_example_with_accuracy(
):
    nnfs.init()
    X, y = spiral_data(samples=100, classes=3)
    dense_layer_1 = DenseLayer(2, 3)
    activation_function_1 = ReluActivation()
    dense_layer_2 = DenseLayer(3, 3)
    activation_function_2 = SoftmaxActivation()
    loss_function = CategoricalCrossEntropyLoss()

    dense_layer_1.forward(X)
    activation_function_1.forward(dense_layer_1.output)
    dense_layer_2.forward(activation_function_1.output)
    activation_function_2.forward(dense_layer_2.output)

    print(activation_function_2.output[:5])

    loss = loss_function.calculate(activation_function_2.output, y)

    print(loss)

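    # np.argmax along axis 1 turns each row of class probabilities into a
    # single predicted class index; one-hot targets are collapsed the same
    # way so predictions and targets can be compared element-wise.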
    predictions = np.argmax(activation_function_2.output, axis=1)
    if len(y.shape) == 2:
        y = np.argmax(y, axis=1)

    accuracy = np.mean(predictions == y)
    print(accuracy)
Example #4
def simple_network_spiral():
    X, y = spiral_data(samples=100, classes=3)

    dense_layer_1 = DenseLayer(2, 3)
    activation_function_1 = ReluActivation()
    dense_layer_2 = DenseLayer(3, 3)
    activation_function_2 = SoftmaxActivation()

    loss_function = CategoricalCrossEntropyLoss()

    lowest_loss = 9999999
    best_dense_layer_1_weights = dense_layer_1.weights.copy()
    best_dense_layer_1_biases = dense_layer_1.biases.copy()
    best_dense_layer_2_weights = dense_layer_2.weights.copy()
    best_dense_layer_2_biases = dense_layer_2.biases.copy()

    for iteration in range(10000):
        # Generate a new set of weights for iteration
        dense_layer_1.weights += 0.05 * np.random.randn(2, 3)
        dense_layer_1.biases += 0.05 * np.random.randn(1, 3)
        dense_layer_2.weights += 0.05 * np.random.randn(3, 3)
        dense_layer_2.biases += 0.05 * np.random.randn(1, 3)

        # Perform a forward pass of the training data through this layer
        dense_layer_1.forward(X)
        activation_function_1.forward(dense_layer_1.output)
        dense_layer_2.forward(activation_function_1.output)
        activation_function_2.forward(dense_layer_2.output)

        # Calculate the loss from the softmax output of the second dense layer
        loss = loss_function.calculate(activation_function_2.output, y)

        # Calculate accuracy from output of activation2 and targets
        # calculate values along first axis
        predictions = np.argmax(activation_function_2.output, axis=1)
        accuracy = np.mean(predictions == y)

        # If loss is smaller - print and save weights and biases aside
        if loss < lowest_loss:
            print(
                "New set of weights found, iteration:",
                iteration,
                "loss:",
                loss,
                "acc:",
                accuracy,
            )
            best_dense_layer_1_weights = dense_layer_1.weights.copy()
            best_dense_layer_1_biases = dense_layer_1.biases.copy()
            best_dense_layer_2_weights = dense_layer_2.weights.copy()
            best_dense_layer_2_biases = dense_layer_2.biases.copy()
            lowest_loss = loss
        # Revert weights and biases
        else:
            dense_layer_1.weights = best_dense_layer_1_weights.copy()
            dense_layer_1.biases = best_dense_layer_1_biases.copy()
            dense_layer_2.weights = best_dense_layer_2_weights.copy()
            dense_layer_2.biases = best_dense_layer_2_biases.copy()
Example #5
def main():
    req = json.loads(sys.argv[1])
    mode = req["mode"]
    option = req["option"]

    if (mode == "spiral_data"):
        size = option["size"]
        X, y = spiral_data(size[0], size[1])

        print(json.dumps({"X": X.tolist(), "y": y.tolist()}))
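
A hypothetical invocation of this script (assuming it is saved as, say,
gen_data.py; the filename is not given in the source):

# python gen_data.py '{"mode": "spiral_data", "option": {"size": [100, 3]}}'
# -> prints {"X": [...300 coordinate pairs...], "y": [...300 labels...]}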
Example #6
def training_with_momentum_decay(samples,
                                 epoch_num,
                                 learning_rate=1.0,
                                 decay=1e-3,
                                 momentum=0.5):
    print(f"Training model using Momentum:: {momentum}")
    # Create dataset
    X, y = spiral_data(samples, classes=3)
    # Create Dense layer with 2 input features and 64 output values
    dense1 = Layer_Dense(2, 64)
    # Create ReLU activation (to be used with Dense layer):
    activation1 = Activation_ReLU()
    # Create second Dense layer with 64 input features (as we take output
    # of previous layer here) and 3 output values (output values)
    dense2 = Layer_Dense(64, 3)
    # Create Softmax classifier's combined loss and activation
    loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()
    # Create optimizer
    optimizer = Optimizer_SGD(learning_rate, decay, momentum)
    # Train in loop
    for epoch in range(epoch_num + 1):
        # Perform a forward pass of our training data through this layer
        dense1.forward(X)
        # Perform a forward pass through activation function
        # takes the output of first dense layer here
        activation1.forward(dense1.output)
        # takes outputs of activation function of first layer as inputs
        dense2.forward(activation1.output)
        # Perform a forward pass through the activation/loss function
        # takes the output of second dense layer here and returns loss
        loss = loss_activation.forward(dense2.output, y)
        # Calculate accuracy from output of activation2 and targets
        # calculate values along first axis
        predictions = np.argmax(loss_activation.output, axis=1)
        if len(y.shape) == 2:
            y = np.argmax(y, axis=1)
        accuracy = np.mean(predictions == y)
        if not epoch % 100:
            print(f'epoch: {epoch}, ' + f'acc: {accuracy:.3f}, ' +
                  f'loss: {loss:.3f}, ' +
                  f'lr: {optimizer.current_learning_rate}')
            # f'Gradient {dense1.weights}')

        # Backward pass
        loss_activation.backward(loss_activation.output, y)
        dense2.backward(loss_activation.dinputs)
        activation1.backward(dense2.dinputs)
        dense1.backward(activation1.dinputs)
        # Update weights and biases
        optimizer.pre_update_params()
        optimizer.update_params(dense1)
        optimizer.update_params(dense2)
        optimizer.post_update_params()
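
A minimal standalone sketch of the decay schedule used above: the optimizer's
pre_update_params() applies lr = learning_rate / (1 + decay * iterations), so
the effective learning rate shrinks as training progresses.

learning_rate, decay = 1.0, 1e-3
for step in (0, 1, 10, 100, 1000):
    # effective learning rate after `step` parameter updates
    print(step, learning_rate / (1.0 + decay * step))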
Example #7
def run_layer():
    # create dataset
    x, y = spiral_data(samples=100, classes=3)

    # create dense layer with 2 input features and 3 output values
    dense1 = Layer_Dense(2, 3)

    # perform a forward pass of our training data through this layer
    dense1.forward(x)

    # output
    print(dense1.output[:5])
Example #8
def main():

    # Access data from nnfs datasets
    X, y = spiral_data(100, 3)

    # n_inputs is 2 because of the 2-dimensional vector space that defines each data point
    layer1 = Layer_Dense(2, 4)

    # creates an empty object
    activation1 = Activation_Binary()

    layer1.forward(X)
    activation1.forward(layer1.output)

    print(activation1.output)
Example #9
def activation_functions():
    # custom for training data set
    X, y = spiral_data(100, 3)

    layer1 = LayerDense(2, 5)
    activation1 = Activation_ReLU()
    layer1.forward(X)
    activation1.forward(layer1.output)

    import matplotlib.pyplot as plt

    plt.scatter(activation1.output[:, 0],
                activation1.output[:, 1],
                c=y,
                cmap="brg")
    plt.show()
Example #10
def test_ActivationRelu():
    """
  Tests the ActivationRelu class.
  """
    batch, y = spiral_data(100, 3)  # 100 feature sets of 3 classes.

    layer = DenseLayer(2, 5)
    layer.forward(batch)

    activation = ActivationRelu()
    activation.forward(layer.output)  # The negative values are now gone.

    # Print the modified output.
    print(activation.output)

    # Print the data.
    plt.scatter(batch[:, 0], batch[:, 1], c=y, cmap='brg')
    plt.show()
Example #11
File: chap_5.py Project: szhongren/nnfs
def categorical_cross_entropy_loss_example_with_class_full_example():
    nnfs.init()
    X, y = spiral_data(samples=100, classes=3)
    dense_layer_1 = DenseLayer(2, 3)
    activation_function_1 = ReluActivation()
    dense_layer_2 = DenseLayer(3, 3)
    activation_function_2 = SoftmaxActivation()
    loss_function = CategoricalCrossEntropyLoss()

    dense_layer_1.forward(X)
    activation_function_1.forward(dense_layer_1.output)
    dense_layer_2.forward(activation_function_1.output)
    activation_function_2.forward(dense_layer_2.output)

    print(activation_function_2.output[:5])

    loss = loss_function.calculate(activation_function_2.output, y)

    print(loss)
Example #12
def train_model(samples=10000, epoch_num=100, learning_rate=1.0):
    X, y = spiral_data(samples, classes=3)
    # Create Dense layer with 2 input features and 64 output values
    dense1 = Layer_Dense(2, 64)
    # Create ReLU activation (to be used with Dense layer):
    activation1 = Activation_ReLU()
    # Create second Dense layer with 64 input features (as we take output
    # of previous layer here) and 3 output values (output values)
    dense2 = Layer_Dense(64, 3)
    # Create Softmax classifier's combined loss and activation
    loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()
    # Create optimizer
    optimizer = Optimizer_SGD(learning_rate)
    # Train in loop
    for epoch in range(epoch_num + 1):
        # Perform a forward pass of our training data through this layer
        dense1.forward(X)
        # Perform a forward pass through activation function
        # takes the output of first dense layer here
        activation1.forward(dense1.output)
        # Perform a forward pass through second Dense layer
        # takes outputs of activation function of first layer as inputs
        dense2.forward(activation1.output)
        # Perform a forward pass through the activation/loss function
        # takes the output of second dense layer here and returns loss
        loss = loss_activation.forward(dense2.output, y)
        predictions = np.argmax(loss_activation.output, axis=1)
        if len(y.shape) == 2:
            y = np.argmax(y, axis=1)
        accuracy = np.mean(predictions == y)
        if not epoch % 100:
            print(f'epoch: {epoch}, ' + f'acc: {accuracy:.3f}, ' +
                  f'loss: {loss:.3f}')
        # Backward pass
        loss_activation.backward(loss_activation.output, y)
        dense2.backward(loss_activation.dinputs)
        activation1.backward(dense2.dinputs)
        dense1.backward(activation1.dinputs)
        # Update weights and biases
        optimizer.update_params(dense1)
        optimizer.update_params(dense2)
Example #13
# Generally, output layers have different activation functions than hidden layers.
# unit step -- sigmoid -- ReLU (a quick comparison sketch follows this example)
import numpy as np
import nnfs
from nnfs.datasets import spiral_data

nnfs.init()

np.random.seed(0)

X, Y = spiral_data(100, 3)
"""
X = [[1, 2, 3, 2.5],
     [2.0, 5.0, -1.0, 2.0],
     [-1.5, 2.7, 3.3, -0.8]]
"""
"""
inputs = [0, 2, -1, 3.3, -2.7, 1.1, 2.2, -100]
output = []

for i in inputs:
    output.append(max(0, i))
print(output)
"""


class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
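
A quick standalone sketch of the three activations named at the top of this
example (unit step, sigmoid, ReLU), using plain NumPy:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(np.where(x > 0, 1, 0))  # unit step: 0 below zero, 1 above
print(1 / (1 + np.exp(-x)))   # sigmoid: squashes values into (0, 1)
print(np.maximum(0, x))       # ReLU: clips negatives to zero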
Example #14
import numpy as np
import nnfs
from nnfs.datasets import spiral_data

nnfs.init()

np.random.seed(0)

X = [
    [1.0, 2.0, 3.0, 2.5],
    [2.0, 5.0, -1.0, 2.0],
    [-1.5, 2.7, 3.3, -0.8]
]

X, y = spiral_data(100, 3) #dataset

class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons)) #tuple
    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

layer1 = Layer_Dense(2, 5)  # 2 inputs because spiral_data has 2 features per sample
activation1 = Activation_ReLU()

layer1.forward(X)
Example #15
# np.random.seed(0)  # for this to work in a notebook, all code must run in the same cell
import numpy as np
import nnfs  # instead of a random seed, it will also set the default data type for numpy
# the dot product in numpy will sometimes use a different datatype;
# there is no way to set a default datatype in numpy, it just decides.
# with nnfs we override some things so everyone uses the same datatype,
# which makes it possible to replicate everything.
# nnfs also gives us some data:
# import a dataset of spirals from nnfs
from nnfs.datasets import spiral_data

nnfs.init()
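# (a note on reproducibility: nnfs.init() seeds NumPy's random generator
#  with 0, so spiral_data() and np.random.randn() return the same values
#  on every run)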

X = [[1, 2, 3, 2.5], [2.0, 5.0, -1.0, 2.0], [-1.5, 2.7, 3.3, -0.8]]

# Create our data. spiral_data creates both features X and labels y
X, y = spiral_data(100, 3)  # 100 feature sets of 3 classes
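# X has shape (300, 2): 100 samples per class times 3 classes, 2 coordinates
# each; y has shape (300,) with integer class labels 0, 1, 2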


class Layer_Dense:  # create class object
    def __init__(self, n_inputs, n_neurons):
        # weights have shape (n_inputs, n_neurons)
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
        # biases have shape (1, n_neurons)
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases

Example #16
        layer.weights += -self.curr_learning_rate * weights_momenta_corrected / (
            np.sqrt(weights_cache_corrected) + self.epsilon)
        layer.biases += -self.curr_learning_rate * biases_momenta_corrected / (
            np.sqrt(biases_cache_corrected) + self.epsilon)
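        # (the *_corrected values above are bias-corrected running averages,
        #  m_hat = m / (1 - beta ** (t + 1)), offsetting their zero
        #  initialization during the first iterations)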

    def decay_learning_rate(self):
        if self.decay_rate:
            self.curr_learning_rate = self.learning_rate * (
                1.0 / (1.0 + self.decay_rate * self.iterations))
            self.iterations += 1


# -------------------------------------
# create network
X_train, y_train = spiral_data(samples=1000, classes=3)
validation = []
for i in range(10):
    validation.append(spiral_data(samples=100, classes=3))

layer_1 = Layer_Dense(2, 61)
act_func_1 = Activation_ReLU()
layer_2 = Layer_Dense(61, 31)
act_func_2 = Activation_ReLU()
layer_3 = Layer_Dense(31, 3)
act_loss_func = Activation_Softmax_Loss_CategoricalCrossentropy()
#optimizer = Optimizer_SGD(momentum=0.9)
#optimizer = Optimizer_AdaGrad()
#optimizer = Optimizer_RMSProp(learning_rate=0.02, decay_rate=1e-5, rho=0.999)
optimizer = Optimizer_Adam(learning_rate=0.075, decay_rate=5e-7)
Example #17
import numpy as np
import nnfs
import nnfs.datasets as nnfs_d
import matplotlib.pyplot as plt

# np.random.seed(0)
# nnfs.init()

data_x, data_y = nnfs_d.spiral_data(150, 3)
plt.scatter(data_x[:, 0], data_x[:, 1])
print(data_x[:10, 0])
print(data_x[:10, 1])
print(data_x[:10])
plt.show()

X = [[1, 2, 3, 2.5], [2.0, 5.0, -1.0, 2.0], [-1.5, 2.7, 3.3, -0.8]]

# we want all the values between -1 and 1 so they do not "explode" when multiplied repeatedly (e.g. by 5)


class LayerDense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.1 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        self.output = None

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases


# ReLU - Rectified Linear Unit
Example #18
        self.dinputs[range(samples), y_true] -= 1
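        # this single subtraction yields the combined softmax + cross-entropy
        # gradient: softmax output minus the one-hot targets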
        # Normalize gradient
        self.dinputs = self.dinputs / samples


class OptimizerSGD():
    def __init__(self, learning_rate=1.0):
        self.learning_rate = learning_rate

    def update_params(self, layer):
        layer.weights += -self.learning_rate * layer.dweights  # dweights = gradients
        layer.biases += -self.learning_rate * layer.dbiases


# Setting values and initializing classes
X, y = spiral_data(samples=100, classes=3)  # 300 samples of 2-dimensional data

# Create dense layer with 2 input features and 64 output values
dense1 = LayerDense(n_inputs=2, n_neurons=64)

# Create ReLU activation for the first dense layer
activation1 = ActivationRelu()

# Create second dense layer with 64 input features and 3 output values
dense2 = LayerDense(n_inputs=64, n_neurons=3)

# Create Softmax classifier's combined loss and activation
loss_activation = ActivationSoftmaxLossCategoricalCrossentropy()

# Create optimizer object
optimizer = OptimizerSGD()
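
A minimal sketch of how these objects are typically driven per training step
(hypothetical continuation; the original example is cut off here, but the
same loop appears in Examples #6 and #12):

for epoch in range(10001):
    # forward pass
    dense1.forward(X)
    activation1.forward(dense1.output)
    dense2.forward(activation1.output)
    loss = loss_activation.forward(dense2.output, y)
    # backward pass
    loss_activation.backward(loss_activation.output, y)
    dense2.backward(loss_activation.dinputs)
    activation1.backward(dense2.dinputs)
    dense1.backward(activation1.dinputs)
    # update weights and biases
    optimizer.update_params(dense1)
    optimizer.update_params(dense2)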
Example #19

import matplotlib.pyplot as plt
import nnfs
from nnfs.datasets import spiral_data

import numpy as np

from layer_dense import Layer_Dense
from relu_activation_func import ReLUActivation
from softmax_activation_func import SoftmaxActivation

from cross_entropy import Loss_CategoricalCrossentropy
nnfs.init()

coords, classes = spiral_data(samples=100, classes=3)

dense_1 = Layer_Dense(2, 3)

activation_1 = ReLUActivation()

dense_2 = Layer_Dense(3, 3)

activation2 = SoftmaxActivation()

loss_function = Loss_CategoricalCrossentropy()

lowest_loss = 9999999

best_dense1_weights = dense_1.weights.copy()
best_dense1_biases = dense_1.biases.copy()
Example #20
import numpy as np
import nnfs
import nnfs.datasets as nnfs_d
import matplotlib.pyplot as plt

np.random.seed(0)


# we want all the values between -1 and 1 so they do not "explode" when multiplied repeatedly (e.g. by 5)
X, y = nnfs_d.spiral_data(100, 3)


class LayerDense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.1 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        self.output = None

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases


# ReLU - Rectified Linear Unit
class ActivationReLU:
    def __init__(self):
        self.output = None

    def forward(self, inputs):
        self.output = np.maximum(0, inputs)

# plt.scatter (X[:, 0], X[:, 1])
# plt.show ()

# plt.scatter (X[:, 0], X[:, 1], c = y, cmap= 'brg')
# plt.show ()


Example #21
# Actual Code ------------------------------------------------------------------------------------------------------------

import numpy as np
import nnfs
from nnfs.datasets import spiral_data

nnfs.init()
X, y = spiral_data(100, 3)                 # 100 feature sets of 3 classes
class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        """
        Constructor function for the class

        Params:
                n_inputs: size of the input layer coming in
                n_neurons: how many neurons should this layer have
        Outputs:
                No outputs. Just set the weights and biases on a layer based on the params
        """
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)                     # Shape of weights (n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))                                         # Shape of biases (1, n_neurons)

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases
Example #22
    val_loss = loss_function.calculate(output, y_test)
    predictions = output
    val_acc = np.mean(np.absolute(predictions - y_test) < accuracy_precision)

    print(f'val_acc: {val_acc} || val_loss: {val_loss}')

    plt.plot(X_test, y_test)
    plt.plot(X_test, predictions)
    plt.show()


    EPOCHS = 10001
    LEARNING_RATE = 0.05

    X_train, y_train = spiral_data(samples=100, classes=3)
    X_val, y_val = spiral_data(samples=100, classes=3)

    model = network.NeuralNetwork()

    model.add_layer(
        layers.Dense(2,
                     64,
                     weight_regularizer_l2=0.000005,
                     bias_regularizer_l2=0.000005))
    model.add_layer(activations.ReLU())
    model.add_layer(layers.Dropout(rate=0.2))
    model.add_layer(layers.Dense(64, 3))
    model.add_layer(activations.Softmax())

    model.set(loss=losses.CategoricalCrossentropy(),
Example #23
import numpy as np
import nnfs
from nnfs.datasets import spiral_data
nnfs.init()

# seed the random generator so the random values repeat each run
np.random.seed(0)


# samples
X = [[1, 2, 3, 2.5],
     [2.0, 5.0, -1.0, 2.0],
     [-1.5, 2.7, 3.3, -0.8]]


X, y = spiral_data(100, 3)

# hidden layers
class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))
        # np.random.randn - array of normally distributed (Gaussian) values

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases



class Activation_Relu:
    def forward(self, inputs):
Example #24
File: nn.py Project: FiendFyreFox/NNFS
            self.precision = np.std(y) / 500

    def compare(self, predictions, y):
        return np.absolute(predictions - y) < self.precision
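        # (a regression prediction counts as accurate when it lands within
        #  `precision` of the target; precision here is std(y) / 500)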

class Accuracy_Categorical(Accuracy):

    def init(self, y):
        pass

    def compare(self, predictions, y):
        return predictions == y

# === Miscellaneous ===

X, y = spiral_data(1000, 3)
X_test, y_test = spiral_data(1000, 3)
#y = y.reshape(-1, 1)
#y_test = y_test.reshape(-1, 1)

model = Model()

model.add(Layer_Dense(2, 512, weight_regularizer_l2=5e-4, bias_regularizer_l2=5e-4))
model.add(Activation_ReLU())
model.add(Layer_Dropout(0.1))
model.add(Layer_Dense(512, 3))
model.add(Activation_Softmax())

# Set loss and optimizer objects
model.set(
    loss=Loss_CategoricalCrossentropy(),
Example #25
    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases


class Activation_ReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)


class Activation_Softmax:
    def forward(self, inputs):
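        # subtract the per-row max before exponentiating to avoid overflow;
        # softmax is invariant to this shift, so probabilities are unchanged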
        exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))
        probabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)
        self.output = probabilities


X, y = spiral_data(samples=100, classes=3)

dense1 = Layer_Dense(2, 3)
activation1 = Activation_ReLU()

dense2 = Layer_Dense(3, 3)
activation2 = Activation_Softmax()

dense1.forward(X)
activation1.forward(dense1.output)

dense2.forward(activation1.output)
activation2.forward(dense2.output)

print(activation2.output[:5])
Example #26
File: chap_3.py Project: szhongren/nnfs
def spiral_data_forward_pass():
    X, y = spiral_data(samples=100, classes=3)
    # X is a (300, 2) array
    dense_layer = DenseLayer(2, 3)
    dense_layer.forward(X)
    print(dense_layer.output[:5])
Example #27
        # Number of samples
        n_samples = len(dvals)
        # If labels are one-hot encoded,
        # turn them into discrete values
        if len(y_true.shape) == 2:
            y_true = np.argmax(y_true, axis=1)
        # Copy so we can safely modify
        self.dinputs = dvals.copy()
        # Calculate gradient
        self.dinputs[range(n_samples), y_true] -= 1
        # Normalize gradient
        self.dinputs = self.dinputs / n_samples


# CREATE DATASET & NEURAL NET
X, y = spiral_data(samples=1000, classes=3)

layer_1 = Layer_Dense(2, 51, weight_reg_L2=5e-4, bias_reg_L2=5e-4)
act_func_1 = Activation_ReLU()
layer_2 = Layer_Dense(51, 23)
act_func_2 = Activation_ReLU()
layer_3 = Layer_Dense(23, 7)
act_func_3 = Activation_ReLU()
layer_4 = Layer_Dense(7, 3)

act_loss_func = Activation_Softmax_Loss_CategoricalCrossentropy()

optimizer = Optimizer_Adam(eta=0.02, decay=5e-7)

for epoch in range(10001):
    layer_1.forward(X)
Example #28
import numpy as np
import nn_classes
import nnfs.datasets as nnfs_d

X, y = nnfs_d.spiral_data(samples=100, classes=3)

# 2 inputs, because each sample has just 2 dimensions, x and y coordinates (both live in X; the y variable holds just the classes)
dense1 = nn_classes.LayerDense(2, 3)
activation1 = nn_classes.ActivationReLU()

# 3 inputs, because the previous layer has 3 outputs; 3 outputs here as well, because we have 3 classes of data
# and we treat this layer as the output layer
dense2 = nn_classes.LayerDense(3, 3)
activation2 = nn_classes.ActivationSoftmax()

dense1.forward(X)
# print(dense1.output)
activation1.forward(dense1.output)
# print(activation1.output)

dense2.forward(activation1.output)
# print(dense2.output)
activation2.forward(dense2.output)
# print(activation2.output)

cce = nn_classes.CategoricalCrossEntropy(1)
cce.calculate_loss(activation2.output, y)
print(cce.loss)
print(cce.average_loss)

acc = nn_classes.Accuracy()
Example #29
        bias_cache_corrected = layer.bias_cache / (
            1 - self.beta_2 ** (self.iterations + 1))

        layer.weights += (-self.current_learning_rate *
                          weight_momentums_corrected /
                          (np.sqrt(weight_cache_corrected) + self.epsilon))
        layer.biases += (-self.current_learning_rate *
                         bias_momentums_corrected /
                         (np.sqrt(bias_cache_corrected) + self.epsilon))

    def post_update_params(self) -> None:
        self.iterations += 1


if __name__ == "__main__":
    X, y = spiral_data(samples=100, classes=3)

    dense1 = Layer_Dense(2, 64)
    activation1 = Activation_ReLU()
    dense2 = Layer_Dense(64, 3)
    loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()
    # optimizer = Optimizer_SGD(decay=8e-8, momentum=0.9)
    # optimizer = Optimizer_Adagrad(decay=1e-4)
    # optimizer = Optimizer_RMSprop(learning_rate=0.02, decay=1e-5, rho=0.999)
    optimizer = Optimizer_Adam(learning_rate=0.05, decay=5e-7)

    # train in a loop
    for epoch in range(10001):
        # forward pass
        dense1.forward(X)
        activation1.forward(dense1.output)
Example #30
import numpy as np
from nnfs.datasets import spiral_data
import nnfs
import matplotlib.pyplot as plt

nnfs.init()
x, y = spiral_data(samples=200, classes=3)
x, y = spiral_data(samples=200, classes=3)
plt.scatter(x[:, 0], x[:, 1], c=y, cmap="brg")
plt.title("hello")
plt.show()

inputs = [1.0, 2.0, 3.0, 2.5]
weights = [[0.2, 0.8, -0.5, 1], [0.5, -0.91, 0.26, -0.5],
           [-0.26, -0.27, 0.17, 0.87]]
biases = [2.0, 3.0, 0.5]

# the dot product gives one answer per output neuron by multiplying element-wise and then summing
outputs = np.dot(weights, inputs) + biases

print(outputs)

# np.expand_dims() adds a new dimension at the index of the given axis.
a = [1, 2, 3]
b = [2, 3, 4]
b = np.array([b])
a = np.expand_dims(np.array(a), axis=0).T
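# a is now a (3, 1) column vector and b a (1, 3) row vector, so np.dot(a, b)
# below produces their (3, 3) outer product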

#b=b.T
z = np.dot(a, b)
print(z)
print()
print(a)