Example #1
import nnfs
from nnfs.datasets import spiral_data

from libraries import Activation_Softmax_Loss_CategoricalCrossentropy
from libraries import Activation_Softmax
from libraries import Loss_CategoricalCrossentropy
from libraries import Activation_ReLU
from libraries import Layer_Dense

# Initialize nnfs
nnfs.init()

# Create dataset
x, y = spiral_data(samples=100, classes=3)

# Create Dense Layer with 2 input features and 3 output values
dense1 = Layer_Dense(2, 3)

# Create ReLU activation (to be used with Dense Layer)
activation1_relu = Activation_ReLU()

# Create second Dense layer with 3 input features (as we take the output of
# the previous layer here) and 3 output values
dense2 = Layer_Dense(3, 3)

# Create a softmax classifier's combined loss and activation
loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()

# Perform a forward pass of our training data through this layer
dense1.forward(x)

# Perform a forward pass through the activation function;
# it takes the output of the first Dense layer here
activation1_relu.forward(dense1.output)

# Perform a forward pass through the second Dense layer;
# it takes the output of the activation function here
dense2.forward(activation1_relu.output)

# Perform a forward pass through the combined activation/loss function;
# it takes the output of the second Dense layer and returns the loss
loss = loss_activation.forward(dense2.output, y)

# Print the first 5 rows of the softmax output and the overall loss
print(loss_activation.output[:5])
print('loss:', loss)
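
# The combined class exists because the per-sample gradient of the
# cross-entropy loss with respect to the softmax inputs simplifies to
# (predicted - one_hot_true), which is simpler and faster than chaining the
# two backward passes separately. A minimal sketch verifying that gradient
# on illustrative values (the names below are not from libraries):
import numpy as np

softmax_output = np.array([0.7, 0.2, 0.1])  # predicted class probabilities
true_class = 0                              # ground-truth label

one_hot = np.zeros(3)
one_hot[true_class] = 1.0

# Combined gradient: predictions minus the one-hot ground truth
dinputs = softmax_output - one_hot
print(dinputs)  # [-0.3  0.2  0.1]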
Example #2

import numpy as np
import nnfs
from nnfs.datasets import spiral_data
from libraries import Layer_Dense
from libraries import Activation_ReLU
from libraries import Activation_Softmax


# Initializes NNFS
nnfs.init()

# Create dataset
x, y = spiral_data(samples=100, classes=3)

# Create Dense layer with 2 input features and 3 output values
dense1 = Layer_Dense(2, 3)

# Create ReLU activation (to be used with Dense layer)
activation_relu = Activation_ReLU()

# Create second Dense layer with 3 input features (as we take the output of the previous layer here)
# and 3 output values
dense2 = Layer_Dense(3, 3)

# Create Softmax activation (to be used with Dense layer):
activation_softmax = Activation_Softmax()

# Make a forward pass of our training data through this layer
dense1.forward(x)

# Make a forward pass through the activation function;
# it takes the output of the first Dense layer here
activation_relu.forward(dense1.output)

# Make a forward pass through the second Dense layer;
# it takes the output of the activation function here
dense2.forward(activation_relu.output)

# Make a forward pass through the Softmax activation;
# it takes the output of the second Dense layer here
activation_softmax.forward(dense2.output)

# Print the first 5 rows of the output
print(activation_softmax.output[:5])
Example #3

from nnfs.datasets import spiral_data
from libraries import Layer_Dense
from libraries import Activation_ReLU
from libraries import Activation_Softmax_Loss_CategoricalCrossentropy


# Vanilla stochastic gradient descent (SGD) optimizer
class Optimizer_SGD:
    # Initialize the optimizer - set the learning rate (default of 1.0)
    def __init__(self, learning_rate=1.0):
        self.learning_rate = learning_rate

    # Update parameters
    def update_params(self, layer):
        # Our goal is to minimize the loss, and the gradient points toward the
        # steepest ascent of the function, so we subtract the learning rate
        # times the gradient
        layer.weights += -self.learning_rate * layer.dweights
        layer.biases += -self.learning_rate * layer.dbiases


# Create dataset with 100 samples per class and 3 classes
x, y = spiral_data(samples=100, classes=3)

# Create Dense layer with 2 input features and 64 output values
dense1 = Layer_Dense(2, 64)

# Create ReLU activation (to be used with Dense layer)
activation_relu = Activation_ReLU()

# Create hidden Dense layer with 64 input features and 64 output values
dense_hidden = Layer_Dense(64, 64)

# Create ReLU activation (to be used with the hidden Dense layer)
activation_hidden_relu = Activation_ReLU()

# Create second Dense layer with 64 input features (as we take the output of
# the previous layer here) and 3 output values
dense2 = Layer_Dense(64, 3)

# Create a softmax classifier's combined loss and activation
loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()
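
# With the model and optimizer defined, a training loop can drive the SGD
# updates. This is a sketch assuming the backward-pass API from the nnfs
# book (each object's backward() sets .dinputs, and layers expose
# .dweights/.dbiases, as update_params above expects); the epoch count and
# print interval are illustrative:
optimizer = Optimizer_SGD()

for epoch in range(10001):
    # Forward pass
    dense1.forward(x)
    activation_relu.forward(dense1.output)
    dense_hidden.forward(activation_relu.output)
    activation_hidden_relu.forward(dense_hidden.output)
    dense2.forward(activation_hidden_relu.output)
    loss = loss_activation.forward(dense2.output, y)

    if not epoch % 1000:
        print('epoch:', epoch, 'loss:', loss)

    # Backward pass
    loss_activation.backward(loss_activation.output, y)
    dense2.backward(loss_activation.dinputs)
    activation_hidden_relu.backward(dense2.dinputs)
    dense_hidden.backward(activation_hidden_relu.dinputs)
    activation_relu.backward(dense_hidden.dinputs)
    dense1.backward(activation_relu.dinputs)

    # Update weights and biases
    optimizer.update_params(dense1)
    optimizer.update_params(dense_hidden)
    optimizer.update_params(dense2)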
Example #4

import nnfs
from nnfs.datasets import vertical_data

from libraries import Layer_Dense
from libraries import Loss_CategoricalCrossentropy
from libraries import Activation_ReLU
from libraries import Activation_Softmax

# Optimization determines how to adjust the weights and biases to decrease the loss.
# Finding an intelligent way to adjust the neurons' input weights and biases to
# minimize loss is the main difficulty of neural networks.

# Initialize nnfs
nnfs.init()

# Create dataset
x, y = vertical_data(samples=100, classes=3)

# Create model
dense1 = Layer_Dense(2, 3)  # First dense layer: 2 inputs, 3 outputs
activation_relu = Activation_ReLU()
dense2 = Layer_Dense(3, 3)  # Second dense layer: 3 inputs, 3 outputs
activation_softmax = Activation_Softmax()

# Create loss function
loss_function = Loss_CategoricalCrossentropy()

# Then create some variables to track the best loss and the associated weights and biases
lowest_loss = 9999999  # some initial value
best_dense1_weights = dense1.weights.copy()
best_dense1_biases = dense1.biases.copy()
best_dense2_weights = dense2.weights.copy()
best_dense2_biases = dense2.biases.copy()

# We then iterate as many times as desired: pick random values for the weights
# and biases, and save them whenever they produce the lowest loss seen so far.
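
# A minimal sketch of that random-search loop, following the nnfs book's
# approach (the 0.05 scale and the 10000-iteration count are illustrative,
# and Loss.calculate() is assumed to return the mean loss as in the book):
import numpy as np

for iteration in range(10000):
    # Generate a new set of weights and biases for each layer
    dense1.weights = 0.05 * np.random.randn(2, 3)
    dense1.biases = 0.05 * np.random.randn(1, 3)
    dense2.weights = 0.05 * np.random.randn(3, 3)
    dense2.biases = 0.05 * np.random.randn(1, 3)

    # Forward pass of the training data through the model
    dense1.forward(x)
    activation_relu.forward(dense1.output)
    dense2.forward(activation_relu.output)
    activation_softmax.forward(dense2.output)

    # Calculate the loss and the accuracy from the softmax output
    loss = loss_function.calculate(activation_softmax.output, y)
    predictions = np.argmax(activation_softmax.output, axis=1)
    accuracy = np.mean(predictions == y)

    # If the loss improved, report it and keep the weights and biases
    if loss < lowest_loss:
        print('New set of weights found, iteration:', iteration,
              'loss:', loss, 'acc:', accuracy)
        best_dense1_weights = dense1.weights.copy()
        best_dense1_biases = dense1.biases.copy()
        best_dense2_weights = dense2.weights.copy()
        best_dense2_biases = dense2.biases.copy()
        lowest_loss = loss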
Example #5

import numpy as np
from nnfs.datasets import spiral_data
from libraries import Layer_Dense


# ReLU activation
class Activation_ReLU:
    # Forward pass
    def forward(self, input):
        # In plain Python this would be:
        # output = []
        # for i in input:
        #   val = 0 if i <= 0 else i
        #   output.append(val)
        # self.output = output

        # Use NumPy for a clearer, vectorized result
        # (a quick equivalence check follows this class)
        # Calculate output values from input
        self.output = np.maximum(0, input)

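# A quick check, on illustrative values, that the plain-Python loop above
# and np.maximum agree:
sample = [-2.0, 0.0, 3.3]
print([0 if v <= 0 else v for v in sample])  # [0, 0, 3.3]
print(np.maximum(0, sample))                 # [0.  0.  3.3]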

# Create dataset
x, y = spiral_data(samples=100, classes=3)

# Create Dense layer with 2 input features and 3 output values
dense1 = Layer_Dense(2, 3)

# Create ReLU activation (to be used with Dense Layer)
activation1 = Activation_ReLU()

# Make a forward pass of our training data through this layer
dense1.forward(x)

# Forward pass through activation func.
# Takes in output from previous layer
activation1.forward(dense1.output)

# Print first 5 rows
print(activation1.output[:5])
Example #6

import numpy as np
import nnfs
from nnfs.datasets import spiral_data
from libraries import Layer_Dense
from libraries import Activation_ReLU
from libraries import Activation_Softmax_Loss_CategoricalCrossentropy


# Adam optimizer
class Optimizer_Adam:
    # Call once after any parameter updates to advance the step counter
    def post_update_params(self):
        self.iterations += 1
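
    # The remaining methods are a sketch assuming the nnfs book's Adam
    # implementation: a decaying learning rate plus bias-corrected gradient
    # momentum and squared-gradient cache.

    # Initialize optimizer - set settings
    def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7,
                 beta_1=0.9, beta_2=0.999):
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon
        self.beta_1 = beta_1
        self.beta_2 = beta_2

    # Call once before any parameter updates to apply learning-rate decay
    def pre_update_params(self):
        if self.decay:
            self.current_learning_rate = self.learning_rate * \
                (1. / (1. + self.decay * self.iterations))

    # Update a layer's parameters
    def update_params(self, layer):
        # Create momentum and cache arrays on first use
        if not hasattr(layer, 'weight_cache'):
            layer.weight_momentums = np.zeros_like(layer.weights)
            layer.weight_cache = np.zeros_like(layer.weights)
            layer.bias_momentums = np.zeros_like(layer.biases)
            layer.bias_cache = np.zeros_like(layer.biases)

        # Update momentum with current gradients
        layer.weight_momentums = self.beta_1 * layer.weight_momentums + \
            (1 - self.beta_1) * layer.dweights
        layer.bias_momentums = self.beta_1 * layer.bias_momentums + \
            (1 - self.beta_1) * layer.dbiases

        # Bias-corrected momentum (iterations is 0 on the first pass)
        weight_momentums_corrected = layer.weight_momentums / \
            (1 - self.beta_1 ** (self.iterations + 1))
        bias_momentums_corrected = layer.bias_momentums / \
            (1 - self.beta_1 ** (self.iterations + 1))

        # Update cache with squared current gradients
        layer.weight_cache = self.beta_2 * layer.weight_cache + \
            (1 - self.beta_2) * layer.dweights ** 2
        layer.bias_cache = self.beta_2 * layer.bias_cache + \
            (1 - self.beta_2) * layer.dbiases ** 2

        # Bias-corrected cache
        weight_cache_corrected = layer.weight_cache / \
            (1 - self.beta_2 ** (self.iterations + 1))
        bias_cache_corrected = layer.bias_cache / \
            (1 - self.beta_2 ** (self.iterations + 1))

        # SGD-style update, normalized with the square-rooted cache
        layer.weights += -self.current_learning_rate * \
            weight_momentums_corrected / \
            (np.sqrt(weight_cache_corrected) + self.epsilon)
        layer.biases += -self.current_learning_rate * \
            bias_momentums_corrected / \
            (np.sqrt(bias_cache_corrected) + self.epsilon)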


# --------------------------------


# Initialize nnfs
nnfs.init()


# Create dataset
x, y = spiral_data(samples=100, classes=3)

# Create Dense layer with 2 input features and 64 output values
dense1 = Layer_Dense(2, 64)

# Create ReLU activation (to be used with Dense layer)
activation_relu = Activation_ReLU()

# Create second Dense layer with 64 input features and 3 output values
dense2 = Layer_Dense(64, 3)

# Create a softmax classifier's combined loss and activation
loss_activation = Activation_Softmax_Loss_CategoricalCrossentropy()


# Create Adam optimizer with a small learning-rate decay
optimizer = Optimizer_Adam(learning_rate=0.02, decay=1e-5)


# Perform a forward pass through the model
dense1.forward(x)
activation_relu.forward(dense1.output)
dense2.forward(activation_relu.output)
loss_activation.forward(dense2.output, y)

# Perform a backward pass through the model
loss_activation.backward(loss_activation.output, y)
dense2.backward(loss_activation.dinputs)
activation_relu.backward(dense2.dinputs)
dense1.backward(activation_relu.dinputs)

# Update weights and biases
optimizer.pre_update_params()
optimizer.update_params(dense1)
optimizer.update_params(dense2)
optimizer.post_update_params()