import numpy as np  # 22:00 in part #5 of the video series
import nnfs
from nnfs.datasets import spiral_data  # importing the spiral dataset

# Not strictly required: part 6 inlines the same data instead of importing nnfs
nnfs.init()  # the nnfs package is from the author's GitHub/YouTube channel
# nnfs.init() sets the random seed and numpy's default datatype; the package also bundles the dataset
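# A rough manual equivalent (a sketch; nnfs.init() also patches numpy so dot
# products default to float32):
# np.random.seed(0)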

# Generating the data: 100 feature sets for each of 3 classes (the 3 colors of the spiral; see
# https://www.youtube.com/watch?v=gmjzbpSVY1A&list=PLQVvvaa0QuDcjD5BAw2DxE6OF2tius3V3&index=5 for more info)
X, y = spiral_data(100, 3)
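# Resulting shapes: X is (300, 2) -- 100 points per class, 2 coordinates each;
# y is (300,) -- integer class labels 0, 1, and 2.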


class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        '''np.random.randn(n_inputs, n_neurons):
        each neuron has one weight per input, and we are coding a whole layer,
        so the weight matrix has shape (n_inputs, n_neurons).
        .randn draws values from a Gaussian (normal) distribution centered on 0,
        in whatever array shape you specify.
        Multiply by 0.1 to keep the initial weights small; without it, values
        can grow exponentially over many iterations.
        Initializing the weights as (n_inputs, n_neurons) also means the weight
        matrix never has to be transposed in the forward pass.'''
        self.weights = 0.1 * np.random.randn(n_inputs, n_neurons)
        '''Initialize all biases to zero; the array shape is (1, n_neurons)
        because each neuron has one bias and we are coding a single layer.
        (If the network's final outputs come out all zero, initialize the
        biases to a non-zero value instead.)
        The first parameter of np.zeros is the shape, hence the tuple (1, n_neurons).'''
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        '''Forward pass: dot product of the inputs and weights, plus the biases.'''
        self.output = np.dot(inputs, self.weights) + self.biases
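
# A quick usage sketch (mirroring the later examples in this collection):
# layer1 = Layer_Dense(2, 5)   # 2 input features, 5 neurons
# layer1.forward(X)
# print(layer1.output[:5])     # first 5 rows of the (300, 5) output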
Code Example #2
# X, y = create_data(100, 3)

# plt.scatter(X[:, 0], X[:, 1])
# plt.show()

# plt.scatter(X[:, 0], X[:, 1], c=y, cmap='brg')
# plt.show()


# Actual Code ------------------------------------------------------------------------------------------------------------

import numpy as np
import nnfs
from nnfs.datasets import spiral_data

nnfs.init()
X, y = spiral_data(100, 3)                  # 100 feature sets for each of 3 classes
class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        """
        Constructor function for the class

        Params:
                n_inputs: size of the input layer coming in
                n_neurons: how many neurons should this layer have
        Outputs:
                No outputs. Just set the weights and biases on a layer based on the params
        """
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)                     # Shape of weights: (n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))                                         # Shape of biases: (1, n_neurons)

    def forward(self, inputs):
        """Forward pass: dot product of the inputs and weights, plus the biases."""
        self.output = np.dot(inputs, self.weights) + self.biases
Code Example #3
import numpy as np
import nnfs
from nnfs.datasets import spiral_data

nnfs.init()  # Sets random seed and default data type for numpy

# X holds the 2-D point coordinates; y holds the integer class labels
X, y = spiral_data(100, 3)


class LayerDense:
    def __init__(self, n_inputs, n_neurons):
        self.weights = np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))  # This is a tuple!

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases


class ActivationReLU:
    def forward(self, inputs):
        self.output = np.maximum(0, inputs)


layer1 = LayerDense(2, 5)
activation1 = ActivationReLU()
layer1.forward(X)
activation1.forward(layer1.output)
print(activation1.output)
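
# Expected result: a (300, 5) array -- 300 spiral samples through 5 neurons --
# with every negative pre-activation clipped to 0 by the ReLU.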
Code Example #4
import numpy as np

# 3 samples of 4 features each, passed through two dense layers
inputs = [[1, 2, 3, 2.5],
          [2.0, 5.0, -1.0, 2.0],
          [-1.5, 2.7, 3.3, -0.8]]

weights = [[0.2, 0.8, -0.5, 1.0],
           [0.5, -0.91, 0.26, -0.5],
           [-0.26, -0.27, 0.17, 0.87]]

biases = [2, 3, 0.5]

weights2 = [
    [0.1, -0.14, 0.5],
    [-0.5, 0.12, -0.33],
    [-0.44, 0.73, -0.13]
]

biases2 = [-1, 2, -0.5]

layer1_outputs = np.dot(inputs, np.array(weights).T) + biases

layer2_outputs = np.dot(layer1_outputs, np.array(weights2).T) + biases2

print(layer2_outputs)
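
# Shape check (a sketch): inputs is (3, 4) and np.array(weights).T is (4, 3),
# so layer1_outputs is (3, 3); np.array(weights2).T is (3, 3), keeping
# layer2_outputs at (3, 3) -- one row per sample, one column per neuron.
assert layer1_outputs.shape == (3, 3)
assert layer2_outputs.shape == (3, 3)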

# This is so cool, I feel like a wizard.

'''
# Page 63-65

import nnfs
import matplotlib.pyplot as plt
from nnfs.datasets import spiral_data
nnfs.init()

X, y = spiral_data(samples=100, classes=3)
print(X)
print(y)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='brg')
plt.show()
'''


# Page 66-71
# Dense Layer (Fully Connected)
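
# A minimal sketch of the dense layer these pages cover, following the other
# examples in this collection (the 0.01 weight scale matches the book):
class Layer_Dense:
    def __init__(self, n_inputs, n_neurons):
        # Small random weights, zero biases
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        self.output = np.dot(inputs, self.weights) + self.biases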
Code Example #5
import numpy as np
import nnfs
from nnfs.datasets import spiral_data
nnfs.init()  # setting random seed and default data type for numpy to use

# X = [[1, 2, 3, 2.5],
#      [2.0, 5.0, -1.0, 2.0],
#      [-1.5, 2.7, 3.3, -0.8]]

# X = [[1], [2]]

##################################
# Long version of softmax function
##################################

# layer_outputs = [4.8, 1.21, 2.385]
#
# E = 2.71828182846
#
# exp_values = []
# for output in layer_outputs:
#     exp_values.append(E ** output)
# print('exponentiated values:')
# print(exp_values)
#
# norm_base = sum(exp_values) #sum all values
# norm_values = []
# for value in exp_values:
#     norm_values.append(value/norm_base)
# print('normalized exponentiated values:')
# print(norm_values)
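#
# Vectorized version of the same computation (a sketch using numpy; subtracting
# the max before exponentiating is the usual guard against overflow and does
# not change the result):
# layer_outputs = np.array([4.8, 1.21, 2.385])
# exp_values = np.exp(layer_outputs - np.max(layer_outputs))
# norm_values = exp_values / np.sum(exp_values)
# print(norm_values)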
Code Example #6
# CODE FOR A DENSE LAYER OF NEURONS CLASS USING NUMPY
# SANTIAGO GARCIA ARANGO

import numpy as np
import nnfs
import matplotlib.pyplot as plt
from nnfs.datasets import spiral_data

# Initialize Neural Networks From Scratch package for following the book
nnfs.init(dot_precision_workaround=True,
          default_dtype='float32',
          random_seed=0)


class DenseLayer:
    """
    DenseLayer is a class to create and process generalized neuron layers.
    :param n_inputs: number of inputs
    :param n_neurons: number of neurons
    """
    def __init__(self, n_inputs, n_neurons):
        # Initialize main layer with random weights and zero vector for biases
        self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        # Forward pass: dot product of the inputs and weights, plus the biases
        self.output = np.dot(inputs, self.weights) + self.biases


if __name__ == "__main__":
    # Minimal demo (a sketch; illustrative, not the original script's body):
    X, y = spiral_data(samples=100, classes=3)
    dense_layer = DenseLayer(2, 3)
    dense_layer.forward(X)
    print(dense_layer.output[:5])
Code Example #7
import numpy as np
import nnfs
from nnfs.datasets import spiral_data

# Layer, ReLU, Soft_Ce, and Adam are the model classes defined earlier in the
# original file (not shown in this excerpt).


def main():
    nnfs.init()
    X, y = spiral_data(samples=1000, classes=3)

    dense1 = Layer(2,
                   512,
                   weight_regularizer_l2=5e-4,
                   bias_regularizer_l2=5e-4)
    activation1 = ReLU()

    dense2 = Layer(512, 3)

    cost_act = Soft_Ce()

    optimizer = Adam(learning_rate=0.02, decay=5e-7)

    for epoch in range(10001):

        dense1.forwardProp(X)
        activation1.forwardProp(dense1.output)

        dense2.forwardProp(activation1.output)

        data_cost = cost_act.forwardProp(dense2.output, y)

        regularization_cost = \
            cost_act.cost.regularization_cost(dense1) + \
            cost_act.cost.regularization_cost(dense2)

        cost = data_cost + regularization_cost

        predictions = np.argmax(cost_act.output, axis=1)
        if len(y.shape) == 2:
            y = np.argmax(y, axis=1)
        accuracy = np.mean(predictions == y)

        if not epoch % 100:
            print(f'epoch: {epoch}, ' + f'acc: {accuracy:.3f}, ' +
                  f'cost: {cost:.3f}, (' + f'data_cost: {data_cost:.3f}, ' +
                  f'reg_cost: {regularization_cost:.3f}), ' +
                  f'lr: {optimizer.curr_learning_rate}')

        cost_act.backProp(cost_act.output, y)
        dense2.backProp(cost_act.dinputs)
        activation1.backProp(dense2.dinputs)
        dense1.backProp(activation1.dinputs)

        optimizer.pre_update_params()
        optimizer.update_params(dense1)
        optimizer.update_params(dense2)
        optimizer.post_update_params()

    X_test, y_test = spiral_data(samples=100, classes=3)
    dense1.forwardProp(X_test)
    activation1.forwardProp(dense1.output)
    dense2.forwardProp(activation1.output)

    cost = cost_act.forwardProp(dense2.output, y_test)

    predictions = np.argmax(cost_act.output, axis=1)
    if len(y_test.shape) == 2:
        y_test = np.argmax(y_test, axis=1)
    accuracy = np.mean(predictions == y_test)

    print(f'validation, acc: {accuracy:.3f}, cost: {cost:.3f}')
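
# For reference, a sketch of what the regularization_cost calls above likely
# compute: an L2 penalty on each layer's weights and biases (the attribute
# names here are assumptions mirroring the constructor arguments above):
# def regularization_cost(layer):
#     cost = 0
#     if layer.weight_regularizer_l2 > 0:
#         cost += layer.weight_regularizer_l2 * np.sum(layer.weights ** 2)
#     if layer.bias_regularizer_l2 > 0:
#         cost += layer.bias_regularizer_l2 * np.sum(layer.biases ** 2)
#     return cost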