def __init__(self,
                 input_dim=(1, 28, 28),
                 conv_param={
                     'filter_num': 30,
                     'filter_size': 5,
                     'pad': 0,
                     'stride': 1
                 },
                 hidden_size=100,
                 output_size=10,
                 weight_init_std=0.01,
                 batch_size=100):
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size +
                            2 * filter_pad) / filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size / 2) *
                               (conv_output_size / 2))

        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(
            filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * np.random.randn(
            pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * np.random.randn(
            hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers['Conv1'] = convolution.Convolution(self.params['W1'],
                                                       self.params['b1'],
                                                       conv_param['stride'],
                                                       conv_param['pad'])

        self.layers['Relu1'] = Relu.relu()
        self.layers['Pool1'] = pooling.Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine.affine(self.params['W2'],
                                               self.params['b2'])

        self.layers['Relu2'] = Relu.relu()
        self.layers['Affine2'] = Affine.affine(self.params['W3'],
                                               self.params['b3'])
        self.lastLayer = Softmaxwithloss.softmaxwithloss()

        self.batch_size = batch_size
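As a quick sanity check on the shape arithmetic in the constructor above, here is a minimal sketch using the default arguments (the variable names below are illustrative only):

# Defaults: 1x28x28 input, 30 filters of size 5, pad 0, stride 1, 2x2 pooling.
input_size, filter_size, pad, stride, filter_num = 28, 5, 0, 1, 30

conv_output_size = (input_size - filter_size + 2 * pad) / stride + 1   # (28 - 5 + 0) / 1 + 1 = 24
pool_output_size = int(filter_num * (conv_output_size / 2) ** 2)       # 30 * 12 * 12 = 4320

print(conv_output_size, pool_output_size)  # 24.0 4320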
def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    A -- the output of the activation function, also called the post-activation value
    cache -- a python dictionary containing "linear_cache" and "activation_cache";
             stored for computing the backward pass efficiently
    """

    if activation == "sigmoid":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == "relu":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
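The helpers assumed by linear_activation_forward are not shown in this example; a minimal sketch consistent with the shapes in the docstring (Z = W·A_prev + b, with each activation returning its output plus a cache) could look like this:

import numpy as np

def linear_forward(A_prev, W, b):
    # Z: (size of current layer, number of examples)
    Z = np.dot(W, A_prev) + b
    return Z, (A_prev, W, b)

def sigmoid(Z):
    A = 1.0 / (1.0 + np.exp(-Z))
    return A, Z  # cache Z for the backward pass

def relu(Z):
    A = np.maximum(0, Z)
    return A, Z  # cache Z for the backward pass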
Example #3
def activation(layer_input, W, b, activation_function):

    if activation_function == "relu":
        Z, cache = linear_transformation(layer_input, W, b)
        # A will be the activation array of a given layer (also seen as the input for next layer)
        A = relu.relu(Z)

    elif activation_function == "sigmoid":
        Z, cache = linear_transformation(layer_input, W, b)
        A = sigmoid.sigmoid(Z)
    return A, cache
def full_forward_pass(example, net, activations):
    hidden_layer_count = net['hidden_layer_count']
    x = example

    W_1 = net['hidden-#1-W']
    b_1 = net['hidden-#1-b']
    activations[1] = fully_connected(x, W_1, b_1)
    for i in range(2, hidden_layer_count + 1):
        W = net['hidden-#{}-W'.format(i)]
        b = net['hidden-#{}-b'.format(i)]
        # Apply relu to the previous layer's output, then the ith hidden
        # layer, storing the pre-relu result in activations[i].
        activations[i] = fully_connected(relu(activations[i - 1]), W, b)

    W = net['final-W']
    b = net['final-b']
    # Apply the final layer, and then the sigmoid to get zhat.
    # Note: activations is a dict passed by reference, so the updates made
    # here are visible to the caller.
    x = fully_connected(relu(activations[hidden_layer_count]), W, b)
    activations[hidden_layer_count + 1] = x
    z_hat = logistic(x)
    return z_hat
Example #5
 def __init__(self, input_size, hidden_size, output_size, batch_size=100):
     # Initialize the weights
     self.params = {}
     self.params['w1'] = (np.sqrt(2 / 50)) * np.random.randn(
         input_size, hidden_size)
     # The next layer feeds into ReLU, so the weight std uses He initialization: np.sqrt(2/50)
     self.params['b1'] = np.zeros((batch_size, hidden_size))
     self.params['w2'] = 0.01 * np.random.randn(hidden_size, output_size)
     self.params['b2'] = np.zeros((batch_size, output_size))
     self.batch_size = batch_size
     # Build the layers
     self.layers = OrderedDict()  # ordered dict holding each layer in sequence
     self.layers['Affine1'] = Affine.affine(self.params['w1'],
                                            self.params['b1'])
     self.layers['Dropout'] = dropout.Dropout(dropout_ratio=0.5)  # dropout layer
     self.layers['Relu1'] = Relu.relu()
     self.layers['Affine2'] = Affine.affine(self.params['w2'],
                                            self.params['b2'])
     self.lastlayer = Softmaxwithloss.softmaxwithloss()
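The comment above notes that the layer feeding into ReLU uses He initialization, with the standard deviation hard-coded as np.sqrt(2/50) for a fan-in of 50. A short, hedged sketch of the general rule (the sizes here are illustrative):

import numpy as np

fan_in, fan_out = 50, 100  # illustrative layer sizes
w = np.sqrt(2.0 / fan_in) * np.random.randn(fan_in, fan_out)  # He initialization for ReLU layers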
Example #6
def linear_activation_forward(A_prev, W, b, activation):
    # Arguments:
    #     A_prev: the previous layer's activation value
    #     W: weights of the current layer
    #     b: bias of the current layer
    #     activation: a string specifying which activation function to use for this layer
    # Returns:
    #     A: the result of a single forward pass given the current arguments
    #     cache: a tuple containing the linear cache (A,W,b) and activation cache (Z). we'll need these for backprop
    # Implements:
    #     A single forward pass given the given activation function and arguments

    if activation == 'sigmoid':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid_fuction(Z), Z
    elif activation == 'relu':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z), Z

    cache = (linear_cache, activation_cache)

    return A, cache
Example #7
def linear_activation_backward(dA, cache, activation):
    # Arguments:
    #     dA: derivative of the activation
    #     cache: the (linear_cache, activation_cache) tuple stored during the forward pass
    #     activation: name of layer's activation function for calculating the right dZ
    # Returns:
    #     dA_prev: derivative of the previous layer's activation
    #     dW: layer's weights derivative
    #     db: layer's bias derivative
    # Implements:
    #     a single backward pass with respect to the layer's activation function

    linear_cache, activation_cache = cache

    if activation == 'relu':
        # Assumes relu_backward / sigmoid_backward helpers that convert dA
        # into dZ using the Z cached during the forward pass (see the sketch
        # after this function).
        dZ = relu_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)
    elif activation == 'sigmoid':
        dZ = sigmoid_backward(dA, activation_cache)
        dA_prev, dW, db = linear_backward(dZ, linear_cache)

    return dA_prev, dW, db
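The relu_backward and sigmoid_backward helpers referenced above are assumed to convert dA into dZ using the Z cached during the forward pass; a minimal sketch of what they might compute:

import numpy as np

def relu_backward(dA, activation_cache):
    Z = activation_cache
    return dA * (Z > 0)  # gradient passes only where Z was positive

def sigmoid_backward(dA, activation_cache):
    Z = activation_cache
    s = 1.0 / (1.0 + np.exp(-Z))
    return dA * s * (1 - s)  # chain rule through the sigmoid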
 def __init__(self,
              input_size,
              hidden_size,
              output_size,
              batch_size=100,
              weight_init_std=0.01):
     # Initialize the weights
     self.params = {}
     self.params['w1'] = weight_init_std * np.random.randn(
         input_size, hidden_size)
     self.params['b1'] = np.zeros((batch_size, hidden_size))
     self.params['w2'] = weight_init_std * np.random.randn(
         hidden_size, output_size)
     self.params['b2'] = np.zeros((batch_size, output_size))
     self.params['batch_size'] = batch_size
     # Build the layers
     self.layers = OrderedDict()  # ordered dict holding each layer in sequence
     self.layers['Affine1'] = Affine.affine(self.params['w1'],
                                            self.params['b1'])
     self.layers['Relu1'] = Relu.relu()
     self.layers['Affine2'] = Affine.affine(self.params['w2'],
                                            self.params['b2'])
     self.lastlayer = Softmaxwithloss.softmaxwithloss()
for subdirectory, dirs, files in os.walk("./"):
    for file_name in files:

        file_path = subdirectory + os.sep + file_name

        if file_path.endswith(".jpg") and ("Blur" in file_path):
            Blurred_Img.append(file_path)
        if file_path.endswith(".jpg") and ("sharp" in file_path):
            Sharp_Img.append(file_path)


for imag, imagSharp in zip(Blurred_Img, Sharp_Img):

    conv = convolution(3, 3)
    reluo = relu()

    # Load the blurred image and add a leading batch dimension.
    base_image = cv2.imread(imag)
    base_image = np.array(base_image)
    base_image = base_image[np.newaxis, ...]
    print(base_image.shape)

    # Load the matching sharp image (use a new name so the Sharp_Img list
    # of paths is not overwritten inside the loop).
    sharp_image = cv2.imread(imagSharp)
    sharp_image = np.array(sharp_image)

    # Two convolution + ReLU stages; f_1, f_2, bias1 and bias2 are the filter
    # weights and biases defined elsewhere in the script.
    base_image, c1 = conv.conv_forward(base_image, f_1, 1, 1, bias1)
    base_image = reluo.forward(base_image)

    base_image, c2 = conv.conv_forward(base_image, f_2, 1, 1, bias2)
    base_image = reluo.forward(base_image)
def tinynet_sgd(X, z, layers, epoch_count):
    example_count, feature_count = X.shape

    # Randomly initialize the parameters.
    net = initialize_net(layers, feature_count)
    # The value at key i is the intermediate output of the network at
    # layer i. The type is 'any' because the neuron count is variable.
    activations = {}

    # For each epoch, train on all data examples.
    for ep in range(1, epoch_count + 1):
        print('Starting epoch #{} of #{}...\n'.format(ep, epoch_count))
        learning_rate = .1 / ep
        # Randomly shuffle the examples so they aren't applied in the
        # same order every epoch.
        permutation = np.random.permutation(example_count)
        X = X[permutation]
        z = z[permutation]

        # Train on each example by making a prediction and doing backprop
        # Note that in a full-featured software package like TensorFlow,
        # you would pull a batch of images, and all the functions
        # implemented here would be efficiently batched to reduce
        # repeated work. When working through the gradients, remember
        # that here x is a single row vector, not a matrix of examples.
        for i in range(example_count):
            # For simplicity set the '0-th' layer activation to be the
            # input to the network. activations[1...hidden_layer_count] are
            # the outputs of the hidden layers of the network before relu.
            # activations[hidden_layer_count+1] is the output of the output
            # layer. The output of the logistic layer does not need to be
            # stored here to compute the derivatives and updates at the end
            # of the network.
            # You will at some point want to access the post relu
            # activations. Just call relu on the activations to get that.
            # Faster networks would cache them, but memory management here
            # is already bad enough that it won't matter much.
            activations[0] = X[i:(i + 1), :]

            # Forward propagation pass to evaluate the network.
            # z_hat is the output of the network; activations has been
            # updated in-place so that activations[j] holds the output of
            # layer j.
            z_hat = full_forward_pass(X[i:(i + 1), :], net, activations)

            # Backwards pass to evaluate gradients at each layer and update
            # weights. First compute dL/dz_hat here, then use the backprop
            # functions to get the gradients for earlier weights as you
            # move backwards through the network, updating the weights
            # at each step.
            # Note: Study full_forward_pass, defined below, for an example
            # of how the information you need to access can be referenced.
            # [net] contains the current network weights (and is a handle
            # object, so please be careful when editing it).
            # [activations] contains the responses of the network at each
            # layer activations[0] is the input,
            # activations[1:hidden_layer_count] is the output of each layer
            # before relu has been applied.
            # activations[hidden_layer_count+1] is the final output before
            # it is squished with the logistic function.


            # dL/dz_hat for the squared-error loss (z_hat - z[i])**2
            dLdz_hat = 2 * (z_hat - z[i])

            # derivative of the loss w.r.t. the pre-logistic output of the final layer
            dLdU = logistic_backprop(dLdz_hat, z_hat)

            # get final w, final b and layer count
            currW = net['final-W']
            currB = net['final-b']
            hidden_layer_count = net['hidden_layer_count']

            # current input is relu function of layer before final layer
            currInput = relu(activations[hidden_layer_count])

            # get gradients across final hidden layer (in this case the only hidden layer)
            dLdX, dLdW, dLdB = fully_connected_backprop(dLdU, currInput, currW)
            net['final-W'] = currW - learning_rate * dLdW
            net['final-b'] = currB - learning_rate * dLdB

            # Repeat what we did above for the remaining hidden layers,
            # iterating backwards through the net (j avoids shadowing the
            # outer example index i).
            for j in range(hidden_layer_count, 0, -1):
                currW = net['hidden-#{}-W'.format(j)]
                currB = net['hidden-#{}-b'.format(j)]
                # output of hidden layer
                currInput = activations[j]
                # derivative of loss wrt output of hidden layer
                dLdU = relu_backprop(dLdX, currInput)
                # relu activation as input to hidden layer
                currInput = relu(activations[j - 1])
                # backprop to get derivatives of loss across hidden layer
                dLdX, dLdW, dLdB = fully_connected_backprop(
                    dLdU, currInput, currW)
                net['hidden-#{}-W'.format(j)] = currW - learning_rate * dLdW
                net['hidden-#{}-b'.format(j)] = currB - learning_rate * dLdB
    return net
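The forward and backward helpers used inside the training loop above are assumed rather than shown here; a minimal sketch consistent with how they are called, assuming row-vector activations of shape (1, n):

import numpy as np

def relu(U):
    return np.maximum(0, U)

def relu_backprop(dLdX, U):
    # Gradient flows only through entries where the pre-activation U was positive.
    return dLdX * (U > 0)

def logistic(x):
    return 1.0 / (1.0 + np.exp(-x))

def logistic_backprop(dLdz_hat, z_hat):
    # z_hat is already the logistic output, so d(z_hat)/dx = z_hat * (1 - z_hat).
    return dLdz_hat * z_hat * (1 - z_hat)

def fully_connected(x, W, b):
    # Assumed shapes: x (1, n_in), W (n_in, n_out), b (1, n_out).
    return x @ W + b

def fully_connected_backprop(dLdU, x, W):
    dLdX = dLdU @ W.T   # gradient w.r.t. the layer input
    dLdW = x.T @ dLdU   # gradient w.r.t. the weights
    dLdB = dLdU         # gradient w.r.t. the bias
    return dLdX, dLdW, dLdB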
import numpy as np
import neural_net as nn
import data_manipulation as dm
import relu

train_x = np.loadtxt("train_x", max_rows=20000) / 255
train_y = np.loadtxt("train_y", max_rows=20000)

activation = relu.relu()
net = nn.NeuralNet(train_x, activation, 0.01, 50, 10)
accuracy = net.train(train_x, train_y, 10, 2)
print(accuracy)

test_x = np.loadtxt("test_x") / 255
test_y = np.zeros(test_x.shape[0])

for i in range(test_x.shape[0]):
    test_y[i] = net.predict_no_batch(test_x[i], 0, 0)

output = ""
for i in range(test_y.shape[0]):
    output = output + str(test_y[i].astype(int)) + "\n"

with open("test_y", "w") as output_file:
    output_file.write(output)


Example #12
"""
Created on Sat Apr  2 14:30:09 2022

@author: NinjaOfPhysics
"""

import numpy as np
import nnfs
from nnfs.datasets import spiral_data

import Layer
from relu import relu
from softmax import softmax

nnfs.init()

if __name__ == "__main__":
    X, y = spiral_data(samples=100, classes=3)
    dense1 = Layer.Layer_Dense(2, 3)
    dense2 = Layer.Layer_Dense(3, 3)

    activation1 = relu()
    activation2 = softmax()

    dense1.forward(X)
    activation1.forward(dense1.output)
    dense2.forward(activation1.output)
    activation2.forward(dense2.output)

    print(activation2.output[:5])
import numpy as np
import matplotlib.pylab as plt
from sigmoid import sigmoid
from step_function import step_function
from relu import relu

# Common x values for all three activations
x = np.arange(-3, 3, 0.1)

# Apply each activation
y_step = step_function(x)
y_sigmoid = sigmoid(x)
y_relu = relu(x)

# Plot
plt.plot(x, y_step, linestyle="--", label="Step")  # step function
plt.plot(x, y_sigmoid, label="Sigmoid")  # sigmoid function
plt.plot(x, y_relu, linestyle=":", label="ReLU")  # ReLU function
plt.legend()  # show the legend
plt.show()  # display the figure