Example #1
    def __init__(self, input_size, hidden_sizes, output_size):
        I, O = input_size, output_size
        previous_H = I
        self.layers = []
        for current_H in hidden_sizes:
            W = 0.01 * np.random.randn(previous_H, current_H)
            b = np.zeros(current_H)
            self.layers.append(Affine(W, b))
            self.layers.append(Sigmoid())
            #self.layers.append(ReLU())
            previous_H = current_H
        W = 0.01 * np.random.randn(previous_H, O)
        b = np.zeros(O)
        self.layers.append(Affine(W, b))

        self.loss_layer = SoftmaxWithLoss()

        # Collect all weights and gradients into lists
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
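
The hidden_sizes list drives the depth: each entry appends an Affine/Sigmoid pair, and a final Affine maps the last hidden size onto the output. A standalone trace of that loop (pure Python; tuples stand in for the layer objects, since only __init__ is shown above):

# Trace the layer stack built for input 784, hidden_sizes=[100, 50], output 10.
sizes, layers = [784, 100, 50], []
for prev, cur in zip(sizes[:-1], sizes[1:]):
    layers += [("Affine", prev, cur), ("Sigmoid",)]
layers.append(("Affine", sizes[-1], 10))
print(layers)
# [('Affine', 784, 100), ('Sigmoid',), ('Affine', 100, 50), ('Sigmoid',), ('Affine', 50, 10)]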
Example #2
    def __init__(self,
                 n_features,
                 n_output,
                 n_hidden=30,
                 l2=0.0,
                 l1=0.0,
                 epochs=50,
                 eta=0.001,
                 decrease_const=0.0,
                 shuffle=True,
                 n_minibatches=1,
                 random_state=None):
        np.random.seed(random_state)
        self.n_features = n_features
        self.n_hidden = n_hidden
        self.n_output = n_output
        self.l2 = l2
        self.l1 = l1
        self.epochs = epochs
        self.eta = eta
        self.decrease_const = decrease_const
        self.shuffle = shuffle
        self.n_minibatches = n_minibatches

        self.params = {}
        self._init_weights()

        self.layers = {}
        self.layers['Affine_1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Sigmoid'] = Sigmoid()
        self.layers['Affine_2'] = Affine(self.params['W2'], self.params['b2'])
        self.last_layer = SoftmaxWithLoss()

        self._loss = []
        self._iter_t = 0
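
_init_weights is called above but not shown. Since the layers read params['W1'], params['b1'], params['W2'], and params['b2'], a minimal sketch of what it plausibly does (the 0.01 scale and zero biases are assumptions borrowed from the other examples on this page):

    def _init_weights(self):
        # Hypothetical reconstruction: small random weights, zero biases,
        # shaped for the Affine_1 -> Sigmoid -> Affine_2 stack built above.
        self.params['W1'] = 0.01 * np.random.randn(self.n_features, self.n_hidden)
        self.params['b1'] = np.zeros(self.n_hidden)
        self.params['W2'] = 0.01 * np.random.randn(self.n_hidden, self.n_output)
        self.params['b2'] = np.zeros(self.n_output)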
Example #3
    def __init__(self, input_size, hidden_size, output_size):
        """
        class initializer

        Parameters
        ----------
        input_size : int
            the number of input neurons
        hidden_size : int
            the number of hidden neurons
        output_size : int
            the number of output neurons
        """
        I, H, O = input_size, hidden_size, output_size
        # Initialize Weight & Bias
        W1 = np.random.randn(I, H)
        b1 = np.random.randn(H)
        W2 = np.random.randn(H, O)
        b2 = np.random.randn(O)
        # Generate Layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()
        # Store all layers' parameters (differs from the original)
        self.params_list, self.grads_list = [], []
        for layer in self.layers:
            self.params_list.append(layer.params)
            self.grads_list.append(layer.grads)
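
Note that params_list here nests one params list per layer instead of flattening them into a single list as the other examples do, so an optimizer would have to loop over layers and then over each layer's parameters (or flatten with itertools.chain).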
Example #4
    def __init__(
        self,
        input_size,
        hidden_size_list,
        output_size,
        activation="relu",
        weight_init_std="relu",
        weight_decay_lambda=0,
    ):
        self.input_size = input_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        self.output_size = output_size
        self.weight_decay_lambda = weight_decay_lambda
        self.params = {}

        # Weight initialization
        self.__init_weight(weight_init_std)

        # Create layers
        activation_layer = {"sigmoid": Sigmoid, "relu": Relu}  # classes, instantiated per layer
        self.layers = OrderedDict()

        for idx in range(1, self.hidden_layer_num + 1):
            self.layers["Affine" + str(idx)] = Affine(
                self.params["W" + str(idx)], self.params["b" + str(idx)])
            self.layers["Activation_function" +
                        str(idx)] = activation_layer[activation]

        idx = self.hidden_layer_num + 1
        self.layers["Affine" + str(idx)] = Affine(self.params["W" + str(idx)],
                                                  self.params["b" + str(idx)])

        self.last_layer = SoftmaxWithLoss()
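
__init_weight is referenced above but not shown. The weight_init_std="relu" default suggests the usual convention of He initialization for ReLU and Xavier for sigmoid; a sketch under that assumption:

    def __init_weight(self, weight_init_std):
        # Sketch: pick a per-layer scale from the activation keyword
        # ("relu"/"he" or "sigmoid"/"xavier"), or use a numeric std directly.
        all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
        for idx in range(1, len(all_size_list)):
            scale = weight_init_std
            if str(weight_init_std).lower() in ("relu", "he"):
                scale = np.sqrt(2.0 / all_size_list[idx - 1])  # He initialization
            elif str(weight_init_std).lower() in ("sigmoid", "xavier"):
                scale = np.sqrt(1.0 / all_size_list[idx - 1])  # Xavier initialization
            self.params["W" + str(idx)] = scale * np.random.randn(
                all_size_list[idx - 1], all_size_list[idx])
            self.params["b" + str(idx)] = np.zeros(all_size_list[idx])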
Example #5
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        W1 = np.random.randn(I, H)
        b1 = np.random.randn(H)
        W2 = np.random.randn(H, O)
        b2 = np.random.randn(O)

        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]

        self.params = []
        for layer in self.layers:
            self.params += layer.params
Example #6
import numpy as np
from common.layers import Sigmoid  # assuming the book's common/ package, as in Example #15

def all_connection():
    W1 = np.random.randn(2, 4)
    b1 = np.random.randn(4)
    W2 = np.random.randn(4, 3)
    b2 = np.random.randn(3)
    x = np.random.randn(10, 2)  # 10 samples
    h = np.dot(x, W1) + b1
    #print(W1)
    #print(b1)
    #print(x)
    print(h)
    a = Sigmoid().forward(h)  # forward is an instance method, so instantiate first
    s = np.dot(a, W2) + b2
    print(s)
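
The shapes chain as x (10, 2) @ W1 (2, 4) -> h (10, 4), unchanged through the sigmoid, then a (10, 4) @ W2 (4, 3) -> s (10, 3): ten samples pass through a 2-4-3 fully connected network in one batch.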
Example #7
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.zeros(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.zeros(O)

        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
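
Because params and grads hold references to the very ndarrays the layers use, a training loop can update every parameter in one flat pass. A minimal sketch of the SGD step this grouping enables (model stands for an instance of the class above; the training loop itself is not part of this example):

lr = 0.1
for param, grad in zip(model.params, model.grads):  # model: assumed instance of the class above
    param -= lr * grad  # in-place update, so the layers see the new values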
Example #8
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        W1 = tf.Variable(tf.random.normal((I, H), mean=0.0, stddev=0.01, dtype=tf.float32))
        b1 = tf.Variable(tf.zeros(H, dtype=tf.float32))
        W2 = tf.Variable(tf.random.normal((H, O), mean=0.0, stddev=0.01, dtype=tf.float32))
        b2 = tf.Variable(tf.zeros(O, dtype=tf.float32))

        self.layers = [
                Affine(W1, b1),
                Sigmoid(),
                Affine(W2, b2),
                ]
        self.loss_layer = SoftmaxWithLoss()
        self.params = []
        for layer in self.layers:
            self.params += layer.params
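
With tf.Variable parameters, the in-place NumPy update param -= lr * grad becomes assign_sub. A standalone sketch of that step (assuming TensorFlow 2):

import tensorflow as tf

v = tf.Variable(tf.zeros(3))  # stands in for one entry of self.params
g = tf.ones(3)                # stands in for its gradient
v.assign_sub(0.1 * g)         # SGD step: v <- v - 0.1 * g
print(v.numpy())              # [-0.1 -0.1 -0.1]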
Example #9
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # Initialize weights and biases
        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.random.randn(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.random.randn(O)

        # Create layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        # Collect all weights and gradients into lists
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Example #10
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # Initialize weights and biases
        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.zeros(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.zeros(O)

        # Create layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        # Collect all weights and gradients into one list
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Example #11
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # Initialize weights and biases
        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.random.randn(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.random.randn(O)

        # create layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        # Collect all weights and gradients into lists
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Example #12
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # Initialize weights and biases
        W1 = 0.01 * np.random.randn(I, H)  # weights start as small random values
        b1 = np.zeros(H)
        W2 = 0.01 * np.random.randn(H, O)  # weights start as small random values
        b2 = np.zeros(O)

        # Create layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]

        # Unlike the other layers, the loss layer is kept out of the layers
        # list and stored separately in the loss_layer instance variable.
        self.loss_layer = SoftmaxWithLoss()

        # Collect all weights and gradients into lists
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Example #13
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # Initialize weights and biases
        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.zeros(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.zeros(O)

        # Create layers
        self.layers = [Affine(W1, b1), Sigmoid(), Affine(W2, b2)]
        self.loss_layer = SoftmaxWithLoss()

        # Collect all weights and gradients into lists.
        # The gradients are always written via deep copy (in-place overwrite),
        # so this grouping only has to be done once, at initialization.
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
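
The "deep copy" the comments refer to is the book's convention that each layer overwrites its gradient arrays in place (e.g. grads[0][...] = dW), so the ndarrays grouped here stay in sync with the layers without regrouping. A standalone illustration:

import numpy as np

layer_grads = [np.zeros((2, 2))]       # what a layer keeps in its .grads
model_grads = []
model_grads += layer_grads             # grouped once, as in __init__ above

layer_grads[0][...] = np.ones((2, 2))  # the layer overwrites its gradient in place
print(model_grads[0])                  # [[1. 1.] [1. 1.]] -- still in sync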
Example #14
    def __init__(self, input_size, hidden_size, output_size):
        I, H, O = input_size, hidden_size, output_size

        # Initialize weights and biases
        W1 = 0.01 * np.random.randn(I, H)
        b1 = np.zeros(H)
        W2 = 0.01 * np.random.randn(H, O)
        b2 = np.zeros(O)

        # Create layers
        self.layers = [
            Affine(W1, b1),
            Sigmoid(),
            Affine(W2, b2)
        ]
        self.loss_layer = SoftmaxWithLoss()

        # Collect all weights and gradients
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads
Example #15
# coding: utf-8

import numpy as np
import sys
sys.path.append('../../')
from common.layers import Sigmoid

sigmoid = Sigmoid()

#---------------------------------------
# forward
x = np.array([[1.0, -0.5], [-2.0, 3.0]])
print(x)
y = sigmoid.forward(x)
print(y)

#---------------------------------------
# backward
dy = np.array([[5, 5], [5, 5]])
dx = sigmoid.backward(dy)
print(dx)
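
For the sigmoid, the backward pass should return dx = dy * y * (1 - y). A quick check appended to the script above (assuming, as in the book's common/layers.py, that Sigmoid caches its forward output):

#---------------------------------------
# check: backward matches dy * y * (1 - y)
print(np.allclose(dx, dy * y * (1 - y)))  # True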