Example #1
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 weight_init_std=0.01):

        self.params = {}

        # Weights and biases
        self.params['W1'] = weight_init_std * \
            np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)

        self.params['W2'] = weight_init_std * \
            np.random.randn(hidden_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)

        self.params['W3'] = weight_init_std * \
            np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Layers
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()

        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()

        self.layers['Affine3'] = Affine(self.params['W3'], self.params['b3'])
        self.lastLayer = Softmax()
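Because the layers sit in an OrderedDict, the forward pass is a loop in insertion order. A minimal predict method consistent with this constructor might look like the following sketch (the project's real method may differ):

    def predict(self, x):
        # Run x through Affine1 -> Relu1 -> Affine2 -> ... in insertion order;
        # self.lastLayer (Softmax) is applied separately, typically in loss().
        for layer in self.layers.values():
            x = layer.forward(x)
        return x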
Example #2
File: main.py Project: huxinran/cnn
def main():
    """
    main func
    """

    text, x, y, char2idx, idx2char = getty()
    T = 100

    config = {
        'dim_hidden': 300,
        'l': T,
        'clip': 5,
        'mu': 0.9,
        'step_size': 0.001
    }

    #np.random.seed(42)
    r = RNN(config)
    r.accept([27])

    ttb = r.sample('f', char2idx, idx2char)
    r.fit(x[:T], y[:T], 100, char2idx, idx2char)
    tta = r.sample('f', char2idx, idx2char)
    print(ttb)
    print(tta)
    print(text[:T])
    return
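    # NOTE: the unconditional return above makes everything below unreachable;
    # remove it (and add conv2/relu2 to the net) to run the CIFAR experiment.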

    (data, label) = cifar()
    N = 10000
    data = np.array(data, dtype=float)[:N, ]
    label = np.array(label)[:N, ]

    data = normalize(data)

    config = {
        'input_shape': [3, 32, 32],
        'mu': 0.9,
        'step_size': 0.000001,
        'step_decay': 0.95
    }

    nn = Net(config)
    conv1 = Conv([3, 3], 6)
    relu1 = Relu()
    conv2 = Conv([3, 3], 32)
    relu2 = Relu()
    pool = MaxPool()
    fc = FC([10])

    nn.add(conv1)
    nn.add(relu1)
    nn.add(pool)
    nn.add(fc)

    print(nn)
    nn.fit(data, label, 200)
Example #3
    def __init__(
            self,
            input_dim=(1, 28, 28),
            conv_param={
                'filter_num': 30,
                'filter_size': 5,
                'pad': 0,
                'stride': 1
            },
            hidden_size=100,
            output_size=10,
            weight_init_std=0.01
    ):
        # Convolution-layer hyperparameters
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]

        conv_output_size = (input_size - filter_size + 2 *
                            filter_pad) / filter_stride + 1
        pool_output_size = int(
            filter_num * (conv_output_size / 2) * (conv_output_size / 2)
        )
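        # Worked example with the defaults (28x28 input, 30 5x5 filters,
        # pad 0, stride 1, followed by 2x2 pooling):
        #   conv_output_size = (28 - 5 + 2*0) / 1 + 1 = 24
        #   pool_output_size = 30 * (24/2) * (24/2) = 4320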

        # Weight parameters
        self.params = {}
        self.params['W1'] = weight_init_std * \
            np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)

        self.params['W2'] = weight_init_std * \
            np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)

        self.params['W3'] = weight_init_std * \
            np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Layers
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(
            self.params['W1'],
            self.params['b1'],
            conv_param['stride'],
            conv_param['pad']
        )
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)

        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()

        self.layers['Affine3'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = Softmax()
Example #4
class TestRelu(unittest.TestCase):
    def setUp(self):
        self.relu = Relu()

    def test_forward(self):
        x = np.array([[1.0, -0.5], [-2.0, 3.0]])
        assert_array_equal(np.array([[1., 0.], [0., 3.]]),
                           self.relu.forward(x))

    def test_backward(self):
        x = np.array([[1.0, -0.5], [-2.0, 3.0]])
        assert_array_equal(np.array([[1., 0.], [0., 3.]]),
                           self.relu.backward(self.relu.forward(x)))
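For reference, a mask-based Relu that satisfies both tests might look like this sketch (assuming numpy is imported as np; the project's actual class may differ):

class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        # Remember where the input was non-positive.
        self.mask = (x <= 0)
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        # Gradient flows only where the cached input was positive.
        dout = dout.copy()
        dout[self.mask] = 0
        return dout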
Example #5
File: relu_test.py Project: huxinran/cnn
    def test_forward(self):
        l = Relu()
        l.accept([2, 3])

        x = np.array([[1, 2, 3, -4, -5, -6]])
        y = l.forward(x)
        self.assertTrue(np.allclose(y, [[1, 2, 3, 0, 0, 0]]))
Example #6
File: relu_test.py Project: huxinran/cnn
    def test_backward(self):
        l = Relu()
        l.accept([2, 2])
        l.x = np.array([[1, -1], [-1, 1]])

        dy = np.array([[1, 1], [-1, 2]])
        
        dx = l.backward(dy)

        self.assertTrue(np.allclose(dx, [[1, 0], [0, 2]]))
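Examples #5 and #6 (together with test_accept in Example #12 below) suggest an interface where accept records the input shape, forward caches its input in self.x, and backward masks the incoming gradient. A sketch that passes all three tests (assuming numpy as np; the actual huxinran/cnn class may differ):

class Relu:
    def accept(self, shape):
        # A Relu imposes no constraint on its input shape, so always accept.
        self.shape = shape
        return True

    def forward(self, x):
        self.x = x  # cached so backward can rebuild the mask
        return np.maximum(x, 0)

    def backward(self, dy):
        # Pass gradient through only where the cached input was positive.
        return dy * (self.x > 0)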
Example #7
    def __init__(self, input_dim, sizes):
        self.layers = []
        sizes.insert(0, input_dim)

        for i in range(1, len(sizes) - 1):
            layer = Dense(sizes[i], sizes[i - 1], Relu())
            self.layers.append(layer)

        l = len(sizes)
        layer = Dense(sizes[l - 1], sizes[l - 2], Sigmoid())
        self.layers.append(layer)
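A hypothetical usage, assuming the enclosing class is named Network (the snippet does not show its name); note that sizes is mutated in place by insert(0, ...):

net = Network(784, [128, 64, 1])
# builds Dense(128, 784, Relu()), Dense(64, 128, Relu()), Dense(1, 64, Sigmoid())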
Example #8
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 weight_init_std=0.01):
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(
            input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(
            hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss()
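Unlike Example #1, this network ends in SoftmaxWithLoss, which fuses the softmax with the cross-entropy loss so that the backward pass collapses to (y - t) / batch_size. A common formulation as a sketch (assuming numpy as np and one-hot labels t):

class SoftmaxWithLoss:
    def forward(self, x, t):
        # Numerically stable softmax: shift each row by its max first.
        x = x - x.max(axis=1, keepdims=True)
        self.y = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
        self.t = t
        # Average cross-entropy over the batch (1e-7 guards log(0)).
        return -np.sum(t * np.log(self.y + 1e-7)) / x.shape[0]

    def backward(self, dout=1):
        # Combined softmax + cross-entropy gradient.
        return dout * (self.y - self.t) / self.t.shape[0]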
Example #9
validX = (validX - mu) / (std + np.finfo(np.float32).eps)
testX = (testX - mu) / (std + np.finfo(np.float32).eps)

#%% Visualize MNIST
# https://colah.github.io/posts/2014-10-Visualizing-MNIST/

#%% Build the model
model = NeuralNetwork()
# Think of the neural network as a stack of layers, each performing a specific
# computation; data flows through them to produce the feed-forward output.
# Guided by the chain rule, backpropagating the error yields the partial
# derivatives (gradients) of the cost function with respect to the parameters.

# Task 1: implement the forward and backward methods of the Relu class
#         (cf. the Relu sketch after Example #4 above)
# Task 2: implement the forward method of the Softmax class
#         (see the sketch after this example)

model.layers.append(Linear(n_feature, 60, lr))
model.layers.append(Relu())
model.layers.append(Linear(60, 10, lr))
model.layers.append(Softmax())

#%% Training
# stochastic gradient descent
batchsize = 100
trainloss = []
validloss = []
snapshot = []

for i in range(n_iter):
    # Before each epoch, draw a fresh random permutation of indices
    # (this shuffles the training data)
    idxs = np.random.permutation(trainX.shape[0])

    for j in range(0, trainX.shape[0], batchsize):
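The excerpt ends inside the mini-batch loop. For Task 2 above, a numerically stable Softmax forward pass might look like this sketch (subtracting the per-row max leaves the result unchanged but keeps np.exp from overflowing):

class Softmax:
    def forward(self, x):
        # softmax is invariant to subtracting a constant from each row
        e = np.exp(x - x.max(axis=1, keepdims=True))
        return e / e.sum(axis=1, keepdims=True)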
Example #10
        break
    else:
        print("illegal input from keyboard.")
####

####
while True:
    sig_relu = input("sigmoid or relu? s/r > ")
    if sig_relu == "s":
        npyfile = 'wb_learn_s.npy'
        srclass = Sigmoid()
        srclass_c = Sigmoid()
        break
    elif sig_relu == "r":
        npyfile = 'wb_learn_r.npy'
        srclass = Relu()
        srclass_c = Relu()
        break
    else:
        print("illegal input from keyboard.")
####
sys.stdout.write("Now loading...")
sys.stdout.flush()

dlist = inclass.inputer(itbool)[0]
#print(dlist)
anslist = inclass.inputer(itbool)[1]
#print(anslist)
trainsize = dlist.shape[0]  #N

midnum = 55  # number of middle-layer (hidden) nodes
Example #11
File: relu_test.py Project: huxinran/cnn
    def test_repr(self):
        l = Relu()
        pass
Example #12
File: relu_test.py Project: huxinran/cnn
    def test_accept(self):
        l = Relu()
        self.assertTrue(l.accept(100))
        pass
Example #13
File: relu_test.py Project: huxinran/cnn
    def test_init(self):
        l = Relu()
        pass
Example #14
File: main.py Project: JialiangHan/CNN
import numpy as np

from Convolution import Conv2D
from FC import Fully_Connect
from Pooling import Max_pooling, AVG_pooling
from Softmax import Softmax
from load_mnist import load_mnist
from relu import Relu

images, labels = load_mnist('.\\mnist')
test_images, test_labels = load_mnist('.\\mnist', 't10k')

batch_size = 100
conv1 = Conv2D([batch_size, 28, 28, 1], 12, 5, 1)
relu1 = Relu(conv1.output_shape)
pool1 = Max_pooling(relu1.output_shape)
conv2 = Conv2D(pool1.output_shape, 24, 3, 1)
relu2 = Relu(conv2.output_shape)
pool2 = Max_pooling(relu2.output_shape)
fc = Fully_Connect(pool2.output_shape, 10)
sf = Softmax(fc.output_shape)

for epoch in range(20):
    learning_rate = 1e-4
    # training
    for i in range(int(images.shape[0] / batch_size)):
        # forward
        img = images[i * batch_size:(i + 1) * batch_size].reshape(
            [batch_size, 28, 28, 1])
        label = labels[i * batch_size:(i + 1) * batch_size]
        conv1_out = conv1.forward(img)
Example #15
    def setUp(self):
        self.relu = Relu()
Example #16
from adam import Adam

inclass = Inputer()
dlist = inclass.inputer(True)[0]
#print(dlist)
anslist = inclass.inputer(True)[1]
#print(anslist)
trainsize = dlist.shape[0] #N

sig_relu = input("sigmoid or relu? s/r > ")
####
if sig_relu == "s":
    srclass = Sigmoid()
    npyfile = 'wb_learn_s.npy'
elif sig_relu == "r":
    srclass = Relu()
    npyfile = 'wb_learn_r.npy'
####

from three_nn import Three_nn
threeclass = Three_nn()

midnum = 55  # number of middle-layer (hidden) nodes

[w_one, w_two, b_one, b_two, gamma_mid, beta_mid, gamma_out, beta_out,\
 running_mean_mid, running_var_mid, running_mean_out, running_var_out] = numpy.load(npyfile)

normclass_mid = Batchnorm(gamma_mid, beta_mid, 0.9, running_mean_mid, running_var_mid)
normclass_out = Batchnorm(gamma_out, beta_out, 0.9, running_mean_out, running_var_out)
loopnum = 10000
truenum = 0