def __init__(self, inputDim, convFilterNum, convFilterDim, convImageSize, poolFilterDim, poolImageSize, hiddenDim, outputDim, seed=1):
    """Build a 3-layer neural network with a convolution + pooling front end.

    Layer chain: input -> convolution -> pooling -> hidden (ReLU) -> output.

    :param inputDim: number of input units
    :param convFilterNum: number of convolution filters
    :param convFilterDim: convolution filter size
    :param convImageSize: image size seen by the convolution layer
    :param poolFilterDim: pooling filter size
    :param poolImageSize: image size seen by the pooling layer
    :param hiddenDim: number of hidden units
    :param outputDim: number of output units
    :param seed: RNG seed forwarded to the weight-initialized layers
    """
    # Each layer receives the previous one, so construction order matters.
    self.inputLayer = layer.InputLayer(inputDim)
    self.convLayer = layer.ConvolutionalLayer(convFilterNum, convFilterDim, convImageSize, self.inputLayer, seed)
    self.poolLayer = layer.PoolingLayer(poolFilterDim, poolImageSize, self.convLayer)
    self.hiddenLayer = layer.HiddenLayer(hiddenDim, self.poolLayer, seed)
    self.outputLayer = layer.OutputLayer(outputDim, self.hiddenLayer, seed)
    # Hidden layer uses ReLU with the matching backpropagation rule.
    self.hiddenLayer.setActivator(util.relu)
    self.hiddenLayer.setBackPropagator(learning.backPropReLU)
    # Ground-truth (teacher) vector, one slot per output unit.
    self.answer = np.zeros(outputDim)
def __init__(self, inputDim, hiddenDim, outputDim, seed=1):
    """Build a plain 3-layer (input / hidden / output) neural network.

    :param inputDim: number of input units
    :param hiddenDim: number of hidden units
    :param outputDim: number of output units
    :param seed: RNG seed forwarded to the weight-initialized layers
    """
    # Each layer receives the previous one, so construction order matters.
    self.inputLayer = layer.InputLayer(inputDim)
    self.hiddenLayer = layer.HiddenLayer(hiddenDim, self.inputLayer, seed)
    self.outputLayer = layer.OutputLayer(outputDim, self.hiddenLayer, seed)
    # Ground-truth (teacher) vector, one slot per output unit.
    self.answer = np.zeros(outputDim)
def __init__(self, inputDim, hiddenDim, outputDim, seed=1):
    """Build a 3-layer neural network whose hidden layer uses ReLU.

    :param inputDim: number of input units
    :param hiddenDim: number of hidden units
    :param outputDim: number of output units
    :param seed: RNG seed forwarded to the weight-initialized layers
    """
    # Each layer receives the previous one, so construction order matters.
    self.inputLayer = layer.InputLayer(inputDim)
    self.hiddenLayer = layer.HiddenLayer(hiddenDim, self.inputLayer, seed)
    self.outputLayer = layer.OutputLayer(outputDim, self.hiddenLayer, seed)
    # Hidden layer uses ReLU with the matching backpropagation rule.
    self.hiddenLayer.setActivator(util.relu)
    self.hiddenLayer.setBackPropagator(learning.backPropReLU)
    # Ground-truth (teacher) vector, one slot per output unit.
    self.answer = np.zeros(outputDim)
def __init__(self, layers):
    """Build a network from a list of hidden-layer widths.

    :param layers: list of node counts, one entry per hidden layer

    The network is assembled as input -> hidden(s) -> output, then each
    layer is linked to its successor via ``next_layer`` and every layer
    except the input layer is (re-)initialized.
    """
    # Replaced the original manual while-index loops with for/enumerate;
    # construction order and call sequence are unchanged.
    self.network = [layer.InputLayer()]  # input layer at index 0
    for i, width in enumerate(layers):
        # self.network[i] is the layer directly below the one being added.
        self.network.append(layer.HiddenLayer(width, self.network[i], None))
    # Output layer sits on top of the last hidden layer (or the input
    # layer when `layers` is empty).
    self.network.append(layer.OutputLayer(self.network[-1]))
    # Wire forward links and re-initialize the interior layers.
    for i in range(len(self.network) - 1):
        self.network[i].next_layer = self.network[i + 1]
        if i != 0:
            self.network[i].initialize()  # re-init hidden layers only
    self.network[-1].initialize()
import ioex
import layer
import util

print("### task1 ###")

# Prepare input/output managers and the MNIST test set.
inputM = ioex.InputManager()
outputM = ioex.OutputManager()
testingData = inputM.getMnistTestingData()

# 3-layer neural network: 28*28 inputs -> 50 hidden -> 10 outputs.
inputLayer = layer.InputLayer(28 * 28)
hiddenLayer = layer.HiddenLayer(50, inputLayer, 1, 1)
outputLayer = layer.OutputLayer(10, hiddenLayer, 1, 1)

# Network configuration (currently disabled):
#outputLayer.setActivator(util.correctedSoftmax)

# Interactive input/output loop.
# NOTE(review): `loop` is never set to False, so this runs until interrupted.
loop = True
while (loop):
    # Input: ask the user for a digit and fetch one matching test image.
    targetNum = inputM.selectNumber()
    sample = testingData.getSingleData(targetNum)
    inputLayer.setInput(sample.getImage())
    # Output: forward pass, then report the most likely class.
    result = outputLayer.calculate()
    outputM.printMaxLikelihood(result)
    print(result)
def _set_layers(self):
    """Populate ``self.layers`` from ``self.form``: one input layer,
    hidden layers for the interior widths, and one output layer."""
    out_index = self.depth - 1
    self.layers.append(layer.InputLayer(self.form[0]))
    # Interior entries of `form` become hidden layers.
    for width in self.form[1:out_index]:
        self.layers.append(layer.HiddenLayer(width, [], []))
    self.layers.append(layer.OutputLayer(self.form[out_index], [], []))
def inputToHiddenTest():
    """Feed the test input layer into a 9-unit hidden layer with the
    sample weight/shift and print the forward-pass result."""
    source = inputLayerTest()
    hidden = layer.HiddenLayer(9, source)
    hidden.setWeight(SAMPLE_WEIGHT)
    hidden.setShift(SAMPLE_SHIFT)
    print(hidden.calculate())
def hiddenLayerTest():
    """Build a 5-unit hidden layer on top of a generic 10-unit layer
    and dump its parameters."""
    base = layer.Layer(10)
    layer.HiddenLayer(5, base).confirmParameters()
import net
import layer
import random
import neuron
import sys
import time

print "Initialising the training data (golden standard)"
# Two training samples: input pairs and their expected output pairs.
netInputVector = [[0.05, 0.1], [0.1, 0.2]]
netOutputVector = [[0.01, 0.99], [0.04, 0.99]]

print "Setting up the net"
# 2-3-5-2 fully connected net of sigmoid neurons.
# NOTE(review): `net = net.Net()` shadows the `net` module from here on.
net = net.Net()
inputLayer = layer.InputLayer(2, net, neuron.SigmoidNeuron)
net.setInputLayer(inputLayer)
hiddenLayer1 = layer.HiddenLayer(3, net, neuron.SigmoidNeuron)
net.addHiddenLayer(hiddenLayer1)
hiddenLayer2 = layer.HiddenLayer(5, net, neuron.SigmoidNeuron)
net.addHiddenLayer(hiddenLayer2)
outputLayer = layer.OutputLayer(2, net, neuron.SigmoidNeuron)
net.setOutputLayer(outputLayer)

print "Training the net"
# NOTE(review): the training loop below is commented out, so nothing is
# actually trained after the banner is printed.
#for index in range(0, len(netInputVector)):
#    net.inputLayer.setActivationVector(netInputVector[index])
#    net.perform()
#    error = net.outputLayer.error(netOutputVector[index])
#    epoch = 1
#    while error > 0.01:
#        net.backpropagate(netOutputVector[index])
#        net.perform()
def test_hidden_layer():
    # Randomized check that HiddenLayer.link computes
    # activation(input . weights (+ bias)) for inputs of rank 1 through 4,
    # comparing the symbolic result against a plain-numpy reference.
    # NOTE(review): `floatX` and `expit` are assumed to come from
    # module-level imports (Theano config / scipy.special) -- confirm.
    print "Testing hidden layer..."
    for k in xrange(5):
        print "Layer %i..." % k
        # random parameters for this layer instance
        input_dim = np.random.randint(1, 100)
        output_dim = np.random.randint(1, 100)
        bias = np.random.randint(2)
        activation = 'tanh' if np.random.randint(2) else 'sigmoid'
        # hidden layer under test
        hidden_layer = layer.HiddenLayer(input_dim, output_dim, bias, activation, 'test')
        for i in xrange(20):
            print "%i" % i,
            # tests for dimension 1, 2, 3 and 4: build a symbolic input of
            # the matching rank plus a random concrete value for it
            if i % 4 == 0:
                input = T.vector('input_test')
                input_value = np.random.rand(input_dim).astype(floatX)
            elif i % 4 == 1:
                input = T.matrix('input_test')
                input_value = np.random.rand(
                    np.random.randint(100), input_dim
                ).astype(floatX)
            elif i % 4 == 2:
                input = T.tensor3('input_test')
                input_value = np.random.rand(
                    np.random.randint(50), np.random.randint(50), input_dim
                ).astype(floatX)
            else:
                input = T.tensor4('input_test')
                input_value = np.random.rand(
                    np.random.randint(20), np.random.randint(20),
                    np.random.randint(20), input_dim
                ).astype(floatX)
            output = hidden_layer.link(input)
            # reference value computed with plain numpy on the layer's
            # current parameters
            expected_value = np.dot(
                input_value, hidden_layer.weights.get_value()
            )
            if bias:
                expected_value += hidden_layer.bias.get_value()
            if activation == 'tanh':
                expected_value = np.tanh(expected_value)
            else:
                expected_value = expit(expected_value)
            # only the last axis changes size: (..., input_dim) -> (..., output_dim)
            assert expected_value.shape == input_value.shape[:-1] + (output_dim,)
            np.testing.assert_array_almost_equal(
                output.eval({input: input_value}),
                expected_value
            )
        print "OK"
    print "All tests ran successfully for Hidden Layer."