Пример #1
0
def inputLayerTest():
    """Build an InputLayer from one MNIST sample and return it configured."""
    mnist_sample = data.MnistData(SAMPLE_IMAGE, 0)
    in_layer = layer.InputLayer(mnist_sample.getSize())
    in_layer.setInput(mnist_sample.getImage())
    in_layer.confirmParameters()
    return in_layer
Пример #2
0
    def __init__(self,
                 inputDim,
                 convFilterNum,
                 convFilterDim,
                 convImageSize,
                 poolFilterDim,
                 poolImageSize,
                 hiddenDim,
                 outputDim,
                 seed=1):
        """Build a 3-layer neural network with a convolution + pooling front end.

        Each layer wraps the previous one, so construction order matters:
        input -> conv -> pool -> hidden -> output. *seed* is forwarded to
        the layers that initialize random weights.
        """
        # 3-layer neural net with convolution + pooling
        self.inputLayer = layer.InputLayer(inputDim)
        self.convLayer = layer.ConvolutionalLayer(convFilterNum, convFilterDim,
                                                  convImageSize,
                                                  self.inputLayer, seed)
        self.poolLayer = layer.PoolingLayer(poolFilterDim, poolImageSize,
                                            self.convLayer)
        self.hiddenLayer = layer.HiddenLayer(hiddenDim, self.poolLayer, seed)
        self.outputLayer = layer.OutputLayer(outputDim, self.hiddenLayer, seed)

        # Use ReLU (and its matching backprop rule) for the hidden layer.
        self.hiddenLayer.setActivator(util.relu)
        self.hiddenLayer.setBackPropagator(learning.backPropReLU)

        # Ground-truth vector, one slot per output class.
        self.answer = np.zeros(outputDim)
Пример #3
0
    def __init__(self, inputDim, hiddenDim, outputDim, seed=1):
        """Build a 3-layer (input/hidden/output) neural network.

        The dims set each layer's width; *seed* is forwarded to the
        weight-initializing layers.
        """
        # Three-layer neural network; each layer wraps the previous one.
        self.inputLayer = layer.InputLayer(inputDim)
        self.hiddenLayer = layer.HiddenLayer(hiddenDim, self.inputLayer, seed)
        self.outputLayer = layer.OutputLayer(outputDim, self.hiddenLayer, seed)
        # Ground-truth vector, one slot per output class.
        self.answer = np.zeros(outputDim)
Пример #4
0
    def __init__(self, inputDim, hiddenDim, outputDim, seed=1):
        """Build a 3-layer neural network with a ReLU hidden layer.

        The dims set each layer's width; *seed* is forwarded to the
        weight-initializing layers.
        """
        # Three-layer neural network; each layer wraps the previous one.
        self.inputLayer = layer.InputLayer(inputDim)
        self.hiddenLayer = layer.HiddenLayer(hiddenDim, self.inputLayer, seed)
        self.outputLayer = layer.OutputLayer(outputDim, self.hiddenLayer, seed)

        # Use ReLU (and its matching backprop rule) for the hidden layer.
        self.hiddenLayer.setActivator(util.relu)
        self.hiddenLayer.setBackPropagator(learning.backPropReLU)

        # Ground-truth vector, one slot per output class.
        self.answer = np.zeros(outputDim)
Пример #5
0
    def __init__(self, nodes, classification=False):
        """Chain Affine + activation layers for the widths in *nodes*.

        *nodes* is [M, H1, H2, ..., N]. Hidden layers get ReLU; the final
        layer gets Softmax when *classification* is true, else Identity.
        Each Affine layer gets its own Adam optimizer.
        """
        self.nodes = nodes
        prev = layer.InputLayer()
        pairs = list(zip(nodes[:-1], nodes[1:]))
        for idx, (fan_in, fan_out) in enumerate(pairs):
            shape = (fan_in, fan_out)
            prev = layer.AffineLayer(shape, prev, op.Adam(shape))
            if idx < len(pairs) - 1:
                activation = ac.ReLU()
            else:
                activation = ac.Softmax() if classification else ac.Identity()
            prev = layer.ActLayer(fan_out, prev, activation)
        self.outputLayer = prev
Пример #6
0
 def __init__(self, layers):
     """Build the network; *layers* lists the node count of each hidden layer."""
     self.network = [layer.InputLayer()]  # input layer first
     for size in layers:
         # Each hidden layer is fed by the layer appended just before it.
         self.network.append(layer.HiddenLayer(size, self.network[-1], None))
     self.network.append(layer.OutputLayer(self.network[-1]))
     # Wire forward links, then (re)initialize every layer after the input.
     for idx in range(len(self.network) - 1):
         self.network[idx].next_layer = self.network[idx + 1]
         if idx != 0:
             self.network[idx].initialize()
     self.network[-1].initialize()
Пример #7
0
def inputConvTest():
    """Train a small conv net on a 5-image MNIST batch, visualizing progress."""
    input_mgr = ioex.InputManager()
    output_mgr = ioex.OutputManager()
    training = input_mgr.getMnistTrainingData()
    batch = training.getImageBatch(5)
    output_mgr.showPictureFromBatch(batch, (28, 28))

    # input -> 4-filter convolution -> 10-way output
    in_layer = layer.InputLayer(28 * 28)
    conv = layer.ConvolutionalLayer(4, 3, in_layer)
    out_layer = layer.OutputLayer(10, conv)

    in_layer.setInputBatch(batch)

    for step in range(10000):
        out_layer.calculate()
        if step % 1000 == 0:
            # Show the 4 convolution feature maps side by side.
            output_mgr.showPictureFromBatch(conv.getOutput(), (28 * 4, 28))
            print(conv.getWeight())
        # "Vecotr" typo is the project API's actual method name.
        out_layer.update(training.getAnswerVecotrBatch(5))
Пример #8
0
    def __init__(self, const):
        """Build a layer chain from *const*, a list of layer-spec dicts.

        Each dict has 'type' ('Aff' or 'Act') plus either 'size'/'opt'
        (affine) or 'func' (activation).
        """
        self.const = const
        prev = layer.InputLayer()
        for info in const:
            if info['type'] == 'Aff':
                # NOTE(review): an unrecognized 'opt' value silently reuses
                # the optimizer from a previous iteration — preserved as-is.
                if info['opt'] == 'Adam':
                    opt = op.Adam(info['size'])
                elif info['opt'] == 'SGD':
                    opt = op.SGD()
                prev = layer.AffineLayer(info['size'], prev, opt)
            elif info['type'] == 'Act':
                if info['func'] == 'ReLU':
                    act = ac.ReLU()
                elif info['func'] == 'Sigmoid':
                    act = ac.Sigmoid()
                elif info['func'] == 'Softmax':
                    act = ac.Softmax()
                prev = layer.ActLayer(prev.outNode, prev, act)
        self.outputLayer = prev
Пример #9
0
import ioex
import layer
import util

print("### task1 ###")

# I/O setup: managers for user input and result display.
inputM = ioex.InputManager()
outputM = ioex.OutputManager()
testingData = inputM.getMnistTestingData()

# 3-layer neural network (784 -> 50 -> 10).
# NOTE(review): the trailing (1, 1) arguments look like seeds — confirm
# against this file's HiddenLayer/OutputLayer signatures.
inputLayer = layer.InputLayer(28 * 28)
hiddenLayer = layer.HiddenLayer(50, inputLayer, 1, 1)
outputLayer = layer.OutputLayer(10, hiddenLayer, 1, 1)

# Network configuration (disabled).
#outputLayer.setActivator(util.correctedSoftmax)

# Interactive classify loop: read a digit, show the net's prediction.
loop = True
while (loop):
    # Input: pick a test image of the requested digit.
    targetNum = inputM.selectNumber()
    sample = testingData.getSingleData(targetNum)
    inputLayer.setInput(sample.getImage())

    # Output: forward pass, then report the most likely class.
    result = outputLayer.calculate()
    outputM.printMaxLikelihood(result)
    print(result)
Пример #10
0
 def _set_layers(self):
     """Append input, hidden, and output layers sized from self.form."""
     out_idx = self.depth - 1
     self.layers.append(layer.InputLayer(self.form[0]))
     # Everything between the first and last form entries is a hidden layer.
     for size in self.form[1:out_idx]:
         self.layers.append(layer.HiddenLayer(size, [], []))
     self.layers.append(layer.OutputLayer(self.form[out_idx], [], []))
Пример #11
0
                       [43, 2, 3, 4, 5], [44, 2, 3, 4, 5], [11, 2, 3, 4, 5],
                       [12, 2, 3, 4, 5], [13, 2, 3, 4, 5], [14, 2, 3, 4, 5],
                       [21, 2, 3, 4, 5], [22, 2, 3, 4, 5], [23, 2, 3, 4, 5],
                       [24, 2, 3, 4, 5], [31, 2, 3, 4, 5], [32, 2, 3, 4, 5],
                       [33, 2, 3, 4, 5], [34, 2, 3, 4, 5], [41, 2, 3, 4, 5],
                       [42, 2, 3, 4, 5], [43, 2, 3, 4, 5], [44, 2, 3, 4, 5],
                       [11, 2, 3, 4, 5], [12, 2, 3, 4, 5], [13, 2, 3, 4, 5],
                       [14, 2, 3, 4, 5], [21, 2, 3, 4, 5], [22, 2, 3, 4, 5],
                       [23, 2, 3, 4, 5], [24, 2, 3, 4, 5], [31, 2, 3, 4, 5],
                       [32, 2, 3, 4, 5], [33, 2, 3, 4, 5], [34, 2, 3, 4, 5],
                       [41, 2, 3, 4, 5], [42, 2, 3, 4, 5], [43, 2, 3, 4, 5],
                       [44, 2, 3, 4, 5]])

#conv.update(dEdOutTest)

# Pooling smoke test: forward + backward pass over the dEdOutTest batch.
inputLayer = layer.InputLayer(64)
# presumably toggles input normalization — TODO confirm against InputLayer
inputLayer.normalization = 1
pooling = layer.PoolingLayer(2, (16, 4), inputLayer)
inputLayer.setInputBatch(dEdOutTest)

result = pooling.calculate()
print(result)

# Feed the forward output straight back in as the upstream gradient.
pooling.update(result)
"""
t1 = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
t2 = np.array([1,2,9])

print(t1.reshape(2,2,3)*t2)
print(t1.reshape(2,2,3)==t2)
Пример #12
0
# NOTE(review): Python 2 syntax (print statements) — this script will not
# run under Python 3 without converting the prints.
import net
import layer
import random
import neuron
import sys
import time

print "Initialising the training data (golden standard)"
# Two training pairs: inputs and their expected outputs.
netInputVector = [[0.05, 0.1], [0.1, 0.2]]
netOutputVector = [[0.01, 0.99], [0.04, 0.99]]

print "Setting up the net"
# 2-3-5-2 fully connected network of sigmoid neurons.
# (Rebinding `net` shadows the imported module after this point.)
net = net.Net()
inputLayer = layer.InputLayer(2, net, neuron.SigmoidNeuron)
net.setInputLayer(inputLayer)
hiddenLayer1 = layer.HiddenLayer(3, net, neuron.SigmoidNeuron)
net.addHiddenLayer(hiddenLayer1)
hiddenLayer2 = layer.HiddenLayer(5, net, neuron.SigmoidNeuron)
net.addHiddenLayer(hiddenLayer2)
outputLayer = layer.OutputLayer(2, net, neuron.SigmoidNeuron)
net.setOutputLayer(outputLayer)

print "Training the net"
# Training loop left disabled in the original source.
#for index in range(0, len(netInputVector)):
#	net.inputLayer.setActivationVector(netInputVector[index])
#	net.perform()
#	error = net.outputLayer.error(netOutputVector[index])
#	epoch = 1
#	while error > 0.01:
#		net.backpropagate(netOutputVector[index])
#		net.perform()