def load_net(sFileName):
    """Read a neural-net description from a CSV file and build an nn.NeuralNet.

    The file must start with '&' and then a single integer giving the
    number of inputs to the first hidden layer; every later '&' line is
    followed by an integer (the next layer's input count) and then one
    CSV line per perceptron of the form: weights..., w0, integer index.

    Returns the constructed nn.NeuralNet.
    """
    lstLayers = []
    lstPcpt = []
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open(sFileName, "r") as fileIn:
        reader = csv.reader(fileIn)
        next(reader)                     # skip the leading '&' sentinel line
        line = next(reader)
        cInputs = int(line[-1])
        cInputsFirst = cInputs           # remembered for the net as a whole
        for line in reader:
            # '==' not 'is': identity comparison on strings/ints read from
            # a file only "works" via CPython interning and is unreliable
            if line[-1] == '&':
                # sentinel: close out the layer accumulated so far
                lstLayers.append(nn.NeuralNetLayer(cInputs, lstPcpt))
                lstPcpt = []
            elif len(line) == 1:
                # lone integer: input count for the next layer
                cInputs = int(line[-1])
            else:
                # perceptron line: weights..., bias w0, integer index.
                # List comprehension (not map) so slicing works on Py2 and Py3.
                pcptLine = [float(x) for x in line]
                lstPcpt.append(
                    nn.Perceptron(pcptLine[:-2], pcptLine[-2],
                                  int(pcptLine[-1])))
    # the file does not end with '&', so flush the final layer here
    lstLayers.append(nn.NeuralNetLayer(cInputs, lstPcpt))
    return nn.NeuralNet(cInputsFirst, lstLayers)
def test_update_layer(self):
    """update_layer must move each weight by learningRate*input*output.

    Errors are chosen as 1/(1 - out) so that, via nn.layer_deltas, the
    expected change of every weight reduces to rate * input * output
    (presumably delta collapses to the output value -- the assertion
    below pins exactly that).
    """
    cOut = random.randint(5, 10)
    cIn = random.randint(5, 10)
    listDblOut = randlist(-1.0, 1.0, cOut)
    listDblErr = [1.0 / (1.0 - dbl) for dbl in listDblOut]
    listDblIn = randlist(-1.0, 1.0, cIn)
    listPcpt = []
    listListDblWBefore = []
    for ix in xrange(cOut):
        listDblW = randlist(-1.0, 1.0, cIn)
        listListDblWBefore.append(listDblW)
        # hand the perceptron its own copy so the original is preserved
        listPcpt.append(nn.Perceptron(list(listDblW), 0.0, ix))
    listDblDelta = nn.layer_deltas(listDblOut, listDblErr)
    layer = nn.NeuralNetLayer(cIn, listPcpt)
    dblRate = random.random()
    nn.update_layer(layer, listDblIn, listDblDelta, dblRate)
    for pcpt, listDblWBefore, dblOut in zip(layer.listPcpt,
                                            listListDblWBefore,
                                            listDblOut):
        for dblW, dblWBefore, dblIn in zip(pcpt.listDblW,
                                           listDblWBefore,
                                           listDblIn):
            self.assertAlmostEqual(dblRate * dblIn * dblOut,
                                   dblW - dblWBefore, 4)
def build_net(listCLayer):
    """Build an nn.NeuralNet with random weights drawn from [-1, 1].

    listCLayer[0] is the net's input width; each subsequent entry is
    one layer's perceptron count (and the input width of the layer
    after it). Biases are all 0.0.
    """
    listLayer = []
    for ixLayer in xrange(1, len(listCLayer)):
        cIn = listCLayer[ixLayer - 1]
        cPcpt = listCLayer[ixLayer]
        listPcpt = [nn.Perceptron(randlist(-1.0, 1.0, cIn), 0.0, ix)
                    for ix in xrange(cPcpt)]
        listLayer.append(nn.NeuralNetLayer(cIn, listPcpt))
    return nn.NeuralNet(listCLayer[0], listLayer)
def test_hidden_layer_error(self):
    """hidden_layer_error must back-propagate the downstream deltas.

    Each upstream unit's outgoing weights are built (column-wise, via
    randlist_for_sum) to sum to dblSum, and every downstream delta is
    1.0, so each per-unit error is dblSum and the total is
    dblSum * cOutput.

    Fix versus the original: the local dblDeltaSum was computed from
    random.random() and never used; the dead code is removed.
    """
    cOutput = random.randint(5, 10)
    cNextLayerSize = random.randint(5, 10)
    dblSum = random.random() - 0.5
    # upstream layer: only the perceptron indices matter here
    listPcptUpstream = [nn.Perceptron([], 0.0, ixPcpt)
                        for ixPcpt in xrange(cOutput)]
    # one weight list per upstream unit, each summing to dblSum
    listListDbl = [randlist_for_sum(dblSum, cNextLayerSize, 2.0)
                   for _ in xrange(cOutput)]
    listPcptDownstream = []
    for ixPcpt in xrange(cNextLayerSize):
        # downstream perceptron ixPcpt takes column ixPcpt of every list
        listDblW = [listDbl[ixPcpt] for listDbl in listListDbl]
        listPcptDownstream.append(nn.Perceptron(listDblW, 0.0, ixPcpt))
    layerUp = nn.NeuralNetLayer(0, listPcptUpstream)
    layerDown = nn.NeuralNetLayer(cOutput, listPcptDownstream)
    listDblDelta = [1.0 for _ in xrange(cNextLayerSize)]
    listDblError = nn.hidden_layer_error(layerUp, listDblDelta, layerDown)
    self.assertAlmostEqual(dblSum * cOutput, sum(listDblError), 5)
def copy_net(net):
    """Return an independent copy of net.

    Every weight list is duplicated, so training or otherwise mutating
    the copy leaves the original net's weights untouched. Scalar fields
    (floats/ints) are immutable and are shared safely.
    """
    listLayerNew = []
    for layer in net.listLayer:
        listPcptNew = [nn.Perceptron(list(pcpt.listDblW), pcpt.dblW0, pcpt.ix)
                       for pcpt in layer.listPcpt]
        listLayerNew.append(nn.NeuralNetLayer(layer.cInputs, listPcptNew))
    return nn.NeuralNet(net.cInputs, listLayerNew)
def test_hidden_error(self):
    """hidden_error must return the delta-weighted sum over the next layer.

    pairwise_factors presumably splits each expected value into a
    target/delta pair whose product is that value; planting each
    target at weight column ixPcpt makes the expected result
    sum(listDblExpected).
    """
    cLayer = random.randint(10, 15)
    cNextLayer = random.randint(10, 15)
    ixPcpt = random.randint(0, cLayer - 1)
    listDblExpected = randlist(-3.0, 3.0, cNextLayer)
    listDblTarget, listDblDelta = pairwise_factors(listDblExpected)
    listPcpt = []
    for ix in xrange(cNextLayer):
        listDblW = randlist(-3.0, 3.0, cLayer)
        # overwrite the column this perceptron-under-test feeds into
        listDblW[ixPcpt] = listDblTarget[ix]
        listPcpt.append(nn.Perceptron(listDblW, 100.0 * random.random(), ix))
    layerNext = nn.NeuralNetLayer(cLayer, listPcpt)
    pcptSelf = nn.Perceptron(randlist(-1.0, 1.0, cLayer), 0.0, ixPcpt)
    dblResult = nn.hidden_error(listDblDelta, pcptSelf, layerNext)
    self.assertAlmostEqual(sum(listDblExpected), dblResult, 4)
def test_feed_forward_layer(self):
    """feed_forward_layer output must be the squashed weighted sum.

    Weights are chosen so each perceptron's pre-activation (dot product
    of weights and inputs, plus w0) equals a known target; applying
    logit to the layer's output should recover that target.
    """
    cInput = random.randint(5, 10)
    cPcpt = random.randint(5, 10)
    listDblTarget = randlist(-0.75, 0.75, cPcpt)
    listDblInput = randlist(-1.0, 1.0, cInput)
    listPcpt = []
    for ix, dblTarget in enumerate(listDblTarget):
        # cInput weight*input products plus one bias term, summing to target;
        # zip truncates to cInput pairs, leaving the last product as w0
        listDblProduct = randlist_for_sum(dblTarget, cInput + 1, 0.5)
        listDblW = [dblProduct / dblInput
                    for dblProduct, dblInput in zip(listDblProduct,
                                                    listDblInput)]
        listPcpt.append(nn.Perceptron(listDblW, listDblProduct[-1], ix))
    layer = nn.NeuralNetLayer(cInput, listPcpt)
    listDblOutput = nn.feed_forward_layer(layer, listDblInput)
    for dblTarget, dblOutput in zip(listDblTarget, listDblOutput):
        self.assertAlmostEqual(dblTarget, logit(dblOutput), 4)