def test_hidden_error(self):
    """hidden_error for one hidden unit should sum the downstream deltas
    scaled by that unit's outgoing weights.

    The outgoing weights and deltas are built with pairwise_factors so
    each product is a known term of listDblExpected.
    """
    cLayer = random.randint(10, 15)
    cNextLayer = random.randint(10, 15)
    ixPcpt = random.randint(0, cLayer - 1)
    listDblExpected = randlist(-3.0, 3.0, cNextLayer)
    listDblTarget, listDblDelta = pairwise_factors(listDblExpected)
    listPcpt = []
    for ix, dblTarget in enumerate(listDblTarget):
        # Random weights, except the one feeding from unit ixPcpt,
        # which is pinned so the products sum to listDblExpected.
        listDblW = randlist(-3.0, 3.0, cLayer)
        listDblW[ixPcpt] = dblTarget
        listPcpt.append(nn.Perceptron(listDblW, random.random() * 100.0, ix))
    layer = nn.NeuralNetLayer(cLayer, listPcpt)
    pcpt = nn.Perceptron(randlist(-1.0, 1.0, cLayer), 0.0, ixPcpt)
    dblResult = nn.hidden_error(listDblDelta, pcpt, layer)
    self.assertAlmostEqual(sum(listDblExpected), dblResult, 4)
def load_net(sFileName):
    """Load a NeuralNet from a CSV file.

    The file must start with '&' and then a single integer giving the
    number of input units into the first hidden layer; '&' must always be
    followed by an integer and then perceptron lines.  A perceptron line
    is: weights..., w0, index.

    Fixes: the original compared strings and ints with `is`
    (`line[-1] is '&'`, `len(line) is 1`) — identity, not equality;
    strings produced by the csv reader are not interned, so the layer
    terminator branch could never fire reliably.  Also closes the file.
    """
    lstLayers = []
    with open(sFileName, "r") as fIn:
        reader = csv.reader(fIn)
        next(reader)                     # leading '&' sentinel row
        lstPcpt = []
        cInputs = int(next(reader)[-1])  # input count for the first layer
        cInputsFirst = cInputs
        for line in reader:
            if line[-1] == '&':
                # Layer terminator: flush the accumulated perceptrons.
                lstLayers.append(nn.NeuralNetLayer(cInputs, lstPcpt))
                lstPcpt = []
            elif len(line) == 1:
                # Input count for the next layer.
                cInputs = int(line[-1])
            else:
                pcptLine = [float(x) for x in line]
                lstPcpt.append(
                    nn.Perceptron(pcptLine[:-2], pcptLine[-2],
                                  int(pcptLine[-1])))
        # Final layer has no trailing '&'.
        lstLayers.append(nn.NeuralNetLayer(cInputs, lstPcpt))
    return nn.NeuralNet(cInputsFirst, lstLayers)
def test_update_layer(self):
    """update_layer should change each weight by rate * input * output.

    The errors are chosen as 1/(1 - out) so that delta = out * (1 - out)
    * error reduces to just the output, making the expected weight
    change easy to state.
    """
    cOutput = random.randint(5, 10)
    cInput = random.randint(5, 10)
    listDblOutputs = randlist(-1.0, 1.0, cOutput)
    # Error chosen so out*(1-out)*err == out.
    listDblError = [1.0 / (1.0 - dblOut) for dblOut in listDblOutputs]
    listDblInput = randlist(-1.0, 1.0, cInput)
    listPcpt = []
    listListDblWOrig = []
    for ixPcpt in xrange(cOutput):
        listDblW = randlist(-1.0, 1.0, cInput)
        listListDblWOrig.append(listDblW)
        # Give the layer its own copy so the original stays unchanged.
        listPcpt.append(nn.Perceptron(list(listDblW), 0.0, ixPcpt))
    listDblDelta = nn.layer_deltas(listDblOutputs, listDblError)
    layer = nn.NeuralNetLayer(cInput, listPcpt)
    dblLearningRate = random.random()
    nn.update_layer(layer, listDblInput, listDblDelta, dblLearningRate)
    for pcpt, listDblWOrig, dblOut in zip(layer.listPcpt,
                                          listListDblWOrig,
                                          listDblOutputs):
        for dblW, dblWOrig, dblIn in zip(pcpt.listDblW, listDblWOrig,
                                         listDblInput):
            dblExpectedDiff = dblLearningRate * dblIn * dblOut
            self.assertAlmostEqual(dblExpectedDiff, dblW - dblWOrig, 4)
def build_net(listCLayer):
    """Build a NeuralNet with random weights in [-1, 1].

    listCLayer gives the unit count of each layer; the first entry is
    the input size and each subsequent entry is a perceptron layer.
    """
    listLayer = []
    for cInput, cPcpt in zip(listCLayer[:-1], listCLayer[1:]):
        listPcpt = [nn.Perceptron(randlist(-1.0, 1.0, cInput), 0.0, ixPcpt)
                    for ixPcpt in xrange(cPcpt)]
        listLayer.append(nn.NeuralNetLayer(cInput, listPcpt))
    return nn.NeuralNet(listCLayer[0], listLayer)
def test_pcpt_activation_small_weights(self):
    """Activation with many small weights: logit(activation) - w0 should
    equal the dot product of weights and inputs.

    NOTE(review): this method was previously also named
    test_pcpt_activation; a later method of the same name shadowed it in
    the class body, so unittest never discovered or ran it.  Renamed so
    both tests execute.
    """
    cInput = random.randint(10, 50)
    listDblTarget = randlist(-0.05, 0.05, cInput)
    listDblW, listDblInput = pairwise_factors(listDblTarget, -0.05, 0.05)
    dblExpected = sum(listDblTarget)
    dblW0 = 0.1 * (random.random() - 0.5)
    pcpt = nn.Perceptron(listDblW, dblW0, random.randint(0, 1000))
    dblResult = nn.pcpt_activation(pcpt, listDblInput)
    self.assertAlmostEqual(dblExpected, logit(dblResult) - dblW0)
def test_hidden_layer_error(self):
    """hidden_layer_error: with unit deltas downstream, each upstream
    unit's error is the sum of its outgoing weights, which are built
    with randlist_for_sum to total dblSum."""
    cOutput = random.randint(5, 10)
    cNextLayerSize = random.randint(5, 10)
    dblSum = random.random() - 0.5
    dblDeltaSum = 3.0 * (random.random() - 0.5)
    listPcptUpstream = [nn.Perceptron([], 0.0, ixPcpt)
                        for ixPcpt in xrange(cOutput)]
    # One weight list per upstream unit, each summing to dblSum.
    listListDbl = []
    for ixUp in xrange(cOutput):
        listListDbl.append(randlist_for_sum(dblSum, cNextLayerSize, 2.0))
    listPcptDownstream = []
    for ixPcpt in xrange(cNextLayerSize):
        # Column ixPcpt of the weight matrix feeds downstream unit ixPcpt.
        listDblW = [listDbl[ixPcpt] for listDbl in listListDbl]
        listPcptDownstream.append(nn.Perceptron(listDblW, 0.0, ixPcpt))
    layerUp = nn.NeuralNetLayer(0, listPcptUpstream)
    layerDown = nn.NeuralNetLayer(cOutput, listPcptDownstream)
    listDblDelta = [1.0 for _ in xrange(cNextLayerSize)]
    listDblError = nn.hidden_layer_error(layerUp, listDblDelta, layerDown)
    self.assertAlmostEqual(dblSum * cOutput, sum(listDblError), 5)
def test_pcpt_activation(self):
    """pcpt_activation should compute sigmoid(w . x + w0): undoing the
    sigmoid with logit and subtracting w0 recovers the dot product."""
    cInput = random.randint(5, 10)
    dblW0 = random.random() * 0.5
    listDblTarget = randlist(-1.0, 1.0, cInput)
    # Factor each target term into a weight/input pair.
    listDblX, listDblInput = pairwise_factors(listDblTarget, -1.0, 1.0)
    pcpt = nn.Perceptron(listDblX, dblW0, 0)
    dblActivation = nn.pcpt_activation(pcpt, listDblInput)
    dblExpected = sum(listDblTarget)
    dblActual = logit(dblActivation) - dblW0
    self.assertAlmostEqual(dblExpected, dblActual, 4)
def copy_net(net):
    """Return a deep, independent copy of net.

    Only the weight lists are mutable and need copying; the scalar
    fields (input counts, w0, perceptron index) are immutable, so the
    original code's copy.copy() on them was a no-op and has been
    removed.
    """
    newListLayer = []
    for layer in net.listLayer:
        newListPcpt = [nn.Perceptron(list(pcpt.listDblW), pcpt.dblW0, pcpt.ix)
                       for pcpt in layer.listPcpt]
        newListLayer.append(nn.NeuralNetLayer(layer.cInputs, newListPcpt))
    return nn.NeuralNet(net.cInputs, newListLayer)
def test_feed_forward_layer(self):
    """feed_forward_layer: weights are chosen so each unit's pre-sigmoid
    sum equals a known target; logit of the output recovers it."""
    listPcpt = []
    cInput = random.randint(5, 10)
    cPcpt = random.randint(5, 10)
    listDblTarget = randlist(-0.75, 0.75, cPcpt)
    listDblInput = randlist(-1.0, 1.0, cInput)
    for ix, dblTarget in enumerate(listDblTarget):
        # cInput weight terms plus one bias term that sum to the target.
        listDblProduct = randlist_for_sum(dblTarget, cInput + 1, 0.5)
        listDblW = [dblProduct / dblInput
                    for dblProduct, dblInput
                    in zip(listDblProduct, listDblInput)]
        listPcpt.append(nn.Perceptron(listDblW, listDblProduct[-1], ix))
    layer = nn.NeuralNetLayer(cInput, listPcpt)
    listDblOutput = nn.feed_forward_layer(layer, listDblInput)
    listDblLogit = [logit(dbl) for dbl in listDblOutput]
    for dblTarget, dblLogit in zip(listDblTarget, listDblLogit):
        self.assertAlmostEqual(dblTarget, dblLogit, 4)
def test_update_pcpt(self):
    """update_pcpt: each weight (and w0) should move by
    rate * delta * input, where delta = activation * (1-activation) * error."""
    dblLearningRate = random.random() + 0.5
    cWeight = random.randint(10, 20)
    listDblInput = randlist(-0.5, 0.5, cWeight)
    listDblW = randlist(-0.5, 0.5, cWeight)
    dblW0 = random.random() - 0.5
    dblActivation = random.random() * 0.5 + 0.25
    dblError = random.random() - 0.5
    dblDelta = nn.compute_delta(dblActivation, dblError)
    ixPcpt = random.randint(0, 100)
    # Copy the weights so listDblW keeps the pre-update values.
    pcpt = nn.Perceptron(list(listDblW), dblW0, ixPcpt)
    nn.update_pcpt(pcpt, listDblInput, dblDelta, dblLearningRate)
    self.assertEqual(len(listDblW), len(pcpt.listDblW))
    dblProductBase = dblLearningRate * dblActivation * dblError
    for dblW, dblIn, dblWOrig in zip(pcpt.listDblW, listDblInput, listDblW):
        dblProduct = dblProductBase * dblIn
        dblExpected = dblProduct - (dblW - dblWOrig)
        self.assertAlmostEqual(dblProduct * dblActivation, dblExpected)
    # The bias input is implicitly 1.0.
    self.assertAlmostEqual(dblProductBase * dblActivation,
                           dblProductBase - (pcpt.dblW0 - dblW0))
def build_pcpt(cInputs, dblLo=-1.0, dblHi=1.0):
    """Build a Perceptron with cInputs random weights in [dblLo, dblHi],
    zero bias weight, and index 0.

    Fixes: the original called nn.Perceptron(cInputs, randlist(...)),
    which does not match the (listDblW, dblW0, ix) constructor used
    everywhere else in this file (it passed the input count as the
    weight list and omitted the index), and its default bounds
    (dblLo=1.0, dblHi=-10) were inverted relative to every other
    randlist call here.
    """
    return nn.Perceptron(randlist(dblLo, dblHi, cInputs), 0.0, 0)