# Ejemplo n.º 1 (score: 0)
    # NOTE(review): fragment — the enclosing loop header (which binds `data`)
    # is outside this view; presumably it iterates rows of a raw dataset.
    # Split the row: first 20 values are the network input, the rest the target.
    indata =  tuple(data[:20])
    outdata = tuple(data[20:])
    testDataSet.addSample(indata,outdata)


print "NORMALIZING DATASET"
mx = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=numpy.float64)
for inp, out in dataSet:
    for i in range(0, len(inp)):
        mx[i]+=inp[i]
for i in range(0, len(mx)):
    mx[i] = mx[i] / len(dataSet)
nfactor = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=numpy.float64)
for i in range(0, len(dataSet)):
    for j in range(0, len(nfactor)):
        t = dataSet.getSample(i)[0][j]
        nfactor[j] += (t - mx[j]) * (t - mx[j])
for i in range(0, len(nfactor)):
    nfactor[i] = nfactor[i]/len(dataSet)
    nfactor[i] = math.sqrt(nfactor[i])
print nfactor
for inp, out in dataSet:
    for i in range(0, len(inp)):
        if(nfactor[i]!=0):
            inp[i] = (inp[i] - mx[i])/nfactor[i]
    #print inp

print "TRAINING OUR NEURAL NET"
neuralNet = buildNetwork(20, 6, 1)
trainer = BackpropTrainer(neuralNet, dataSet)
t=""
# Ejemplo n.º 2 (score: 0)
                # NOTE(review): orphan fragment — the surrounding code (likely a
                # running-minimum check over network outputs) is outside this view.
                outputMin = output
# NOTE(review): this loop body appears truncated at the visible cut — no code
# that trains the network or populates `trains`/`tests`/`epochsNums` is shown.
trains = []       # presumably per-size training errors — confirm below the cut
tests = []        # presumably per-size test errors
epochsNums = []
# Sweep hidden-layer sizes 1..199, building a fresh net for each.
parameters = range(1, 200)
for i in parameters:
    # Fresh 75/25 train/test split every iteration.
    tstdata, trndata = ds.splitWithProportion( 0.25 )
    hidden_size = i
    numOfEpocs = 10
    """
    n = buildNetwork( 1, hidden_size, 1, bias = True )
    
    
    """
    
    # Hand-built equivalent of buildNetwork(..., bias=True):
    # linear in -> sigmoid hidden -> linear out, plus a shared bias unit.
    inLayer = LinearLayer(len(ds.getSample(0)[0]))
    hiddenLayer = SigmoidLayer(hidden_size)
    outLayer = LinearLayer(len(ds.getSample(0)[1]))
    n = FeedForwardNetwork()
    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    b = BiasUnit()
    n.addModule(b)
    n.addOutputModule(outLayer)
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    b_to_hidden = FullConnection(b, hiddenLayer)
    b_to_out = FullConnection(b, outLayer)
    
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    # NOTE(review): b_to_hidden / b_to_out are built but addConnection for them
    # is not visible here — either the calls sit past the cut, or the bias unit
    # is silently unconnected. Verify against the full source.
# split the data into testing and training data
tstdata_temp, trndata_temp = alldata.splitWithProportion(0.15)

# small bug with _convertToOneOfMany function.  This fixes that
tstdata = ClassificationDataSet(num_features,1,nb_classes=2)
for n in xrange(0, tstdata_temp.getLength()):
    tstdata.addSample(tstdata_temp.getSample(n)[0], tstdata_temp.getSample(n)[1])

trndata = ClassificationDataSet(num_features,1,nb_classes=2)
for n in xrange(0,trndata_temp.getLength()):
    trndata.addSample(trndata_temp.getSample(n)[0],trndata_temp.getSample(n)[1])

valdata = ClassificationDataSet(num_features,1,nb_classes=2)
for n in xrange(0,stimalldata.getLength()):
    valdata.addSample(stimalldata.getSample(n)[0],stimalldata.getSample(n)[1])

# organizes dataset for pybrain
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()

valdata._convertToOneOfMany()

# sample printouts before running classifier
print "Number of training patterns: ", len(trndata)
print "Input and output dimensions: ", trndata.indim, trndata.outdim
print "First sample (input, target, class):"
print trndata['input'][0], trndata['target'][0], trndata['class'][0]

# build the ANN
# 2 hidden layers (4 layers total)