# Example #1
    bpn = BackPropogationNetwork( (1,4,1), lFuncs, 0.4)

    samples = np.zeros(20, dtype=[('x',  float, 1), ('y', float, 1)])
    samples['x'] = np.linspace(0,1,20)
    samples['y'] = np.sin(samples['x']*np.pi)

    input  = np.array([samples['x']]).T
    target = np.array([samples['y']]).T
    max = 100000
    #lnErr = 1e-5
    lnErr = 1e-2

    for i in range(max+1):
        err = 0.0;
        for ii in range(len(input)):
            err += bpn.trainEpoch(npA(input[ii]),npA(target[ii]))
        if i%2500 == 0:
        #if i%20 == 0:
            print "iteration {0}\tError : {1:0.6f}".format(i,err)
            #bpn.NN_cout("Train_"+str(i))
        if err <= lnErr:
            print "Min error reached at {0}".format(i)
            break

    #bpn.setWeights()

    bpn.printWeights()

    plt.figure(figsize=(10,5))
    # Draw real function
    x,y = samples['x'],samples['y']
# Example #2

    input   = np.array([ [ll[0],ll[1]] for ll in zip(samples['x'],samples['x2']) ])
    input2  = np.array([ [ll[0],ll[1]] for ll in zip(samples2['x'],samples2['x2']) ])
    target = np.array([samples['y']]).T
    target2 = np.array([samples2['y']]).T

    max = 100000
    #lnErr = 1e-5
    #lnErr = 0.2
    lnErr = 5.0

    for i in range(max+1):
        err = 0.0;
        for ii in range(len(input)):
            err += bpn.trainEpoch(npA(input[ii]),npA(target[ii]))
            err += bpn.trainEpoch(npA(input2[ii]),npA(target2[ii]))
        if i%2500 == 0:
        #if i%20 == 0:
            print "iteration {0}\tError : {1:0.6f}".format(i,err)
            #bpn.NN_cout("Train_"+str(i))
        if err <= lnErr:
            print "Min error reached at {0}".format(i)
            break

    #bpn.setWeights()

    bpn.printWeights()
    for i in range(len(input)):
        output = bpn.run(npA(input[i]))
        print "in :"+str(input[i])+", ou : "+str(output)