Example #1
if len(nNodeInEachHid) != nHidLayer:
    print('You must set the no. of neurons in each hidden layer!')

# array of transfer functions, related to the no. of hidden layers
# dim=len(nNeuronArr)-2 (sets the transfer function only for the neurons in each hidden layer)
transfNetwork = ['hardlim']
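
# For reference, 'hardlim' is the hard-limit (step) transfer function used in
# perceptron-style networks. Below is a minimal, self-contained NumPy sketch of
# that activation; it only illustrates the idea and is not the skNN
# implementation (hardlim here is just a local helper name).
import numpy as np  # needed for this self-contained sketch

def hardlim(n):
    """Hard-limit activation: 1 where the net input is >= 0, else 0 (elementwise)."""
    return np.where(np.asarray(n) >= 0, 1, 0)

# e.g. hardlim([-0.3, 0.0, 2.1]) -> array([0, 1, 1])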

# print('\ninput=')
# print(p)
#
# print('\ntarget=')
# print(target)
# singleNN=nn()
# singleNN.inAndOut(p,1,target,0,[2,1,1],['hardlim'])

# Build the network
nnSP1 = skNN.MLP()
nnSP1.build(p, indexSampleP, target, indexSampleTarget, nNodeInEachHid,
            transfNetwork)

nnSP1.setWeightAndBias(1, 30)

percError = 10
# nnSP1.train.unified(loopInInput=10,tol=tol,visualize='text',visualizeStep='none')
# nnSP1.train.unified(loopInInput=10,tol=tol,visualize='graph',visualizeStep='click')
nnSP1.train.unified(loopInInput=10,
                    tol=percError,
                    visualize='graph',
                    visualizeStep=0.05)
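
# The 'hardlim' network above is trained like a perceptron classifier. As a
# rough illustration (not the skNN train.unified routine, whose internals are
# not shown in this example), here is the classic perceptron weight update in
# plain NumPy; perceptron_epoch, W, b, P and T are hypothetical names.
import numpy as np  # needed for this self-contained sketch

def perceptron_epoch(W, b, P, T, lr=1.0):
    """One pass of the classic perceptron rule: w <- w + lr*(t - a)*p."""
    for x, t in zip(P, T):
        a = 1 if (W @ x + b) >= 0 else 0   # hardlim on the net input
        W = W + lr * (t - a) * x           # weights change only when a != t
        b = b + lr * (t - a)               # bias changes the same way
    return W, b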

# --------------------- Try another input -------------------------
# input of network
Example #2

import numpy as np  # np.array is used below; import added for completeness
# plt.plot(p,target,'or')
# plt.show()

# network structure
# nHidLayer=1
nNodeInEachHid = np.array([[15, 1]])
nHidLayer = len(nNodeInEachHid)
# if len(nNodeInEachHid) != nHidLayer:
#     print('You must set the no. of neurons in each hidden layer!')

# array of transfer functions, related to the no. of hidden layers
# dim=len(nNeuronArr)-2 (sets the transfer function only for the neurons in each hidden layer)
transfNetwork = ['sigm', 'purelin']
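
# For reference, 'sigm' is the logistic sigmoid used in the hidden layer and
# 'purelin' is the linear (identity) output transfer function, the usual pairing
# for function fitting. A minimal NumPy sketch of both (illustration only, not
# the wbxnn implementation):
def sigm(n):
    """Logistic sigmoid: maps the net input into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-np.asarray(n, dtype=float)))

def purelin(n):
    """Linear (identity) transfer function, typical for a regression output layer."""
    return np.asarray(n, dtype=float)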

# Build the network
nnfit = wbxnn.MLP()
nnfit.build(p, indexSampleP, target, indexSampleTarget, nNodeInEachHid,
            transfNetwork)
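
# The BP call below works through the data in batches for a number of epochs.
# As a rough illustration of what lr, batchSize and epoch control (assumed
# semantics; the wbxnn train.BP internals are not shown here), this is a generic
# mini-batch gradient-descent loop with a squared-error (SE) loss on a plain
# linear model. minibatch_sgd, X, Y and w are hypothetical names.
def minibatch_sgd(X, Y, w, lr=0.0015, batchSize=3, epoch=1000):
    """Generic mini-batch gradient descent with a squared-error loss."""
    n = len(X)
    for _ in range(epoch):                       # one epoch = one full pass over the data
        for start in range(0, n, batchSize):     # walk through the input set in batches
            xb = X[start:start + batchSize]
            yb = Y[start:start + batchSize]
            err = xb @ w - yb                    # residuals on this batch
            grad = 2.0 * (xb.T @ err) / len(xb)  # gradient of the mean squared error
            w = w - lr * grad                    # gradient-descent step
    return w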

# training
# visualize = 'text' or 'graph'
# epoch is the no. of passes over the entire input set during training
# nIte is the no. of repeats of each batch input
nnfit.train.BP(lr=0.0015,
               batchSize=3,
               epoch=1000,
               nIte=5,
               errorType='SE',
               TolBatch=0.1,
               TolAll=0.1,
               visualize='graph',