Example #1
#
# So let's try.
# Note: training may take a while since we are training a fairly big network.

from __future__ import print_function
from ffnet import ffnet, mlgraph, readdata
from numpy import array

# Generate standard layered network architecture and create network
conec = mlgraph((3,22,12,1))
net = ffnet(conec)

# Read training data omitting first column and first line
print("READING DATA...")
data = readdata( 'data/black-scholes.dat',
                 usecols  = (1, 2, 3, 4),
                 skiprows =  1)
input =  data[:, :3] #first 3 columns
target = data[:, -1] #last column

print("TRAINING NETWORK...")
import sys; sys.stdout.flush()  # make sure the messages above appear before the training output
net.train_tnc(input, target, maxfun = 5000, messages=1)

# Test network
print()
print("TESTING NETWORK...")
output, regression = net.test(input, target, iprint = 0)
Rsquared = regression[0][2]
maxerr = abs( array(output).reshape( len(output) ) - array(target) ).max()
print("R-squared:           %s  (should be >= 0.999999)" %str(Rsquared))
Example #2
            c += 1
            # print 'good ', pred[i], y[i]
        else:
            # print 'bad ', pred[i], y[i]
            pass
    print 'c = ', c, ' len = ', len(pred)
    return (c / float(len(pred))) * 100  # float() avoids Python 2 integer division


# Generate standard layered network architecture and create network
conec = mlgraph((400, 200, 10))
net = ffnet(conec)

# Read data file
print "READING DATA..."
data = readdata('data.csv')
numpy.random.shuffle(data)
X = data[:, 1:]
y = data[:, 0]  # first column - digit labels
input = X
target = numpy.ndarray((input.shape[0], 10))
for i in range(len(y)):
    target[i] = numpy.zeros((1, 10))
    if y[i] == 10:
        y[i] = 0
    target[i][int(y[i])] = 1  # int() cast: labels are read in as floats

# Train network with scipy tnc optimizer - first 4500 samples used for training
print "TRAINING NETWORK..."
net.train_tnc(input[:4500], target[:4500], maxfun=3000, messages=1)
#net.train_cg(input[:100],target[:100],maxiter=2000)
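
# Hedged evaluation sketch: the code above trains on the first 4500 rows, but the
# snippet never scores the remaining ones. This assumes data.csv has more than
# 4500 rows and reuses net.test as in the other examples.
pred, _ = net.test(input[4500:], target[4500:], iprint=0)
correct = sum(numpy.argmax(pred[i]) == y[4500 + i] for i in range(len(pred)))
print 'Held-out accuracy:', float(correct) / len(pred) * 100, '%'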
Example #3
# Training file (data/ocr.dat) contains 68 patterns; the first 58
# are used for training and the last 10 for testing.
# Each pattern contains 64 inputs defining the 8x8 bitmap of a digit;
# the last 10 numbers are the targets (10 targets for 10 digits).
# Layered network architecture is used here: (64, 10, 10, 10).

from ffnet import ffnet, mlgraph, readdata

# Generate standard layered network architecture and create network
conec = mlgraph((64,10,10,10))
net = ffnet(conec)

# Read data file
print "READING DATA..."
data = readdata( 'data/ocr.dat', delimiter = ' ' )
input =  data[:, :64] #first 64 columns - bitmap definition
target = data[:, 64:] #the rest - 10 columns for 10 digits

# Train network with scipy tnc optimizer - 58 lines used for training
print "TRAINING NETWORK..."
net.train_tnc(input[:58], target[:58], maxfun = 2000, messages=1)

# Test network - remaining 10 lines used for testing
print
print "TESTING NETWORK..."
output, regression = net.test(input[58:], target[58:], iprint = 2)

############################################################
# Make a plot of a chosen digit along with the network guess
try:
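    # Hedged sketch of the truncated plotting block: the matplotlib calls and the
    # chosen digit index are assumptions, not the original example's code.
    import matplotlib.pyplot as plt
    from numpy import argmax
    digit = 0                                    # which of the 10 test patterns to show
    plt.imshow(input[58 + digit].reshape(8, 8), interpolation='nearest')
    plt.title("Network guess: %d" % argmax(output[digit]))
    plt.show()
except ImportError:
    print "matplotlib is needed for the plot."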
Example #4
#
# So let's try.
# Note: training may take a while since we are training a fairly big network.

from ffnet import ffnet, mlgraph, readdata
from numpy import array
import numpy as np

# Generate standard layered network architecture and create network
conec = mlgraph((3, 22, 12, 1))
net = ffnet(conec)

# Read training data omitting first column and first line
print "READING DATA..."
data = readdata('black-scholes.dat',
                 usecols=(1, 2, 3, 4),
                 skiprows=1)
data_ld = np.loadtxt('ld_3PB.csv', delimiter=';')    # presumably load-displacement curves (3PB tests)
data_par = np.loadtxt('params_3PB.txt')              # parameters of each curve
data_ld_x = -data_ld[np.arange(0, 58, 2)]            # rows 0, 2, ..., 56: x values, sign flipped
data_ld_x /= np.max(data_ld_x)                       # normalise by the global maximum
data_ld_y = -data_ld[np.arange(1, 59, 2)]            # rows 1, 3, ..., 57: y values, sign flipped
data_ld_y /= np.max(data_ld_y)
# input = data[:, :3]  # first 3 columns
# target = data[:, -1]  # last column
# Inputs: the flattened x values plus the first two parameter rows (each value repeated
# 35 times to match the flattened curves); targets: the corresponding y values.
input = np.vstack((data_ld_x.flatten(), data_par[0, :-1].repeat(35), data_par[1, :-1].repeat(35))).T
target = data_ld_y.flatten()

print "TRAINING NETWORK..."
import sys; sys.stdout.flush()  # make sure the messages above appear before the training output
net.train_tnc(input, target, maxfun=5000, messages=1)
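
# Hedged continuation: the snippet stops right after training. A quick check of the
# fit and a saved copy of the network (savenet is part of ffnet's API; the filename
# is an assumption).
from ffnet import savenet
output, regression = net.test(input, target, iprint=0)
print "R-squared of the fit:", regression[0][2]
savenet(net, 'ld_3PB.network')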
Example #5
from ffnet import ffnet, mlgraph, readdata, savenet, loadnet
import time
import numpy as np

# Read data file
print "READING DATA..."
data = readdata('../expDataNew/data_rt_rr_statwosysdsk.txt', delimiter=',')
input = data[:, 1:]
target = data[:, :1]

# Generate standard layered network architecture and create network
conec = mlgraph((input.shape[1], (input.shape[1] + 1) / 2, 1))
net = ffnet(conec)

print "TRAINING NETWORK..."
n = 6168

net.randomweights()
st = time.time()
net.train_tnc(input[:n], target[:n])
el = time.time() - st
print "Time to train NN with %d examples: %0.3f sec" % (n, el)
# Save net
#savenet(net,'rt_rr_statwosysdsk.network')

print
print "TESTING NETWORK..."
output, regression = net.test(input[n:], target[n:], iprint=1)

y_act = np.array(target[n:])
y_prd = np.array(output)
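
# Hedged completion mirroring the error measure used in the next example: mean
# relative error on the held-out rows (assumes no zero entries in y_act).
err = abs(y_act - y_prd) / abs(y_act)
print "Mean relative test error [%]:", sum(err) / len(err) * 100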
Example #6
from ffnet import ffnet, mlgraph, readdata, savenet, loadnet
import time
import numpy as np

# Read data file
print "READING DATA..."
data = readdata('./data_w_k_statwosysdsk.txt', delimiter=' ')
n = int(656 * 1)

for i in range(2, 10):
    input = data[:, [0, 1, i]]
    target = data[:, [i + 8]]

    # Generate standard layered network architecture and create network
    conec = mlgraph((input.shape[1], 8, 1))
    net = ffnet(conec)

    print "TRAINING NETWORK..."

    net.randomweights()
    st = time.time()
    net.train_tnc(input[:n], target[:n])
    el = time.time() - st
    print "Time to train NN with %d examples: %0.3f sec" % (n, el)

    output, regression = net.test(input[:n], target[:n], iprint=0)

    y_act = np.array(target[:n])
    y_prd = np.array(output)
    err = abs(y_act - y_prd) / abs(y_act)
    print "Training error", sum(err) / len(err) * 100
Example #7
		if numpy.argmax(pred[i]) == y[i]:
			c += 1
			# print 'good ', pred[i], y[i]
		else:
			# print 'bad ', pred[i], y[i]
			pass
	print 'c = ', c, ' len = ', len(pred)
	return (c / float(len(pred))) * 100  # float() avoids Python 2 integer division

# Generate standard layered network architecture and create network
conec = mlgraph((400,200,10))
net = ffnet(conec)

# Read data file
print "READING DATA..."
data = readdata( 'data.csv')
numpy.random.shuffle(data)
X = data[:,1:]
y = data[:,0]  # first column - digit labels
input = X
target = numpy.ndarray((input.shape[0],10))
for i in range(len(y)):
	target[i] = numpy.zeros((1,10))
	if y[i] == 10:
		y[i] = 0
	target[i][int(y[i])] = 1  # int() cast: labels are read in as floats

# Train network with scipy tnc optimizer - first 4500 samples used for training
print "TRAINING NETWORK..."
net.train_tnc(input[:4500], target[:4500], maxfun = 3000, messages=1)
#net.train_cg(input[:100],target[:100],maxiter=2000)