Example #1
File: XOR.py Project: kaan-sev/neuralpy
import neuralpy

net = neuralpy.Network(2, 10, 8, 1)

neuralpy.output(net.feedforward([1, 1]))
neuralpy.output(net.feedforward([0, 1]))
neuralpy.output(net.feedforward([1, 0]))
neuralpy.output(net.feedforward([0, 0]))

datum_1 = ([1, 1], [0])
datum_2 = ([1, 0], [1])
datum_3 = ([0, 1], [1])
datum_4 = ([0, 0], [0])

training_data = [datum_1, datum_2, datum_3, datum_4]

epochs = 300

learning_rate = 3

net.train(training_data, epochs, learning_rate, monitor_cost=True)

neuralpy.output()

neuralpy.output(net.feedforward([1, 1]))
neuralpy.output(net.feedforward([0, 1]))
neuralpy.output(net.feedforward([1, 0]))
neuralpy.output(net.feedforward([0, 0]))

net.show_costs()
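
After training, the four feedforward calls should print values close to the XOR targets (0, 1, 1, 0). A small hypothetical helper, assuming feedforward returns a one-element sequence of activations, can turn the raw output into a hard 0/1 prediction:

# hypothetical helper, not part of the original file
def predict_xor(net, inputs):
    # round the single output activation to the nearest 0/1 label
    # (assumes feedforward returns a one-element list/array)
    return int(round(net.feedforward(inputs)[0]))

# expected after successful training: [0, 1, 1, 0]
predictions = [predict_xor(net, inputs) for inputs, target in training_data]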
Example #2
# 	The network below learns the logical OR truth table:
#
# 	|   A   |   B   |  A or B   |
# 	|_______|_______|___________|
# 	|   T   |   T   |     T     |
# 	|_______|_______|___________|
# 	|   F   |   T   |     T     |
# 	|_______|_______|___________|
# 	|   T   |   F   |     T     |
# 	|_______|_______|___________|
# 	|   F   |   F   |     F     |
# 	|_______|_______|___________|
#
# 	In our network, 1 will represent True and
# 	0 will represent False
#
import neuralpy

# set up a basic neural network with a 2-node input layer,
# one 3-neuron hidden layer, and one 1-neuron output layer
net = neuralpy.Network(2, 3, 1)

# here is some arbitrary input that we will use to test
x = [1, 1]
out = net.feedforward(x)
neuralpy.output(out)

# here is our training_data that reflects the truth table
# in the header of this file
datum_1 = ([1, 1], [1])
datum_2 = ([1, 0], [1])
datum_3 = ([0, 1], [1])
datum_4 = ([0, 0], [0])

training_data = [datum_1, datum_2, datum_3, datum_4]
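
The example stops before the network is trained. A minimal continuation, assuming the same train() signature used in Example #1, could look like this:

# hypothetical continuation; epochs and learning_rate values are borrowed from Example #1
epochs = 300
learning_rate = 3
net.train(training_data, epochs, learning_rate)

# re-check the trained network against each row of the OR truth table
for inputs, target in training_data:
    neuralpy.output(net.feedforward(inputs))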
Example #3
# exploratory prints (kept commented out)
# print y.head(10)
# print x.describe()
# print y.describe()

# organize the data to be used by the DNN; this fragment assumes pandas
# Series x and y and an integer sample_size are defined earlier in the file
count = 0
dataSet = [([x.ix[count]], [y.ix[count]])]  # list containing tuples of lists
count = 1
while count < sample_size:
	# print " Working on data item : ", count
	dataSet = dataSet + [([x.ix[count]], [y.ix[count]])]
	count = count + 1

# Get the neural network library
import neuralpy
fit = neuralpy.Network(1, 3, 7, 1)  # each argument is the number of neurons in that layer: the first is the input layer, the last is the output layer
epochs = 100
learning_rate = 1
print " fitting model right now "
fit.train( dataSet , epochs , learning_rate )

# Assess model performance
count = 0
pred = []
while count < sample_size:
	out = fit.forward(x[count])  # the neural network output

	print(" Obs : ", count + 1, " y = ", round(y[count], 4),
	      " prediction = ", round(pd.Series(out), 4))
	pred.append(out)
	count = count + 1
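
This fragment relies on pd, x, y, and sample_size being defined in the part of the file that is not shown. A plausible setup, modeled on Example #6 below (which fits y = x**2 on scaled random samples), would be:

import random
import pandas as pd

random.seed(2016)
sample_size = 50
sample = pd.Series(random.sample(range(-10000, 10000), sample_size))
x = sample / 10000   # scale the inputs to [-1, 1]
y = x ** 2           # target values the network should learn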
Example #4
# print(x.head(10))
# print(y.head(10))
# View information about the data: count mean std min 25% 50% 75% max
# print(x.describe())

count = 0
# dataSet is a list of (input, target) tuples, as in Example #3
# (this fragment assumes import neuralpy, import pandas as pd, and the
# Series x and y plus sample_size are defined earlier in the file)
dataSet = [([x.ix[count]], [y.ix[count]])]
count = 1

while (count < sample_size):
    print("Working on data item: ", count)
    dataSet = dataSet + [([x.ix[count]], [y.ix[count]])]
    count = count + 1

fit = neuralpy.Network(1, 3, 7, 1)
epochs = 100
learning_rate = 1
print("fitting model right now")
fit.train(dataSet, epochs, learning_rate)

count = 0
pred = []

while (count < sample_size):
    out = fit.forward(x[count])
    print("Obs: ", count + 1, "y = ", round(y[count], 4), "prediction = ",
          round(pd.Series(out), 4))
    pred.append(out)
    count = count + 1
Example #5
import neuralpy

layers = [2, 10, 8, 1]
net = neuralpy.Network(layers)

# neuralpy will automatically generate random incoming weights and biases for each processing layer.


datum_1 = ([1, 1], [0])
datum_2 = ([1, 0], [1])
datum_3 = ([0, 1], [1])
datum_4 = ([0, 0], [0])

training_data = [datum_1, datum_2, datum_3, datum_4]
epochs = 300
learning_rate = 3
# pass monitor_cost=True to train() to track the cost of every epoch (not used in this call)
net.train(training_data, epochs, learning_rate)

print net.forward([1,0])
print net.forward([0,0])
print net.forward([0,1])
print net.forward([1,1])
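
To actually record the per-epoch cost mentioned in the comment, the call can be extended as in Example #1:

# alternative call that records the cost of every epoch, as in Example #1
net.train(training_data, epochs, learning_rate, monitor_cost=True)
net.show_costs()  # display the cost recorded for each epoch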
Example #6
import pandas as pd
import neuralpy as ne
import random

random.seed(2016)
sample_size = 50
sample = pd.Series(random.sample(range(-10000, 10000), sample_size))
x = sample / 10000
y = x ** 2

count = 0
dataSet = [([x.ix[count]],[y.ix[count]])]
count = 1
while (count < sample_size):
#    print "Working on data item: ", count
    dataSet = dataSet + [([x.ix[count]], [y.ix[count]])]
    count += 1

fit = ne.Network(1, 6, 12, 1)
epochs = 5000
learning_rate = 1
print "Fitting model right now"
fit.train(dataSet, epochs, learning_rate)

count = 0
pred = []
while(count < sample_size):
    out = fit.forward(x[count])
    print 'Obs:' + str(count + 1) + '\t' + 'y=' + str(round(y[count], 4)) + '\t' + 'prediction=' + str(round(pd.Series(out), 4))
    pred.append(out)
    count += 1
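
As a rough check on how well the network has learned y = x**2, the collected predictions can be compared with the targets. A minimal sketch that continues the script above (and assumes each out is a one-element sequence, as the single output neuron suggests):

# summarize the fit with a mean squared error over the whole sample
predictions = pd.Series([p[0] for p in pred])  # assumes each prediction is a one-element list
mse = ((predictions - y) ** 2).mean()
print 'Mean squared error: ' + str(round(mse, 6))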