Example No. 1
def predict(args):
    day = args[2]
    num_lineups = int(args[3])
    filename = FILENAME_USED_NN
    if len(args) == 5:
        filename = "trainedModels/" + args[4]
    day_x, playerList = getInputForDay(day, True)
    print(day_x.shape)
    print(len(playerList))
    N, M = day_x.shape
    day_X = np.ones((N, M + 1))  # prepend a bias column of ones
    day_X[:, 1:] = day_x
    model = NeuralNet(NeuralNet.load(filename))

    scores, predictedLineups = model.getPrediction(day_X, playerList,
                                                   num_lineups)
    print()
    print("No noise lineup : ")
    for i in range(len(scores)):
        predictedLineup = predictedLineups[i]

        print("player selected:")
        for player in predictedLineup:
            print(player.toString())
        print()
        print("Expected total score :" + str(scores[i]))
        print("---------------")
Example No. 2
def test(train_X, test_X, train_y, test_y, args):
    filename = FILENAME_USED_NN
    if len(args) == 3:
        filename = "trainedModels/" + args[2]
    model_loaded = NeuralNet.load(filename)
    model = NeuralNet(model_loaded)
    print("final train accuracy:", model.score(train_X, train_y))
    print("final test accuracy:", model.score(test_X, test_y))
Example No. 3
	def convolutionTest(self):
		nn = NeuralNet()
		expected = np.array([[-1,0], [-3,-11], [-5,-17]])

		# Convolve the vertical Sobel edge detector
		data = np.array([[1,1,1,0], [0,1,0,0], [2,1,3,4], [5,1,6,7], [8,9,10,11]])
		kernel = np.array([[1,0,-1], [2,0,-2], [1,0,-1]])
		output = nn.convolve(data, kernel)

		return self.arrayIsSame(output, expected)
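
The expected array above matches a "valid" 2-D cross-correlation of the 5x4 input with the 3x3 Sobel kernel (no padding, no kernel flipping), which produces a 3x2 output. A minimal NumPy sketch of that operation, not the project's actual convolve implementation, is:

import numpy as np

def convolve_valid(data, kernel):
    # Slide the kernel over every position where it fully fits ("valid" mode,
    # no flipping) and sum the elementwise products at each position.
    kh, kw = kernel.shape
    out_h = data.shape[0] - kh + 1
    out_w = data.shape[1] - kw + 1
    output = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            output[i, j] = np.sum(data[i:i + kh, j:j + kw] * kernel)
    return output

Applied to the data and kernel in the test, this reproduces [[-1, 0], [-3, -11], [-5, -17]].
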
Example No. 4
class NeuralNetCritic:
    def __init__(self):
        self.net = NeuralNet(nodes=config["nodes"])

    def getTDError(self, state, newState, reinforcement):
        return self.net.criterion(state, newState, reinforcement)

    def updateEligibility(self, state, isCurrentState=False):
        return True

    def updateValueFunction(self, state, TDError):
        self.net.train(state, TDError)
Example No. 5
def make_model(model=None,
               hidden_layer_sizes=None,
               learning_r=None,
               batch_size_2=None,
               dropout_rate=None,
               epoch=None):
    optimizer = DEFAULT_OPTIMIZER(learning_r)
    loss = 'mean_squared_error'
    return NeuralNet(model, INPUT_SIZE, OUTPUT_SIZE, hidden_layer_sizes,
                     optimizer, loss, batch_size_2, dropout_rate, epoch)
Example No. 6
def main():
    '''Experiment on a NN to simulate y = f(x1, x2) = sin(2*pi*x1)*sin(2*pi*x2).

    Case 11: randomly generated inputs with random weights and biases,
    optimized to yield the best network performance.
    '''

    datafile = './data/perf/data.txt'
    testfile = './data/perf/test.txt'
    trainvcsv = './data/perf/training.csv'
    testcsv = './data/perf/testing.csv'
    imgfile = './data/perf/loss.png'
    wbfile = './data/perf/weights_biases.txt'

    dm.makeDir('./data/perf')

    dm.genRandData(datafile, 50)
    dm.genData(testfile, 10)

    perf_number = 0
    max_perf = 0

    while perf_number < 75:
        dataset = []
        dm.parseData(datafile, dataset)

        NN = NeuralNet(3, True)  # create NN with 3 hidden neurons

        NN.savewb('Weights and Biases before Training', wbfile)

        NN.train(dataset, imgfile, trainvcsv, 500)  # train the neural network

        NN.savewb('Weights and Biases after Training', wbfile)

        testset = []
        dm.parseData(testfile, testset)

        perf_number = NN.test(testset, testcsv)
        if perf_number > max_perf:
            max_perf = perf_number
        print(f'\n\nMax perf: {max_perf}\n\n')

        with open(wbfile, 'a+') as F:
            F.write(f'\n\nMax perf: {max_perf}\n\n')
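
The docstring above targets y = sin(2*pi*x1)*sin(2*pi*x2) on randomly generated inputs. As a rough illustration of the data the dm helpers are expected to produce, a hypothetical generator might look like the sketch below, assuming a plain-text format of one "x1 x2 y" triple per line (the real dm.genRandData format is not shown in this snippet):

import numpy as np

def gen_rand_data(filename, n_samples):
    # Hypothetical stand-in for dm.genRandData: sample (x1, x2) uniformly in
    # [0, 1) and write "x1 x2 y" per line, where y = sin(2*pi*x1)*sin(2*pi*x2).
    x = np.random.rand(n_samples, 2)
    y = np.sin(2 * np.pi * x[:, 0]) * np.sin(2 * np.pi * x[:, 1])
    with open(filename, 'w') as f:
        for (x1, x2), target in zip(x, y):
            f.write(f"{x1:.6f} {x2:.6f} {target:.6f}\n")
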
Example No. 7
	def maxpoolTest(self):
		nn = NeuralNet()

		# Should fail - Wrong size array
		expected = np.array([[4,4], [0,4]])

		data = np.array([[4,0,1,3,4], [0,0,2,4,4], [0,0,4,4,4], [0,0,4,4,4]])
		windowsize = 2 # 2x2 window

		output = nn.maxPool(data, windowsize)

		if output is not None:
			return False

		# Should pass
		expected = np.array([[4,4], [0,4]])

		data = np.array([[4,0,1,3], [0,0,2,4], [0,0,4,4], [0,0,4,4]])
		windowsize = 2 # 2x2 window

		output = nn.maxPool(data, windowsize)

		return self.arrayIsSame(output,expected)
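
The test above assumes a non-overlapping 2x2 max pool that rejects inputs whose dimensions are not divisible by the window size. A minimal NumPy sketch under that assumption (not the project's actual maxPool) is:

import numpy as np

def max_pool(data, windowsize):
    # Reject inputs that the pooling window cannot tile exactly.
    h, w = data.shape
    if h % windowsize != 0 or w % windowsize != 0:
        return None
    # Group the matrix into windowsize x windowsize blocks and keep each block's max.
    blocks = data.reshape(h // windowsize, windowsize, w // windowsize, windowsize)
    return blocks.max(axis=(1, 3))

On the 4x4 input this yields [[4, 4], [0, 4]], while the 4x5 input is rejected with None.
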
Example No. 8
	def activationTest(self):
		nn = NeuralNet()

		data = np.array([[1,0,0,0], [1,1,1,0], [1,0,0,0], [1,0,0,0], [1,0,0,1]])
		kernel = np.array([[1,0,-1], [2,0,-2], [1,0,-1]])
		output = nn.convolve(data, kernel)

		expectedRelu = np.array([[2,2], [3,1], [4,0]])
		expectedBin = np.array([[1,1], [1,1], [1,0]])
		expectedSigmoid = np.array([[0.119,0.119], [0.047,0.269], [0.0180,0.731]])
		expectedTanH = np.array([[0.964,0.964], [0.995,0.762], [0.999,-0.762]])
		expected = np.array([[-1,0], [-3,-11], [-5,-17]])

		reluOutput = nn.activationFunction(output, 'relu')
		binOutput = nn.activationFunction(output, 'binary')
		sigmoidOutput = nn.activationFunction(output, 'sigmoid')
		tanhOutput = nn.activationFunction(output, 'tanh')

		sigmoidOutput = np.round(sigmoidOutput, 3)
		tanhOutput = np.round(tanhOutput, 3)

		if not UnitTests.arrayIsSame(reluOutput, expectedRelu):
			print("Relu activationFunction test failed.")
			print("Expected:")
			print(expectedRelu)
			print("Actual:")
			print(reluOutput)
			return False
		if not UnitTests.arrayIsSame(binOutput, expectedBin):
			print("Binary activationFunction test failed.")
			print("Expected:")
			print(expectedBin)
			print("Actual:")
			print(binOutput)
			return False
		if not UnitTests.arrayIsSame(sigmoidOutput, expectedSigmoid):
			print("Sigmoid activationFunction test failed.")
			print("Expected:")
			print(expectedSigmoid)
			print("Actual:")
			print(sigmoidOutput)
			return False
		if not UnitTests.arrayIsSame(tanhOutput, expectedTanH):
			print("tanh activationFunction test failed.")
			print("Expected:")
			print(expectedTanH)
			print("Actual:")
			print(tanhOutput)
			return False

		return True
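
All four activation modes exercised here are elementwise maps. A rough sketch of what such an activationFunction might look like (names and thresholds are assumptions based on this test, not taken from the project's source) is:

import numpy as np

def activation_function(x, kind):
    # Apply an elementwise nonlinearity selected by name.
    if kind == 'relu':
        return np.maximum(x, 0)           # max(0, x)
    if kind == 'binary':
        return (x > 0).astype(int)        # step function (threshold at 0 is an assumption)
    if kind == 'sigmoid':
        return 1.0 / (1.0 + np.exp(-x))   # logistic sigmoid
    if kind == 'tanh':
        return np.tanh(x)                 # hyperbolic tangent
    raise ValueError("unknown activation: " + kind)
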
Example No. 9
def run(day):
    print("This mETHOD should not be being calleD!")
    day_x, day_y, Gamesplayers = getSortedOrderForDay(day)
    playersList = [item.playerID for items in Gamesplayers for item in items]

    N, M = day_x.shape
    day_X = np.ones((N, M + 1))  # prepend a bias column of ones
    day_X[:, 1:] = day_x
    model = NeuralNet.load(FILENAME_USED_NN)
    score, realLineupIndex, predictedLineupIndex = model.scoreDay(
        day_X, day_y, True)
    realLineup = [playersList[i] for i in realLineupIndex]
    predictedLineup = [playersList[i] for i in predictedLineupIndex]
    print("test accuracy for day:", score)
    print("good players ids:", realLineup)
    print("players selected ids :", predictedLineup)
Example No. 10
def continue_training(train_X, train_y, test_X, test_y, args):
    print("NOT SURE THIS IS WORKING")
    epoch = int(args[2])
    filename = FILENAME_USED_NN
    if len(args) == 4:
        filename = "trainedModels/" + args[3]
    train_x_flat = np.array([item for items in train_X for item in items])
    train_y_flat = np.array([item for items in train_y for item in items])
    test_x_flat = np.array([item for items in test_X for item in items])
    test_y_flat = np.array([item for items in test_y for item in items])
    model_loaded = NeuralNet.load(filename)

    model = make_model(model=model_loaded,
                       learning_r=DEFAULT_LEARNING_RATE,
                       batch_size_2=DEFAULT_BATCH_SIZE,
                       epoch=epoch)
    print("Continue training on :" + str(len(train_y_flat)) + "games")
    model.fit(train_x_flat, train_y_flat, test_x_flat, test_y_flat)
    model.save(filename)
Example No. 11
#!/usr/bin/python

import numpy as np
import matplotlib.pyplot as plt
from neuralNet import NeuralNet
from textProcessing import TextProcessing

plt.ion()

proc = TextProcessing()
# Words are mapped as images, so a large input layer is needed:
wordLength = 20
layerLength = proc.maxChar-proc.minChar
externalSize = layerLength*wordLength
nn = NeuralNet(externalSize, externalSize, externalSize // 2)

dictFile = open('corncob_lowercase.txt','r')
rawWords = dictFile.read()
dictFile.close()

print(len(rawWords), type(rawWords), len(rawWords.split()))
dictOfWords = rawWords.split()

alphabet = 'abcdefghijklmnopqrstuvwxyz'
iterNb = 0
errorEvolution=[]
for i in range(100):
#while abs(nn.endError) > 0.01:
	#inputtext = alphabet[np.random.randint(0,len(alphabet)-1)] 
	#answer=""
	# Randomly pick a word from dict :
Example No. 12
    def __loadNNets(self, name, includeOptimisers=False):
        nNetA = NeuralNet(in_channels=3).to(self.device)
        nNetB = NeuralNet(in_channels=4).to(self.device)
        nNetC = NeuralNet(in_channels=4).to(self.device)

        if name is not None:
            name = name.replace(".pth", "a.pth")
            nNetA.load(name)

            name = name.replace("a.pth", "b.pth")
            nNetB.load(name)

            name = name.replace("b.pth", "c.pth")
            nNetC.load(name)
        else:
            nNetA.loadMostRecent("a.pth")
            nNetB.loadMostRecent("b.pth")
            nNetC.loadMostRecent("c.pth")

        nnets = nNetA, nNetB, nNetC
        if includeOptimisers:
            optimisers = tuple(torch.optim.Adam(
                N.parameters(), lr=0.001) for N in nnets)
            return nnets, optimisers
        else:
            return nnets
Example No. 13
from optimizer import Optimizer
from neuralNet import NeuralNet
import functions

size = [1, 2, 3]
training_examples = [1, 4, 5]
training_targets = [2, 8, 10]
model = NeuralNet(size, last_activation_function=functions.relu)
model.fit(10, training_examples, training_targets, 0.1)
Example No. 14
	def trainTest(self, dataFilepath = None):
		nn = NeuralNet()

		nn.train()
		return
Example No. 15
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# These two lines are necessary to find source files!!!
import sys
sys.path.append('../src')

from neuralNet import NeuralNet, DataSet
from files import files

if __name__ == '__main__':
    f = files["haberman"]
    ds = DataSet(f)
    n = NeuralNet([3, 1, 2], ds.dataMatrix, numericalEvaluation=True)
    n.startTraining(1)
Example No. 16
    def __init__(self):
        self.net = NeuralNet(nodes=config["nodes"])
Example No. 17
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2, color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4),
                cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)

    return img


num_classes = 80
classes = loadClasses("data/coco.names")
confidence = 0.5
nms_thresh = 0.4
dimension = 416

print("loading network")
model = NeuralNet("yolov3.cfg")
model.loadWeights("yolov3.weights")
print("network loaded")

model.networkInfo["height"] = 416
model.cuda()
model.eval()

camera = cv2.VideoCapture(0)

while True:
    ret, frame = camera.read()
    image, orig_im = preprocess(frame, 416)

    image = image.cuda()
Example No. 18
#!/usr/bin/python

import numpy as np
import matplotlib.pyplot as plt
import brewer2mpl

from neuralNet import NeuralNet
from textProcessing import TextProcessing

plt.ion()

inputSize = 10
nn = NeuralNet(inputSize, inputSize, 4)

# Let's learn noisy one-hot patterns of length inputSize (a single 1-value, the rest 0)
#dataset =
onePos = [3, 7, 2, 5, 0, 1, 4, 6, 8]
patterns = [[0.0 if elem != onePos[samp] else 1.0 for elem in range(inputSize)] for samp in range(len(onePos))] 

print(patterns)

datasetSize = 500
dataset = []
# generate dataset of size 500
for d in range(datasetSize):
	pat = np.random.randint(0,len(patterns))
	sample = [patterns[pat][elem]+0.25*np.random.rand() for elem in range(inputSize)]
	dataset.append(sample)

#print "dataset"
#print dataset