Example #1
def makeNet(self):
    net = NeuralNet(self.inSize, self.outSize, self.hidSize)
    # Accumulate the weights of all enabled connections into a dense matrix.
    weights = [[0] * self.size() for i in xrange(self.size())]
    for c in self.conn:
        weights[c.fro][c.to] += c.w if c.on else 0
    for i in xrange(self.size()):
        for j in xrange(self.size()):
            if weights[i][j] != 0:
                net.addSynapse(i, j, weights[i][j])
    return net
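For reference, here is a minimal sketch of the NeuralNet interface this method assumes. Only the constructor arguments and addSynapse are taken from the call sites above; everything else is a hypothetical stand-in, not the project's actual class:

class NeuralNet(object):
    """Hypothetical stand-in for the project's NeuralNet class."""
    def __init__(self, inSize, outSize, hidSize):
        self.inSize, self.outSize, self.hidSize = inSize, outSize, hidSize
        self.synapses = []  # (from, to, weight) triples

    def addSynapse(self, fro, to, w):
        # Register a weighted connection between two node indices.
        self.synapses.append((fro, to, w))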
Example #2
File: xor2.py Project: Tug/mnist
def xor():
    inputs = array([[0, 0], [0, 1], [1, 0], [1, 1]])
    targets = array([[0, 1], [1, 0], [1, 0], [0, 1]])
    net = NeuralNet([2, 2, 2], 0.1, 0.99)
    for i in xrange(10000):
        E = net.learn(inputs, targets)
        if i % 100 == 0:
            print "Error =", E
    for inp in inputs:
        print inp, '->', net.test(inp)
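The targets here are one-hot over two output units: [1, 0] encodes "XOR is 1" and [0, 1] encodes "XOR is 0". Assuming net.test returns the two output activations, decoding a prediction is a single argmax (a sketch, not part of the original project):

import numpy as np

def predict_xor(net, inp):
    # The first output unit encodes "XOR = 1", the second "XOR = 0",
    # so predict 1 exactly when the first unit wins.
    return int(np.argmax(net.test(inp)) == 0)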
Example #3
def main():

    set_backend("gnumpy")
    #set_gradcheck_mode(True)

    ######################################################
    # Load MNIST dataset
    tic()
    data = load_mnist(digits=range(10),
                      split=[85,0,15],
                      #split=[30,0,0]   # for faster training when debugging
                      )
    print ("Data loaded in %.1fs" % toc())

    ######################################################
    # Create a neural network with matching input/output dimensions
    cfg = NeuralNetCfg(L1=1e-7*0,init_scale=0.1**2)  # the *0 disables L1 regularization while keeping the value handy
    cfg.input(data.Xshape,dropout=0.2)
    cfg.hidden(800,"logistic",dropout=0.5,maxnorm=4.0)
    cfg.hidden(800,"logistic",dropout=0.5,maxnorm=4.0)
    cfg.output(data.Yshape,"softmax",maxnorm=4.0)

    model = NeuralNet(cfg)

    ######################################################
    # Rescale the data to match the network's domain/range
    data.rescale(model.ideal_domain(),model.ideal_range())

    ######################################################
    # Train the network
    report_args = { 'verbose'   : True,
                    'interval'  : 10,       # how many epochs between progress reports (larger is faster)
                    'visualize' : True}

    trainer = TrainingRun(model,data,report_args,
                          learn_rate=10,
                          learn_rate_decay=.998,
                          momentum=[(1,.5),(400,0.99)],
                          batchsize=128)

    tic()
    trainer.train(3000)
    print ("Training took %.1fs" % toc())

    #####################################################
    
    if get_gradcheck_mode():
        model.gradcheck(data.train)

    raw_input()  # keep the process (and any visualization windows) alive until Enter is pressed
Example #4
File: digits.py Project: Tug/mnist
def digits():
    our_dataset = filter(isOneOfOurDigit, dataset)
    our_testset = filter(isOneOfOurDigit, testset)  # test on the held-out testset, not the training data
    imagelen = len(our_dataset[0][0])
    net = NeuralNet([imagelen, imagelen, nbDigit])
    for i in xrange(len(our_dataset)):
        E = net.backProp(our_dataset[i][0], our_digits_bin[our_dataset[i][1]])
        if i % 100 == 0:
            print "Error =", E
    nbError = 0
    for i in xrange(len(our_testset)):
        r = net.test(our_testset[i][0])
        if reverseFlaggedArray(r) != our_testset[i][1]:
            nbError += 1
    print "error rate : ", (nbError + 0.0) / len(our_testset) * 100, "%"
Example #5
def main():
    start_time = time.time()

    parser = argparse.ArgumentParser()
    parser.add_argument("filename", help="Output png.")
    parser.add_argument("-i", help="Number of input nodes.", type=int, default=16)
    parser.add_argument("-l", help="Number of hidden layers.", type=int, default=12)
    parser.add_argument("-x", help="Image width.", type=int, default=800)
    parser.add_argument("-y", help="Image height.", type=int, default=600)

    args = parser.parse_args()

    # Initialize settings
    NetSettings = namedtuple("NetSettings", "input hidden output")
    ImgSettings = namedtuple("ImgSettings", "width height "
                                            "mapR maxR "
                                            "mapG maxG "
                                            "mapB maxB")

    netSettings = NetSettings(args.i, args.l, 3)
    imgSettings = ImgSettings(args.x, args.y,
                              2, 155,   # all three channels read network output 2,
                              2, 155,   # so the rendered image is grayscale
                              2, 155)

    data = NeuralNet.initializeData(imgSettings.width, imgSettings.height)

    # Output Image
    outputImage = np.empty(imgSettings.width * imgSettings.height * 3, dtype=np.uint8)

    inputLayer, layers, weights = NeuralNet.buildNeuralNet(netSettings.input, netSettings.hidden, netSettings.output)

    result = NeuralNet.runNeuralNet(inputLayer, layers[-1], data)

    # transform result in image data
    for index in range(len(data)):
        outputImage[index * 3] = int(result[index][imgSettings.mapR] * imgSettings.maxR)
        outputImage[index * 3 + 1] = int(result[index][imgSettings.mapG] * imgSettings.maxG)
        outputImage[index * 3 + 2] = int(result[index][imgSettings.mapB] * imgSettings.maxB)

    with open(args.filename, 'wb') as outfile:  # PNG output must be written in binary mode
        pngWriter = png.Writer(imgSettings.width, imgSettings.height)
        pngWriter.write(outfile, np.reshape(outputImage, (-1, imgSettings.width*3)))

    print("--- %s seconds ---" % (time.time() - start_time))
Example #6
def digitstest():
    our_dataset = filter(isOneOfOurDigit, dataset)
    our_testset = filter(isOneOfOurDigit, testset)
    normalize(our_dataset)
    normalize(our_testset)
    imagelen = len(our_dataset[0][0])
    eta = 0.01
    m = 0.9
    hiddenN = 600
    limit = int(len(our_dataset)*sep)
    print "original training set: ", len(our_dataset)
    print "training set: ", limit+1, "validation set: ", len(our_dataset) - limit
    print "test set", len(our_testset)
    net = NeuralNet([imagelen, hiddenN, nbDigit], eta, m, 0.1)  # use the hiddenN reported below
    errors = []
    meanError = 1
    meanWidth = 2000
    # training
    for i in xrange(1, limit):
        E = net.backProp(our_dataset[i][0], our_digits_bin[our_dataset[i][1]])
        meanErrorBak = meanError
        meanError = ((meanWidth-1)*meanError + E)/meanWidth
        errors.append(meanError)
    plotError(errors, "error training")
    errors = []
    meanError = 1
    meanErrorBak = 1
    h = 50
    # validation
    for i in xrange(limit, len(our_dataset)):
        E = net.backProp(our_dataset[i][0], our_digits_bin[our_dataset[i][1]])
        meanError = ((meanWidth-1)*meanError + E)/meanWidth
        errors.append(meanError)
        if i % h == 0:
            slope = (meanError - meanErrorBak)/h
            meanErrorBak = meanError
            if slope > -0.0001:
                print "early-stopping %d/%d" %(i+1-limit, len(our_dataset)-limit)
                break
    plotError(errors, "Error training+validation (eta=%1.2f, m=%1.2f, n=%d)" %(eta, m, hiddenN))
    Egen = test(net, our_testset)
    print "%f;%f;%d;%f" %(eta, m, hiddenN, Egen*100)
Example #7
def makePickledObjects():
    data = NeuralNetUtil.buildExamplesFromPenData() 
    net = NeuralNet.buildNeuralNet(data, weightChangeThreshold=0.00075,hiddenLayerList = [24])[0]
    print net
    f = open('test_cases/nnet','w')
    cPickle.dump(net,f)
    f.close()
    
    f = open('test_cases/percep','w')
    cPickle.dump(net.layers[0][1],f)
    f.close()
Example #8
def main(viz=False):

    tic()
    data = load_mnist()
    print ("Data loaded in %.1fs" % toc())

    # Create a neural network with matching input/output dimensions
    #
    cfg = NeuralNetCfg(L1=1e-6,init_scale=0.05)
    cfg.input(data.Xshape)
    cfg.hidden(800,"logistic",dropout=0.5)
    cfg.hidden(800,"logistic",dropout=0.25)
    cfg.output(data.Yshape,"softmax")

    model = NeuralNet(cfg)

    # Rescale the data to match the network's domain/range
    #
    data.rescale(model.ideal_domain(),model.ideal_range())

    # Train the network
    #
    report_args = { 'verbose'   : True,
                    'interval'  : 5,       # how many epochs between progress reports (larger is faster)
                    'window_size' : "compact",
                    'visualize' : viz}

    trainer = TrainingRun(model,data,report_args,
                          learn_rate=2,
                          learn_rate_decay=.995,
                          momentum=[(0,.5),(400,0.9)],
                          batchsize=64)

    print "Memory available after data loaded:", memory_info(gc=True)

    tic()
    trainer.train(100)  # train for several epochs
    print ("Training took %.1fs" % toc())
Example #9
def acc_neuralnet():
    """basic neural net to check how good it can build a model , and as a comparsion to hybrid model"""
    ind = 0
    report = pd.DataFrame(index=range(0),
                          columns=[
                              'Stock Name', 'accuracy', 'profit count',
                              'loss count', 'total no of rise',
                              'total number of loss'
                          ])
    for i in bar(xrange(len(x))):
        p_count, total_count_p, l_count, total_count_l, accuracy = NeuralNet.neuralnet(
            x[i])
        report.loc[ind] = [
            x[i], accuracy, p_count, l_count, total_count_p, total_count_l
        ]
        ind = ind + 1
    print "Mean accuracy----------", report['accuracy'].mean()
    report.to_csv("./report/neuralnet_results.csv")
Example #10
def q3Test(testFile, module=NeuralNet):
    testFuncName = testFile.readline().strip()
    getData = getattr(NeuralNetUtil, testFuncName)
    examples, tests = getData()
    testRangeStart = testFile.readline().strip()
    testRangeEnd = testFile.readline().strip()
    testRangeStart = 0 if testRangeStart == 'None' else int(testRangeStart)
    testRangeEnd = len(examples) if testRangeEnd == 'None' else int(
        testRangeEnd)
    examples = examples[testRangeStart:testRangeEnd]
    sNet = NeuralNet.NeuralNet([16, 24, 10])

    with open('test_cases/nnet') as f:
        net = cPickle.load(f)
    copyWeights(sNet, net)

    solution = sNet.backPropLearning(examples, 0.1)
    return solution
Example #11
    def __init__(self, root):

        self.root = root
        root.title("Movie Recommendation")
        self.canvas = tk.Canvas(root, width=1000, height=1000)
        self.canvas.place(x=0, y=0, anchor=tk.NW)

        self.nMoviesPicked = 0
        self.nMaxMoviesPicked = 10  #The total number of movies the user will pick as "Liked" or "Not Liked"
        self.Network = NeuralNet.NeuralNet(dfMovieData)

        self.imDB = imdb.IMDb()

        self.font = "SourceCodePro"
        self.listMoviesReccomend = []
        self.movieSample = None

        self.placeButtons()
        self.askMovie()  # Start
Example #12
def q4Test(testFile,module=NeuralNet):
    testFuncName = testFile.readline().strip()
    getData = getattr(NeuralNetUtil, testFuncName)
    examples = getData()
    testRangeStart = testFile.readline().strip()
    testRangeEnd = testFile.readline().strip()
    testRangeStart = 0 if testRangeStart=='None' else int(testRangeStart)
    testRangeEnd = len(examples) if testRangeEnd=='None' else int(testRangeEnd)
    examples = (examples[0][testRangeStart:testRangeEnd],examples[1][testRangeStart:testRangeEnd])
    
    alph = float(testFile.readline())
    weight = float(testFile.readline())
    
    with open('test_cases/nnet') as f:
        net = cPickle.load(f)

    sNet = NeuralNet.NeuralNet([16, 24, 10])
    copyWeights(sNet, net)
    
    solution = NeuralNet.buildNeuralNet(examples, alpha=alph, weightChangeThreshold = weight,startNNet = sNet)
    return solution[1]
Example #13
def train(model, clusterNum, components):
    d = getShots()
    data = []
    results = []
    values = []
    scored = 0
    expected = 0
    for shot in d[clusterNum]:
        data.append([
            shot.distance_ten_seconds, shot.distance_total_game, shot.velocity,
            shot.distance_closest_def, shot.angle_closest_def,
            shot.distance_second_def, shot.angle_second_def,
            shot.angle_closest_teammate, shot.distance_closest_teammate,
            shot.shot_distance, shot.shot_angle, shot.offense_hull,
            shot.defense_hull, shot.shot_clock, shot.catch_and_shoot
        ])
        results.append(shot.result)
        values.append(shot.value)
    if model == "SVM":
        prob = SVM.predict(data, components)[0]
        shot_prob = [each[1] for each in prob]
    if model == "NaiveBayes":
        prob = NaiveBayes.predict(data, components)[0]
        shot_prob = [each[1] for each in prob]
    if model == "NeuralNet":
        prob = NeuralNet.predict(data, components)
        shot_prob = [each[1] for each in prob]
    print(shot_prob)
    print(len(shot_prob))
    for i in range(len(data)):
        value = values[i]
        if results[i] == 1:
            scored += value
        expected += (shot_prob[i] * value)
    #print(count/total)
    print("Expected points:", str(expected), "Actual points:", str(scored))
    return (expected)
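The final comparison is expected points (make-probability times shot value, summed) versus points actually scored. A worked example with hypothetical numbers:

# Hypothetical: two 3-point attempts with make probabilities 0.4 and 0.3,
# of which only the first went in.
values = [3, 3]
results = [1, 0]
shot_prob = [0.4, 0.3]
expected = sum(p * v for p, v in zip(shot_prob, values))   # 2.1
scored = sum(v for v, r in zip(values, results) if r)      # 3
print("Expected points:", expected, "Actual points:", scored)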
Example #14
def main():
    start_time = time.time()
    time_periods = ['day', 'week', 'month', 'year']
    fileObject = open("stock_symbol_list.pickle", 'rb')
    symbols = pickle.load(fileObject)
    fileObject.close()
    #symbols = ['DVN', 'MMM', 'YYY', 'AAPL','ADBE','AMZN']
    #symbols = ['DVN']
    result_list = []
    random.shuffle(symbols)
    #for time_period in time_periods:
    #    result_list.append("==================")
    #    result_list.append(time_period)
    #    stockdata_dict, max_num_of_days = DownloadData.DownloadData(symbols[:1], time_period)
    #    stockdata_dict = RemoveOutliers.RemoveOutliers(stockdata_dict)
    #    for i in range(10, 1000, 10):
    #        if i < max_num_of_days:
    #            r_squared, rmse = NeuralNet.NeuralNet(stockdata_dict, max_num_of_days, i)
    #            result_list.append("num of days: "+str(i))
    #            result_list.append("r_squared: "+str(r_squared))
    #            result_list.append("rmse: " + str(rmse))

    #            fileObject = open("result_list.pickle",'wb') # open the file for writing
    #            pickle.dump(stockdata_dict,fileObject)
    #            fileObject.close()

    fileObject = open("stockdata_dict_year_FULL_tuesday.pickle", 'rb')
    max_num_of_days = 13
    stockdata_dict = pickle.load(fileObject)
    #stockdata_dict = RemoveOutliers.RemoveOutliers(stockdata_dict)
    fileObject.close()
    r_squared, rmse = NeuralNet.NeuralNet(stockdata_dict, max_num_of_days, 10)

    elapsed_time = (time.time() - start_time) / 60
    print("This took " + str(elapsed_time) + " minutes.")
    print("DONE!")
Example #15
def testGame(nets):
    # TEST GAME FOR NET
    # 3 outputs, 1 is worth 2 points, 2 are worth -1
    # 1 input, telling where the 2 point output is
    scores = []
    for net in nets:
        totalScore = 0
        points = [2, -1, -1]
        for trial in range(10):
            #random.shuffle(points)
            prize = 0
            for i in range(len(points)):
                if points[i] == 2:
                    prize = i
            inputs = []
            inputs.append(prize - 1)
            output = NN.calcNetwork(net, inputs)
            score = 0
            for i in range(len(output)):
                score += output[i] * points[i]
            totalScore += score
        totalScore = totalScore / 10
        scores.append(totalScore)
    return scores
Example #16
    def reproduce(self, other, crossRate, mutRate):
        weights1 = self.NN.getFlattenedValues()
        weights2 = other.NN.getFlattenedValues()
        #assert(len(weights1) == len(weights2))
        childWeights = copy.copy(weights1)
        #crossover stage
        if (random.random() < crossRate):
            pos = random.randint(0, len(childWeights) - 1)
            childWeights[pos:] = weights2[pos:]

        #mutation stage
        for i in range(len(childWeights)):
            if (random.random() < mutRate):
                weight = childWeights[i]
                changePercent = (2 * random.random()) - 1
                weight = weight * (1 + changePercent)  #mutate
                childWeights[i] = weight

        childNumInputs = self.NN.numInputs
        childNumOutputs = self.NN.numOutputs
        childNumHiddenLayerNeurons = self.NN.numLayerNeurons[:-1]
        childNN = NeuralNet.NeuralNet(childNumInputs, childNumOutputs,
                                      childNumHiddenLayerNeurons, childWeights)
        return individual(childNN)
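The crossover stage above is single-point crossover: the child starts as a copy of parent 1 and, with probability crossRate, takes parent 2's genes from a random position onward. The same idea on plain lists (an illustrative sketch, independent of the NN classes):

import random

def crossover(w1, w2, cross_rate):
    child = list(w1)
    if random.random() < cross_rate:
        pos = random.randint(0, len(child) - 1)
        child[pos:] = w2[pos:]  # tail of the genome comes from parent 2
    return child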
Example #17
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device('cpu')
    num_action = 2
    num_state = 4
    num_process = 5

    global_Actor = NeuralNet.ActorNet(inputs = num_state, outputs = num_action,num_hidden_layers = 3 , hidden_dim = 32).to(device)
    #summary(global_Actor, input_size=(10,num_state))
    global_Critic = NeuralNet.CriticNet(inputs = num_state, outputs = 1,num_hidden_layers = 3 , hidden_dim = 32).to(device)
    #summary(global_Critic, input_size=(10,num_state))
    batch_size = 1
    GAMMA = 0.99
    max_episodes = 1000
    max_step = 1000
    global_Actor.share_memory()
    global_Critic.share_memory()

    processes = []
    processes_socket = []
    processes_agent = []
    mp.set_start_method('spawn')
    print("MP start method:",mp.get_start_method())

    ip = '110.76.78.109'
    port = 1111
    for rank in range(num_process):
        sock = ClientSocket.MySocket(port, 'f', 'ffff?f')
        processes_socket.append(sock)
        agent = Agent.Brain(GlobalActorNet=global_Actor, GlobalCriticNet=global_Critic,
                            device=device, socket=sock, num_action=num_action,
                            max_episodes=max_episodes, max_step=max_step,
                            batch_size=batch_size, GAMMA=GAMMA)
        processes_agent.append(agent)
        p = mp.Process(target=agent.train, args=(global_Actor, global_Critic))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

    NeuralNet.save_model(global_Actor, "D:/modelDict/actor")
    NeuralNet.save_model(global_Critic, "D:/modelDict/critic")
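share_memory() moves the global actor and critic parameters into shared memory, so every spawned worker reads and updates the same weights. A minimal sketch of that shared-parameter pattern with a toy model (hypothetical; the project's Agent.Brain and sockets are not reproduced here):

import torch
import torch.multiprocessing as mp

def worker(shared_model):
    # Each process optimizes the same shared parameters in place.
    opt = torch.optim.SGD(shared_model.parameters(), lr=0.01)
    for _ in range(10):
        loss = (shared_model(torch.randn(1, 4)) ** 2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()

if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    model.share_memory()
    mp.set_start_method("spawn")
    procs = [mp.Process(target=worker, args=(model,)) for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()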
Example #18
import random, numpy as np
import NeuralNet as NN
params = [
    100, 0.05, 250, 3, 20
]  # [Init pop (pop=100), mut rate (=5%), num generations (250), chromosome/solution length (3), # winners/per gen]
curPop = np.random.choice(
    np.arange(-15, 15, step=0.01), size=(params[0], params[3]), replace=False
)  #initialize current population to random values within range
nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
fitVec = np.zeros((params[0], 2))  #1st col is indices, 2nd col is cost
for i in range(params[2]):  #iterate through num generations
    fitVec = np.array([
        np.array(
            [x,
             np.sum(NN.costFunction(NN.X, NN.y, curPop[x].reshape(3, 1)))])
        for x in range(params[0])
    ])  #Create vec of all errors from cost function
    print("(Gen: #%s) Total error: %s\n" % (i, np.sum(fitVec[:, 1])))
    winners = np.zeros((params[4], params[3]))  #20x3
    for n in range(len(winners)):  #for n in range(20)
        selected = np.random.choice(range(len(fitVec)),
                                    params[4] // 2,
                                    replace=False)
        wnr = np.argmin(fitVec[selected, 1])
        winners[n] = curPop[int(fitVec[selected[wnr]][0])]
    nextPop[:len(winners)] = winners  #populate new gen with winners
    nextPop[len(winners):] = np.array([
        np.array(
            np.random.permutation(
                np.repeat(winners[:, x],
                          ((params[0] - len(winners)) // len(winners)),
                          axis=0)))
        for x in range(winners.shape[1])
    ]).T  #offspring: per-column shuffles of duplicated winner genes
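The selection step above is a small tournament: sample params[4]//2 candidates at random and keep the lowest-cost one. Isolated, assuming fitVec rows are (population index, cost) pairs as built above:

import numpy as np

def tournament_pick(fitVec, curPop, k):
    # Sample k candidates uniformly and return the lowest-cost individual.
    selected = np.random.choice(len(fitVec), k, replace=False)
    wnr = np.argmin(fitVec[selected, 1])
    return curPop[int(fitVec[selected[wnr]][0])]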
Example #19
import chess.uci
import chess.pgn
import sys
import string
import NeuralNet
import os
import ast

# Neural net topology
topology = [64, 44, 18, 1]

# Assigns the existing trained weights in the text file to the neural net.
with open("./TrainedWeightsText.txt", "r") as myfile:
    weightsF = myfile.read().replace('\n', '')
weightsL = ast.literal_eval(weightsF)
evalNet = NeuralNet.Net(topology)
for layer in range(len(evalNet.layers) - 1):
    for neuron in range(len(evalNet.layers[layer])):
        evalNet.layers[layer][neuron].outputWeights = weightsL[layer][neuron]

path = "./PGNFiles/McDonnell.pgn"

# Trains neural network.
with open(path) as f:
    count = 0
    for n in range(100):
        try:
            print("GAMECOUNT", n)
            game = chess.pgn.read_game(f)
            while not game.is_end():
                node = game.variations[0]
Example #20
import matplotlib.pyplot as plt
#%matplotlib inline
import random as rn, numpy as np
import NeuralNet as NN
# [Init pop (pop=100), mut rate (=10%), num generations (5), chromosome/solution length (3), # winners/per gen]
initPop, mutRate, numGen, solLen, numWin = 100, 0.1, 5, 3, 10
#initialize current population to random values within range
curPop = np.random.choice(np.arange(-15,15,step=0.01),size=(initPop,solLen),replace=False)
nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
fitVec = np.zeros((initPop, 2)) #1st col is indices, 2nd col is cost
for i in range(numGen): #iterate through num generations
    #Create vector of all errors from cost function for each solution
    fitVec = np.array([np.array([x, np.sum(NN.costFunction(NN.X, NN.y, curPop[x].T))]) for x in range(initPop)])
    print("(Gen: #%s) Total error: %s\n" % (i, np.sum(fitVec[:,1])))
    #plt.scatter(i, np.sum(fitVec[:,1]))
    winners = np.zeros((numWin, solLen)) #10x3
    for n in range(len(winners)): #for n in range(10)
        selected = np.random.choice(range(len(fitVec)), numWin // 2, replace=False)
        wnr = np.argmin(fitVec[selected,1])
        winners[n] = curPop[int(fitVec[selected[wnr]][0])]
    nextPop[:len(winners)] = winners #populate new gen with winners
    duplicWin = np.zeros((((initPop - len(winners))),winners.shape[1]))
    for x in range(winners.shape[1]): #for each col in winners (3 cols)
        #Duplicate winners (10x3 matrix) 9 times to create a 90x3 matrix, then shuffle columns
        numDups = (initPop - len(winners)) // len(winners) #num times to duplicate, needs to fill rest of nextPop
        duplicWin[:, x] = np.repeat(winners[:, x], numDups, axis=0)#duplicate each col
        duplicWin[:, x] = np.random.permutation(duplicWin[:, x]) #shuffle each col ("crossover")
    #Populate the rest of the generation with offspring of mating pairs
    nextPop[len(winners):] = np.matrix(duplicWin)
    #Create a mutation matrix, mostly 1s, but some elements are random numbers from a normal distribution
    mutMatrix = [float(np.random.normal(0,2,1)) if rn.random() < mutRate else 1 for x in range(nextPop.size)]
Example #21
import os

import pandas as pd
from torch.utils.data import DataLoader  # assumed source of the DataLoader used below

import NeuralNet
# const, args, WordCharGramDataset and evaluate come from the surrounding project

df_train = pd.read_csv(os.path.join(const.DATAPATH, 'data_train.csv'))
df_valid = df_train.sample(frac=0.1)
df_train = df_train.sample(frac=0.9)
df_test = pd.read_csv(os.path.join(const.DATAPATH, 'data_test.csv'))


# get data type
dtype = 'sent_{}'.format(args.dtype)

# get specific model
if args.net == 'fasttext':
    from NeuralNet.fasttext_model import *
    tndataset = WordCharGramDataset(df_train[dtype].apply(lambda x: x.split()), df_train['target'])
    vldataset = WordCharGramDataset(df_valid[dtype].apply(lambda x: x.split()), df_valid['target'])
    tsdataset = WordCharGramDataset(df_test[dtype].apply(lambda x: x.split()), df_test['target'])
    model = FastText(args)

# get data loader
train_loader, valid_loader, test_loader = DataLoader(tndataset, batch_size=5), DataLoader(vldataset, batch_size=5), DataLoader(tsdataset, batch_size=5)

# train: the neural network has its own dedicated training routine
m = NeuralNet.train(train_loader, valid_loader, model, args)

# test
evaluate(m, test_loader)

if __name__ == '__main__':
    main()
Example #22
        #print("Model weights after training",self.model.get_weights());

        return [train_loss_results, train_accuracy_results]

    def plotStats(self, train_loss_results, train_accuracy_results):
        fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
        fig.suptitle('Training Metrics - Call, Raise, or Fold')

        axes[0].set_ylabel("Loss", fontsize=14)
        axes[0].plot(train_loss_results)

        axes[1].set_ylabel("Accuracy", fontsize=14)
        axes[1].set_xlabel("Epoch", fontsize=14)
        axes[1].plot(train_accuracy_results)

        plt.show()


if __name__ == '__main__':
    import tensorflow as tf
    import NeuralNet

    #pdb.set_trace();
    tf.enable_eager_execution()
    net = NeuralNet.NeuralNet(
        train=False,
        csvPath="/home/the2b/Documents/school/ai/project/src/test6.csv",
        dataUrl="http://127.0.0.1/test6.csv")
    trainingRes = net.trainModel(epochs=501, batchSize=72, bufferSize=10000)
    net.plotStats(trainingRes[0], trainingRes[1])
Example #23
import Testing
import NeuralNetUtil
import NeuralNet

n = 0

while True:
    print "------ #", n, "neurons per hidden layer ------"
    x = 0
    xorList = []
    while x < 5:
        print "Iteration #", x + 1
        xor_Net, xor_test = NeuralNet.buildNeuralNet(Testing.xorData, maxItr = 200, hiddenLayerList = [n])
        xorList.append(xor_test)
        x += 1
        
    print "Iteration result:"
    print "Accuracy standard deviation:", Testing.stDeviation(xorList)
    print "Accuracy average:", Testing.average(xorList)
    print "Max accuracy:", max(xorList)

    if Testing.average(xorList) == 1:
        break
    n += 1
Example #24
import NeuralNet
import Testing

print "Question 6:"
print "PenData"
hiddenLayer = 0
while hiddenLayer <= 40:
    print "HiddenLayer: ", hiddenLayer
    iteration = 0
    accuracies = []
    while iteration < 5:
        accuracies.append(
            NeuralNet.buildNeuralNet(Testing.penData,
                                     maxItr=200,
                                     hiddenLayerList=[hiddenLayer])[1])
        iteration += 1
    print "Average:", Testing.average(accuracies)
    print "Standard Deviation:", Testing.stDeviation(accuracies)
    print "Maximum:", max(accuracies)
    hiddenLayer += 5

print "CarData"
hiddenLayer = 0
while hiddenLayer <= 40:
    print "HiddenLayer: ", hiddenLayer
    iteration = 0
    accuracies = []
    while iteration < 5:
        accuracies.append(
            NeuralNet.buildNeuralNet(Testing.carData,
                                     maxItr=200,
                                     hiddenLayerList=[hiddenLayer])[1])
        iteration += 1
    print "Average:", Testing.average(accuracies)
    print "Standard Deviation:", Testing.stDeviation(accuracies)
    print "Maximum:", max(accuracies)
    hiddenLayer += 5
Example #25
database_dir = 'teac600dataBIG_20amb'
batch_size = 400
train_steps = 400 * 1000
epochs = 1

#os.mkdir('data')

fol = 'data/features{}'.format(f)
os.mkdir(fol)
os.mkdir(fol + '/losses')
os.mkdir(fol + '/models')
f_name = os.path.basename(__file__)
shutil.copyfile(f_name, 'data/SourceCode.txt')

#Loading the W.csv and b.csv files from database folder
W = student.get_weights_and_biases(folder=database_dir, file='W.csv')
b = student.get_weights_and_biases(folder=database_dir, file='b.csv')

#deducing the architecture of the teacher from W and b
arc = [len(W[i]) for i in range(len(W))]
arc.append(len(W[len(W) - 1][1]))

max_fea = arc[0]
k = [1 if i < f else 0 for i in range(max_fea)]

print('teacher architecture:', arc)

teach = teacher.Teacher(scale=k,
                        architecture=arc,
                        W=W,
                        b=b,
Example #26
import matplotlib.pyplot as plt
import sys
import load_datasets
import NeuralNet  # import the neural network class
import DecisionTree  # import the decision tree class

# import any other files and classes you have developed
# import other libraries as needed, except those that do machine learning

decision_tree_iris = DecisionTree.DecisionTree()
decision_tree_congress = DecisionTree.DecisionTree()
decision_tree_monks1 = DecisionTree.DecisionTree()
decision_tree_monks2 = DecisionTree.DecisionTree()
decision_tree_monks3 = DecisionTree.DecisionTree()

rn_iris = NeuralNet.NeuralNet()
rn_congress = NeuralNet.NeuralNet()
rn_monks1 = NeuralNet.NeuralNet()
rn_monks2 = NeuralNet.NeuralNet()
rn_monks3 = NeuralNet.NeuralNet()

# Load/read the datasets
(train_iris, train_labels_iris, test_iris,
 test_labels_iris) = load_datasets.load_iris_dataset(0.7)
(train_congress, train_labels_congress, test_congress,
 test_labels_congress) = load_datasets.load_congressional_dataset(0.7)
(train_monks1, train_labels_monks1, test_monks1,
 test_labels_monks1) = load_datasets.load_monks_dataset(1)
(train_monks2, train_labels_monks2, test_monks2,
 test_labels_monks2) = load_datasets.load_monks_dataset(2)
(train_monks3, train_labels_monks3, test_monks3,
Example #27
 #   testDow.append(dow[i])


#test = createTests(trainData, testData, trainDow, testDow)
test = createTests(trainData, testData)
#print test
#test = ([([0,0,0],[0]), ([0,0,1],[0]), ([0,1,1],[1]), ([1,0,1],[1])], [([1,0,0],[0]), ([1,0,1],[1]), ([0,0,0],[0]), ([0,1,1],[1])])

sizes = []
acc = []
plotNet = []
plotReal = []

#for r in range(10, 60, 5):
for i in range(0,1):
    results, nnet, accuracy = NeuralNet.buildNeuralNet(test, 0.1, 0.00005, [5])
#    acc.append(accuracy)
#sizes.append(acc)
    correct = 0
    incorrect = 0
    percentError = 0.0
    a = 0.0
    b = 1.0
    for r in results:
        nnetPrice = ((r[1] * (maxPrice - minPrice) - a) / (b - a)) + minPrice
        knownPrice = ((r[0] * (maxPrice - minPrice) - a) / (b - a)) + minPrice
        plotNet.append(nnetPrice)
        plotReal.append(knownPrice) 
        if abs(nnetPrice - knownPrice) < knownPrice * 0.015:
            correct += 1
        else:
            incorrect += 1
Example #28
        return frame, self.reward, self.game_over

    def get_frames(self):
        return np.array(list(self.frames))

    def get_ambient_data(self):
        return [self.ball_x - (self.paddle_x + self.PADDLE_WIDTH / 2)]


if __name__ == "__main__":
    game = CatchGame()
    NAME = 'weuler'

    brain = NeuralNet.NeuralNet([], [],
                                1,
                                5,
                                1,
                                saved_weight1=w1,
                                saved_weight2=w2)

    game.reset()
    input_t = game.get_frames()
    game_over = False

    report = []

    while not game_over:
        output = brain.get_output(game.get_ambient_data())

        if output > 0.5:
            action = 2
        else:
Example #29
import numpy as np
import copy
import NeuralNet
import load_datasets

# X = (hours sleeping, hours studying), y = score on test
X = ([2, 9], [1, 5], [3, 6])
y = np.array(([92], [86], [89]), dtype=float)

X = [[0,0,1], [0,1,1], [1,0,1], [1,1,1]]  # overrides the sleep/study toy data above with an XOR problem

y = [0, 1, 1, 0]

n = 1

train_iris, train_labels_iris, test_iris, test_labels_iris = load_datasets.load_iris_dataset(0.03)
train_votes, train_labels_votes, test_votes, test_labels_votes = load_datasets.load_congressional_dataset(0.02)
train_monks, train_labels_monks, test_monks, test_labels_monks = load_datasets.load_monks_dataset(n)

train = train_votes
labels = train_labels_votes

NN = NeuralNet.NeuralNet(1, 2, len(train[0]), 1)
for i in xrange(1000):
    NN.train(train, labels)

print "Actual Output: \n" + str(labels)
print "Predicted Output: \n" + str(NN.forward(train).T[0])

Example #30
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    y,
                                                    train_size=train_size,
                                                    test_size=test_size)

# SGD parameters
epochs = 1000
batch_size = int(len(Y_train) / 32)
n_features = X_train.shape[1]
eta = 0.001
lmb = 1e-4

activation = [lrf.leaky_relu, lrf.nooutact]
derivative = [lrf.leaky_relu_deriv, lrf.nooutact_deriv]

# Creating the network object and defining the hyperparameters
neural_net = nn.ANN(lmb = lmb, bias = 0.01, eta = eta, early_stop_tol = 1e-7,\
                                early_stop_nochange = 2000, mode = 'regression', regularization = 'l2')
# Adding layers
neural_net.add_layers(n_features=[n_features, 20],
                      n_neurons=[20, 1],
                      n_layers=2)

# Training the network
neural_net.train(epochs, batch_size, X_train, Y_train, activation, derivative \
                             , X_test, Y_test, verbose = False)
# performance metrics
pred = neural_net.feed_out(X_test, activation)

reg = sklearn.neural_network.MLPRegressor(hidden_layer_sizes=(20),
                                          activation='logistic',
                                          batch_size=batch_size,
                                          learning_rate='adaptive',
Example #31
def HW12_P2(train_data, test_data):
    NeuralNet.DrawLinearContour(train_data, -1, 1, -1, 1)
Example #32
import Testing
import NeuralNetUtil
import NeuralNet

numNeurons = 0
while 1:
	print "--------------running with", numNeurons , "neurons per hidden layer------------------"
	i = 0
	acclist = []
	while i < 5:
		print "running iteration #", i+1
		nnet, testAccuracy = NeuralNet.buildNeuralNet(Testing.xorData,maxItr = 200, hiddenLayerList = [numNeurons])
		acclist.append(testAccuracy)
		i = i + 1

	print "Iterations finished"
	print "accuracy average:", Testing.average(acclist)
	print "accuracy standard deviation:", Testing.stDeviation(acclist)
	print "max accuracy:", max(acclist)
	if Testing.average(acclist) == 1:
		break
	numNeurons = numNeurons + 1
Example #33
import NeuralNet as nNet
import numpy as np
# simulated inputs: x,y position and distance to the pipe; 3 inputs, 1 output
nn = nNet.NeuralNetwork([3, 2, 1], activation="tanh")

X = np.array([250, 250, 40])

print(nn.predict(X))
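Note that raw pixel-scale inputs in the hundreds will saturate tanh units, so the prediction above is likely pinned near the extremes. A common remedy, sketched with assumed coordinate ranges (the real screen/pipe dimensions are not in the source), is to rescale features to roughly [-1, 1] first:

X_scaled = X / np.array([500.0, 500.0, 100.0])  # assumed ranges, illustrative only
print(nn.predict(X_scaled))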
Example #34
import simulation
from characters import *
import NeuralNet as nn
import copy
import multiprocessing
from multiprocessing import Pool
from functools import partial
import json
import pickle

if __name__ == "__main__":
    scoresOverTime = []
    generation = 1
    populationCount = 50
    population = [AiPlayer(brain=nn.Brain([0,0,0], [0,0,0])) for _ in range(0,populationCount)]
    mode = "m"
    while mode == "m" or mode == "s":
        try:
            results = []
            print(f"Generation: {generation}")
            if mode == "m":
                pool = Pool(4)
                results = pool.map(simulation.run, population)
                pool.close()
                pool.join()
            else:
                for player in population:
                    results.append(simulation.run(player))
            results = sorted(results, key=lambda k: k['score'], reverse=True)
            best = results[0]
            maxScore = best['score']
Example #35
from NeuralNet import *
from Config import Config
import tflowtools as TFT
import sys
from CaseManager import *
import numpy as np
import mnist.mnist_basics as mnist
from Case import *
import CSVReader

if __name__ == '__main__':
    args = ArgumentParser.parseArgs()
    config = Config(args)
    caseManager = CaseManager(
        config,
        cfunc=lambda: config.src_function(*config.src_args),  # * unpacks list arguments
        vfrac=config.vfrac,
        tfrac=config.tfrac,
        case_fraction=config.case_fraction,
        src_function=config.src_function,
        src_args=config.src_args,
        src_path=config.src_file_path)

    nn = NeuralNet(config, caseManager)

    nn.do_training()

    # nn.do_testing()
    # TFT.fireup_tensorboard('probeview')
Example #36
    def getMove(self, state, w_prey, w_hunter):
        #Case: prey. set parameters
        if state.turn == Config.PREY_TURN:
            self_x = state.prey.head().x
            self_y = state.prey.head().y
            self_dir = state.prey.dir
            food_x = state.food.x
            food_y = state.food.y
            other_x = state.hunter.head().x
            other_y = state.hunter.head().y
            weight = w_prey
        #Case: hunter. set parameters
        else:
            self_x = state.hunter.head().x
            self_y = state.hunter.head().y
            self_dir = state.hunter.dir
            food_x = state.food.x
            food_y = state.food.y
            other_x = state.prey.head().x
            other_y = state.prey.head().y
            weight = w_hunter

        #Determine adjacent blocks
        adjacent_direction_blocks = direction_blocks(self_x, self_y, self_dir, Config.mapSize)

        #Determine status (blocked vs empty) of adjacent blocks
        front_block_status = state.is_empty(Block(adjacent_direction_blocks[0][0], adjacent_direction_blocks[0][1]))
        left_block_status = state.is_empty(Block(adjacent_direction_blocks[1][0], adjacent_direction_blocks[1][1]))
        right_block_status = state.is_empty(Block(adjacent_direction_blocks[2][0], adjacent_direction_blocks[2][1]))

        #print(front_block_status, left_block_status, right_block_status)

        #Determine vector to food and other snake
        food_vector = block2block_angle_dist(self_x, self_y, food_x, food_y, self_dir, Config.mapSize)
        other_snake_vector = block2block_angle_dist(self_x, self_y, other_x, other_y, self_dir, Config.mapSize)

        #Determine distance and angle to food and other snake
        food_angle = food_vector[0]
        food_distance = food_vector[1]
        other_snake_angle = other_snake_vector[0]
        other_snake_distance = other_snake_vector[1]

        #Predict next move based on neural network feedforwarding
        predicted_move = nn.forward_propagation(np.array([front_block_status, left_block_status, right_block_status, self_dir/3, food_angle, food_distance, other_snake_angle, other_snake_distance]), weight)

        #Convert predicted move to global direction
        predicted_direction = turn2direction(predicted_move, self_dir)

        #Determine number of available moves
        available_moves = state.get_available_moves()
        available_moves_list = []
        for i in range(len(available_moves)):
            available_moves_list.append(available_moves[i][0])

        #Avoid collision if possible
        if state.next_state(predicted_direction).is_final():
            for i in range(len(available_moves_list)):
                if not state.next_state(available_moves_list[i]).is_final():
                    predicted_direction = available_moves_list[i]

        return predicted_direction
Example #37
def main_NN():
    n = 50
    np.random.seed(1)

    X, x_grid, y_grid, z, z_true = Franke_dataset(n, noise=0.0)
    z = np.reshape(z, (n * n, 1))

    iters = 5000
    lmbd = 0.0
    gamma = 1e-5

    n_categories = 1

    n_params = 5
    n_gammas = 3
    params = np.zeros(n_params)
    params[1:] = np.logspace(1, -2, n_params - 1)
    gammas = np.logspace(-5, -6, n_gammas)

    print(params)
    print(gammas)

    test_frac = 0.3
    n_test = int(test_frac * n**2)
    X_train, X_test, z_train, z_test = train_test_split(X,
                                                        z,
                                                        test_size=test_frac,
                                                        random_state=123)
    z_test_1d = np.ravel(z_test)
    train_single_NN = True
    # plot_surf(x_grid, y_grid, z.reshape(n,n), cm.coolwarm, 1)
    # plt.show()

    if train_single_NN:
        # config = [4,16,4]
        # config = [30,20,30,20,30,20]
        config = [100, 50]
        hidden_a_func = ['tanh', 'tanh']
        output_a_func = ''
        # config = [16,8,4]
        NN = NeuralNet(X_train, z_train, config, hidden_a_func, output_a_func,
                       'reg')
        NN.train(iters, gamma, lmbd=lmbd)
        z_pred = NN.predict_regression(X_test)

        r2_score = metrics.r2_score(z_test_1d, z_pred)
        mse = metrics.mean_squared_error(z_test_1d, z_pred)

        print("gamma =", gamma)
        print("lmbd =", lmbd)
        print("r2 =", r2_score)
        print("mse =", mse)

        print("--------------\n")

        # x_test, y_test = np.meshgrid(X_test[:,0], X_test[:,1])
        # n_test_1d = int(np.sqrt(n_test))
        # x_grid, y_grid = get_grid(X_test)

        # plot_surf(x_grid, y_grid, z_pred.reshape(n_test_1d, n_test_1d), cm.coolwarm)
        # plot_surf(x_grid, y_grid, z_test.reshape(n_test_1d, n_test_1d), cm.gray, alpha=0.5)
        # plt.show()

        # plt.imshow(z_pred.reshape(n,n))
        # plt.show()
        #
        # plt.imshow(z_train.reshape(n,n))
        # plt.show()

    # exit()
    config = [80, 60]
    hidden_a_func = ['sigmoid', 'tanh']
    NN_grid = NeuralNet(X_train, z_train, config, hidden_a_func, '', 'reg')
    NN_grid.grid_search(X_test, z_test_1d, params, gammas, 'reg', config)

    # Iterate over multiple hidden layer configurations
    # for i in range(1, 6):
    #     for j in range(0, i+1):
    #         for k in range(0, 1):
    #             config = [i, j, k]
    #             NN_grid.grid_search(X_test, y_test, params, gammas, config)

    best_accuracy, best_config, best_lmbd, best_gamma = NN_grid.return_params()

    print("\n--- Grid search done ---")
    print('Best accuracy:', best_accuracy)
    print("with configuration", best_config, "lmbd =", best_lmbd, "gamma =",
          best_gamma)
Example #38
import random, numpy as np, NeuralNet as NN
params = [100, 0.05, 250, 3, 20]
curPop = np.random.choice(np.arange(-15,15,step=0.01),size=(params[0],params[3]),replace=False)
nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
fitVec = np.zeros((params[0], 2))
for i in range(params[2]):
	fitVec = np.array([np.array([x, np.sum(NN.costFunction(NN.X, NN.y, curPop[x].reshape(3,1)))]) for x in range(params[0])])
	winners = np.zeros((params[4], params[3])) #20x3
	for n in range(len(winners)):
		selected = np.random.choice(range(len(fitVec)), params[4]//2, replace=False)
		wnr = np.argmin(fitVec[selected,1])
		winners[n] = curPop[int(fitVec[selected[wnr]][0])]
	nextPop[:len(winners)] = winners
	nextPop[len(winners):] = np.array([np.array(np.random.permutation(np.repeat(winners[:, x], ((params[0] - len(winners))//len(winners)), axis=0))) for x in range(winners.shape[1])]).T
	curPop = np.multiply(nextPop, np.matrix([float(np.random.normal(0,2,1)) if random.random() < params[1] else 1 for x in range(nextPop.size)]).reshape(nextPop.shape))
Example #39
    def train(self):
        print('Training some bots...')
        for x in range(TRAINING_EPOCH):
            print('Generation: ' \
                + str(x + 1) + '/' \
                + str(TRAINING_EPOCH) \
                + ', Mutations: ' + str(self.mutations) \
                + ', Avg. Score: ' \
                + str(self.avg_score) \
                + ', High Score: ' \
                + str(self.high_score), \
                end = '          \r')
            self.avg_score = 0
            world = BotWorld(self.bots)
            # run the simulation for EPOCH_LENGTH time steps
            world.update(EPOCH_LENGTH)

            # create a roulette wheel for bot pairing
            roulette = []
            for bot in self.bots:
                # each bot has a chance, even if they captured no food
                roulette += [bot] * (self.bots[bot].score + 1)
                self.avg_score += self.bots[bot].score
                if self.bots[bot].score > self.high_score:
                    self.high_score = self.bots[bot].score
                    self.elite_bot = {bot: self.bots[bot]}

            # calculate the average score for this generation
            self.avg_score = round(self.avg_score / len(self.bots), 2)
            newbots = {}
            for _ in range(POPULATION):
                # choose two random parents from the roulette wheel
                parent_a = random.choice(roulette)
                parent_b = random.choice(roulette)
                # get the first parent's weights as a list
                parent_a_weights = self.bots[parent_a].nn.encoded()
                x = 0
                # make sure parents are different
                while parent_a == parent_b and x < POPULATION:
                    parent_b = random.choice(roulette)
                    x += 1
                parent_b_weights = self.bots[parent_b].nn.encoded()
                # pick a random point to cross over parent a and b
                crossover = random.randint(1, len(parent_a_weights) - 1)
                # randomly choose a parent for first segment of genes
                if random.randint(0, 1):
                    crossed_weights = parent_a_weights[:crossover]
                    crossed_weights += parent_b_weights[crossover:]
                else:
                    crossed_weights = parent_b_weights[:crossover]
                    crossed_weights += parent_a_weights[crossover:]
                n = NeuralNet.NeuralNetwork(\
                    NETWORK_INPUTS, \
                    NETWORK_OUTPUTS, \
                    NETWORK_LAYERS, \
                    NETWORK_HIDDEN_LAYER_AF, \
                    NETWORK_OUTPUT_LAYER_AF)
                # randomly mutate a weight
                if random.random() < EVOLUTION_MUTATION_RATE:
                    crossed_weights[random.randint( \
                        0, len(crossed_weights) - 1)] = \
                            random.uniform(-1, 1)
                    self.mutations += 1
                # create new bot, add it to the next generation's population
                n.decode(crossed_weights)
                bot = Bot(n)
                newbots[bot.id] = bot
            self.bots = newbots
        print()
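The roulette list above implements fitness-proportionate selection by repetition: each bot appears score + 1 times, so random.choice is biased toward higher scorers while every bot keeps at least one slot. A minimal standalone version of the same idea:

import random

def roulette_pick(scores):
    # scores: mapping of bot id -> non-negative score
    wheel = []
    for bot_id, score in scores.items():
        wheel += [bot_id] * (score + 1)
    return random.choice(wheel)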
Example #40
import sys
import features

from NeuralNet import *
from training import *
from linearRegression import *
from data import *

# Analyzing command line arguments
if len(sys.argv) < 2:
  print 'Usage:'
  print '  python %s <JSON file>' % sys.argv[0]
  exit()

inputFile = sys.argv[1]

# Import Data
reviews = Data(inputFile, numLines = 10000, testLines = 1000)
reviews.shuffle()

# Try a neural net instead of a simple average of the 
#IN PROGRESS
NN = NeuralNet(reviews)	
NN.SGD()
NN.test()
#superCoolNet.gradientCheck()

#result =  superCoolNet.predict("neural nets are the bomb more words are needed here words eight long")

#superCoolNet.getInfo()
#print result
Example #41
import NeuralNet
import generate_data
import numpy as np
import matplotlib.pyplot as mp

# tabulates all 3-bit inputs and computes the XOR label for each
X = generate_data.tabulate(3)
Y = generate_data.get_label(X)

# creates a neural network with 1 hidden layer
structure = (3, 2)
xor_network = NeuralNet.dnn(X.shape[0], Y.shape[0], structure, 1)
print(X)
print(Y)
##print(xor_network.hidden_units[0])
##print(xor_network.hidden_units[1])
##print(xor_network.hidden_units[1])
##print(xor_network.output_layer)
##prediction = xor_network.predict(X)
##print(prediction)
##
loss = xor_network.learn(X, Y, 0.005, 15000)
##print(xor_network.hidden_units[0])
##print(xor_network.hidden_units[1])
##print(xor_network.hidden_units[1])
##print(xor_network.output_layer)
prediction = xor_network.predict(X)
print(prediction)

mp.ylim(0, max(loss) + 1)
mp.plot(loss)
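generate_data.tabulate(3) is not shown here; from its use (X.shape[0] == 3 network inputs, one column per sample) it presumably enumerates all 3-bit inputs. A plausible stand-in, labeled hypothetical rather than the project's actual code:

import numpy as np

def tabulate_bits(n):
    # All 2**n binary inputs, one column per sample (shape: n x 2**n).
    cols = [[(i >> b) & 1 for b in range(n)] for i in range(2 ** n)]
    return np.array(cols).T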
Example #42
import numpy as np
import matplotlib.pylab as plt
import matplotlib.animation as anima
import NeuralNet


def animate(i):
    NN.train([input_vector, true_outcomes], iterations=i)
    yar = NN.output_values
    ax1.clear()
    ax1.plot(input_vector, yar)


Fs = 100
f = 5
sample = 100
input_vector = np.arange(100., sample+100, 0.1)
test_vector = np.arange(0., sample+100, 0.1)
true_outcomes = np.sin(2 * np.pi * f * input_vector / Fs)
NN = NeuralNet.NeuralNet(len(input_vector), len(true_outcomes), 20)
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ani = anima.FuncAnimation(fig, animate, interval=1)
plt.show()

outcome = NN.predict(test_vector)
plt.plot(test_vector, outcome)
Example #43
__author__ = 'Aaron'

# IMPORTS
import sys

from States import *
from constants import *
from NeuralNet import *

# GLOBAL VARIABLES
clock = pygame.time.Clock()
ann = NeuralNet(NUM_INPUTS, NUM_OUTPUTS, NUM_HIDDEN, NUM_PER_HIDDEN)


# STATE MANAGER
class StateManager(object):
    def __init__(self, ann=None):
        """
        Initializes the state manager.
        Contains "global" variables to hold neural network and score.
        """
        self.ann = ann
        self.fitness = 0

        self.state = None
        self.go_to(MenuState())

    def go_to(self, state):
        self.state = state
        self.state.manager = self
Example #44
from NeuralNet import *
inputs = 2
myRange = 1

myNet1 = NeuralNet(3, 3, inputs)
myNet2 = NeuralNet(3, 3, inputs)

myinput = myNet1.shapeInput([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

myNet1.updateNeurons(myinput, myRange)
myinput = myNet1.shapeInput([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

myNet2.updateNeurons(myinput, myRange)

myinput = myNet1.shapeInput(myNet2.speak())

###conversation

change = 0

for i in range(10000):
    myinput = myNet1.shapeInput(myNet2.winning)
    myNet1.updateNeurons(myinput, myRange)
    print "Person 1: ", myNet1.winning

    myinput = myNet1.shapeInput(myNet2.winning)
    myNet2.updateNeurons(myinput, myRange)
    print "Person 2: ", myNet2.winning

    distance = float(
        math.sqrt((((myNet1.winning[0] - myNet1.winning[1])**2)) +
Example #45
import numpy as np
import matplotlib.pyplot as plt
import random
import sys
#import load_datasets as loader
import NeuralNet  # import the neural network class
#import DecisionTree  # import the decision tree class
# import any other files and classes you have developed
# import other libraries as needed, except those that do machine learning

nn = NeuralNet.NeuralNet(4, 3)

test_data_location = 'data/mnist_test.csv'
train_data_location = 'data/mnist_train.csv'

test1 = np.loadtxt(test_data_location,
                   encoding='utf-8',
                   dtype=int,
                   skiprows=1,
                   delimiter=',')
train1 = np.loadtxt(train_data_location,
                    encoding='utf-8',
                    dtype=int,
                    skiprows=1,
                    delimiter=',')

test_labels1 = test1[:, 0].astype(int)
train_labels1 = train1[:, 0].astype(int)
train1 = train1[:, 1:] / 255
test1 = test1[:, 1:] / 255
Example #46
import random, numpy as np
import NeuralNet as NN
params = [100, 0.05, 250, 3, 20] # [Init pop (pop=100), mut rate (=5%), num generations (250), chromosome/solution length (3), # winners/per gen]
curPop = np.random.choice(np.arange(-15,15,step=0.01),size=(params[0],params[3]),replace=False) #initialize current population to random values within range
nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
fitVec = np.zeros((params[0], 2)) #1st col is indices, 2nd col is cost
for i in range(params[2]): #iterate through num generations
	fitVec = np.array([np.array([x, np.sum(NN.costFunction(NN.X, NN.y, curPop[x].reshape(3,1)))]) for x in range(params[0])]) #Create vec of all errors from cost function
	print("(Gen: #%s) Total error: %s\n" % (i, np.sum(fitVec[:,1])))
	winners = np.zeros((params[4], params[3])) #20x3
	for n in range(len(winners)): #for n in range(20)
		selected = np.random.choice(range(len(fitVec)), params[4]//2, replace=False)
		wnr = np.argmin(fitVec[selected,1])
		winners[n] = curPop[int(fitVec[selected[wnr]][0])]
	nextPop[:len(winners)] = winners #populate new gen with winners
	nextPop[len(winners):] = np.array([np.array(np.random.permutation(np.repeat(winners[:, x], ((params[0] - len(winners))//len(winners)), axis=0))) for x in range(winners.shape[1])]).T #Populate the rest of the generation with offspring of mating pairs
	nextPop = np.multiply(nextPop, np.matrix([float(np.random.normal(0,2,1)) if random.random() < params[1] else 1 for x in range(nextPop.size)]).reshape(nextPop.shape)) #randomly mutate part of the population
	curPop = nextPop

best_soln = curPop[np.argmin(fitVec[:,1])]
X = np.array([[0,1,1],[1,1,1],[0,0,1],[1,0,1]])
result = np.round(NN.runForward(X, best_soln.reshape(3,1)))
print("Best Sol'n:\n%s\nCost:%s" % (best_soln,np.sum(NN.costFunction(NN.X, NN.y, best_soln.reshape(3,1)))))
print("When X = \n%s \nhThetaX = \n%s" % (X[:,:2], result,))