Code Example #1
def analyzeSymbol(stockSymbol):
    startTime = time.time()

    trainingData = getTrainingData(stockSymbol)

    network = NeuralNetwork(inputNodes=3, hiddenNodes=3, outputNodes=1)

    network.train(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(stockSymbol)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                           predictionData[2])

    # create return object, including the amount of time used to predict
    returnData = {}
    returnData['price'] = predictedStockPrice
    returnData['time'] = time.time() - startTime

    return returnData
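
A minimal usage sketch for this function (the ticker string is illustrative, and the time import is an assumption since the snippet omits its imports):

import time  # analyzeSymbol times itself with time.time()

result = analyzeSymbol("GOOG")  # hypothetical ticker symbol
print(result['price'], result['time'])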
Code Example #2
def analyzeSymbol(stockSymbol):
    startTime = time.time()
    flag = 0
    trainingData = getTrainingData(stockSymbol)

    network = NeuralNetwork(inputNodes=3, hiddenNodes=3, outputNodes=1)

    network.train(trainingData)

    for i in range(0, 5):
        # get rolling data for most recent day
        predictionData = getPredictionData(stockSymbol, flag)
        returnPrice = network.test(predictionData)

        # de-normalize and return predicted stock price
        predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                               predictionData[2])

        print(predictedStockPrice)
        flag += 1
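        # expose the latest prediction at module level so code outside this function can read it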
        global new_value
        new_value = predictedStockPrice

    return predictedStockPrice
Code Example #3
def analyzeSymbol():
    trainingData = getTrainingData()

    network = NeuralNetwork(inputNodes=3, hiddenNodes=5, outputNodes=1)

    network.train(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(0)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1], predictionData[2])

    return predictedStockPrice
Code Example #4
def getAccuracy(var, begin, name, step, loop, path="./results/"):
    value = begin
    for i in range(loop):
        print(i)
        # create network
        if var == 'hid':
            nn = NeuralNetwork(hid=value, act=ACT)
        elif var == 'lay':
            nn = NeuralNetwork(lay=value, act=ACT)
        elif var == 'lr':
            nn = NeuralNetwork(lr=value, act=ACT)
        elif var == 'epoch':
            nn = NeuralNetwork(act=ACT)
        else:
            raise ValueError("unknown hyperparameter: " + var)

        # train network
        if var == 'epoch':
            nn.train(ds.train_data, ds.train_labels_arr, value, DATA)
        else:
            nn.train(ds.train_data, ds.train_labels_arr, EPOCH, DATA)

        # counters
        correct = 0
        false = 0
        for j in range(TEST):
            output = nn.feedforward(ds.test_data[j],
                                    isRound=False,
                                    isSoftmax=True)
            output_digit = np.where(output == np.amax(output))
            if output_digit[0][0] == ds.test_labels[j]:
                correct += 1
            else:
                false += 1
        # calc accuracy
        accuracy = (correct * 100) / (correct + false)
        # log accuracy
        logResults(path, name, value, accuracy)
        # increment value
        value += step
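
A hedged usage sketch for getAccuracy (assuming the globals ACT, EPOCH, DATA, TEST and the dataset ds are set up as in Example #17); the run names are hypothetical:

getAccuracy('hid', begin=50, name='hidden_size_sweep', step=25, loop=5)
getAccuracy('lr', begin=0.01, name='learning_rate_sweep', step=0.005, loop=10)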
Code Example #5
def analyzeSymbol(stockSymbol):
    startTime = time.time()
    
    trainingData = getTrainingData(stockSymbol)
    
    network = NeuralNetwork(inputNodes=3, hiddenNodes=3, outputNodes=1)

    network.train(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(stockSymbol)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1], predictionData[2])

    # create return object, including the amount of time used to predict
    returnData = {}
    returnData['price'] = predictedStockPrice
    returnData['time'] = time.time() - startTime

    return returnData
Code Example #6
def analyzeId(c_id):
    startTime = time.time()
    count2 = 0
    itr = 0
    if c_id > 0:
        network = NeuralNetwork(inputNodes=5, hiddenNodes=18, outputNodes=1)  # company id > 0 for individual prediction
        averageError = 1
        # train until the average backprop error drops below 0.01, capped at 2000 passes
        while averageError > 0.01 and itr < 2000:
            count1 = 0
            resultBackProp = []
            while count1 < 295:
                trainingData = getTrainingData(c_id, count1)
                resultBackProp.append(network.train(trainingData))
                count1 = count1 + 1
            averageError = sum(resultBackProp) / len(resultBackProp)
            itr += 1
        print ("avgError = ", averageError)

        #==================================================================    
        # get data for testing
        
        testError = []
        while count2 < 95:
            testData = getTestData(c_id, count2)
            testError.append(network.accuracyTest(testData))
            count2 = count2 + 1
        
        testErrorAverage = sum(testError)/len(testError)
        
        #====================================================================

        # get data for tomorrow's prediction
        predictionData = getPredictionData(c_id)

        # get prediction result
        returnPrice = network.test(predictionData)

        # de-normalize and return predicted stock price
        final = InitialCalculation()
        predictedStockPrice = final.denormalizePrice(returnPrice)

        # create return object, including the accuracy of the testing
        returnData = {}
        returnData['price'] = predictedStockPrice
        returnData['error'] = testErrorAverage
        returnData['time'] = time.time() - startTime
        print(returnData)
        return returnData

    else:
        nep_network = NeuralNetwork(inputNodes=8, hiddenNodes=20, outputNodes=1)  # company id = 0 for NEPSE prediction
        averageError = 1
        # train until the average backprop error drops below 0.03
        while averageError > 0.03:
            count1 = 0
            resultBackProp = []
            while count1 < 95:
                trainingData = getTrainingData(c_id, count1)
                resultBackProp.append(nep_network.trainNepse(trainingData))
                count1 = count1 + 1
            averageError = sum(resultBackProp) / len(resultBackProp)
        print("avgError = ", averageError)

        #==================================================================    
        # get data for testing
        # loop over the 10 NEPSE test samples
        testError = []
        while count2 < 10:
            testData = getTestData(c_id, count2)
            testError.append(nep_network.accuracyTestNepse(testData))
            count2 = count2 + 1
        testErrorAverage = sum(testError) / len(testError)
        #print ("test error =", testError)
        #print("te average = ", testErrorAverage)
        #accuracy = (1- testErrorAverage)*100
        #====================================================================

        # get data for tomorrow's prediction
        predictionData = getPredictionData(c_id)

        # get prediction result
        returnPrice = nep_network.test(predictionData)

        # de-normalize and return predicted stock price
        final = InitialCalculation()
        predictedStockPrice = final.denormalizePriceNepse(returnPrice)

        # create return object, including the accuracy of the testing
        returnData = {}
        returnData['price'] = predictedStockPrice
        returnData['error'] = testErrorAverage
        returnData['time'] = time.time() - startTime
        print(returnData)
        return returnData
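
A minimal usage sketch (the company id is illustrative): analyzeId branches on its argument, so

analyzeId(12)  # hypothetical company id > 0: train and predict for one company
analyzeId(0)   # company id 0: train and predict the NEPSE index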
Code Example #7
def analyzeSymbol(name):
    # assumes: import csv, math, sqlite3 as sql
    # assumes: from datetime import datetime, timedelta
    filename = 'uploads/' + name

    # count the rows in the uploaded CSV
    count = sum(1 for line in open(filename))

    con = sql.connect("EBL.db")
    cur = con.cursor()

    # (re)create a table named after the uploaded file
    # note: interpolating the table name with .format is unsafe for untrusted input
    cur.execute("DROP TABLE if exists {}".format(name))
    cur.execute('''CREATE TABLE {}(id INTEGER not null PRIMARY KEY, date DATE, ltp INTEGER, percent INTEGER, high INTEGER, low INTEGER, prediction INTEGER)'''.format(name))

    # load the CSV rows into the table
    with open(filename) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            cur.execute("insert into {} (date, ltp, percent, high, low) values (?, ?, ?, ?, ?)".format(name),
                        (row[0], row[1], row[2], row[3], row[4]))
        con.commit()

    # train the network on the uploaded data
    trainingData = getTrainingData(name)
    network = NeuralNetwork(inputNodes=3, hiddenNodes=2, outputNodes=1)
    network.train(trainingData)

    # predict a price for every available window and store it in the table
    predictions = []
    returnData = {}
    for a in range(0, count - 9):
        predictionData = getPredictionData(name, a)
        returnPrice = network.test(predictionData)

        # de-normalize the predicted stock price
        predictedStockPrice = denormalizePrice(returnPrice, predictionData[1], predictionData[2])
        predictedStockPrice = math.floor(predictedStockPrice)
        returnData[a] = predictedStockPrice
        predictions.append(predictedStockPrice)

        cur.execute('update {} set prediction = ? WHERE (id = ?)'.format(name),
                    (predictions[a], a + 11))
        con.commit()

    # forecast the next nine days, one day at a time
    for day in range(0, 9):
        # collect the ten most recent prices, falling back to the stored
        # prediction when the actual price is missing
        xdata = []
        for r in range(0, 10):
            cur.execute('SELECT * FROM {} WHERE id=(SELECT max(id)-? FROM {})'.format(name, name), (r,))
            result = cur.fetchall()
            for row in result:
                data = row[2]
                if data is None:
                    data = row[6]
                xdata.append(data)

        timeSeries = getTimeSeriesValues(xdata, 10)
        features = timeSeries[0][0]

        predicted = network.test(features)
        predict = float(denormalizePrice(predicted, features[1], features[2]))
        predict = math.floor(predict)

        # read the most recent row to compute the next calendar date
        cur.execute("SELECT * from {} where id = (SELECT max(id) FROM {})".format(name, name))
        result = cur.fetchall()
        for row in result:
            date = datetime.strptime(row[1], "%Y-%m-%d")
            next_date = datetime.strftime(date + timedelta(days=1), "%Y-%m-%d")

        # append the forecast as a new row
        cur.execute("insert into {} (date, prediction) values (?, ?)".format(name),
                    (next_date, predict))
        con.commit()

    # return the last in-sample prediction
    return returnData[a]
Code Example #8
File: train.py Project: italojs/pokedex-keras
from neuralNetwork import NeuralNetwork
from dataset import Dataset
import getArgs
import gc

args = getArgs.train()

batch_size = args["batch_size"]
dataset = Dataset(args["dataset"])
net = NeuralNetwork(args["epochs"], batch_size, args["learn_rate"], dataset)
net.train()
net.evaluate(dataset)
net.save_model(args["model"])
dataset.save_labels(args["le"])
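
Presumably this script is driven from the command line via getArgs.train(); a hypothetical invocation (the flag names are guesses from the args dictionary keys, not confirmed against the project):

python train.py --dataset ./pokemon_images --epochs 50 --batch_size 32 --learn_rate 0.001 --model pokedex.model --le labels.pickle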
Code Example #9
# 2 Train the neural network
# ==============================

epochs = 3000

for epoch in range(epochs):
    for record in training_data_list:
        # split the record at the ',' commas and parse to float values
        values = record.split(',')
        # parse x1 and x2
        input_vector = [float(values[0]), float(values[1])]
        # parse output
        target_value = [float(values[2])]

        # train neural network
        neuralNetwork.train(input_vector, target_value)

    # print status
    if ((epoch + 1) % 500) == 0:
        print("epoch: ", epoch + 1)

# 3 Show trained neural network
# ==============================

# generate test values
step_width = 0.01
x1_test = []
x2_test = []
for i in range(int(1.0 / step_width) + 1):
    x1_test.append(i * step_width)
    x2_test.append(i * step_width)
Code Example #10
File: hw4.py Project: mckinziebrandon/two_layer_nn
def trainNeuralNetwork(reload=False, wantDeskew=True):

    # Get the MNIST data, either by loading saved data or getting it new.
    X_train, labels_train, X_test, labels_test = get_data(reload, wantDeskew)

    print("X_train.shape", X_train.shape)
    print("labels_train.shape", labels_train.shape)

    # ___________ Initialize the neural network. ____________
    neural_net = NeuralNetwork(n_in=X_train.shape[1],
                               n_hid=1200,
                               n_out=10,
                               eta=0.1,
                               decay_const=0.8,
                               alpha=0.9,
                               l2=0.07,
                               batch_size=50,
                               n_epochs=15)

    # Give the training data to neural_net for storage and further preprocessing.
    neural_net.set_data(X_train, labels_train)

    # Create arrays for plotting results.
    accuracy = []
    loss = []

    n_train = int(50e3)
    n_batches = n_train // neural_net.batch_size
    batches = np.array_split(np.arange(n_train), n_batches)
    print("Splitting into", len(batches), "of size", neural_net.batch_size)

    epochs = np.arange(neural_net.n_epochs)
    n_iter_total = len(epochs) * len(batches)
    x_axis = np.arange(0, n_iter_total, n_iter_total // 100)
    print("preparing to collect", len(x_axis), "points to plot.")

    print("Beginning", n_iter_total,
          "iterations of minibatch gradient descent.")
    for i in epochs:
        print('\n========== EPOCH {} ======'.format(i))
        neural_net.new_epoch(i)
        for j, batch in enumerate(batches):

            # Tell neural net what data to train with.
            neural_net.set_active(batch)

            # Calculate values along feedforward.
            X, S_h, H, S_o, O = neural_net.forward_pass()

            # Get the training loss at this iteration.
            if i * len(batches) + j in x_axis:
                print(".", end=" ")
                sys.stdout.flush()
                loss.append(neural_net.get_loss())
                accuracy.append(neural_net.train_accuracy())

            # Update weights via backprop.
            neural_net.train(X, H, O)

    # Save Kaggle predictions in CSV file.
    if True:
        pred_labels_test = neural_net.predict_test(util.preprocess(X_test))
        Id = np.reshape(np.arange(1, 1 + X_test.shape[0]),
                        (X_test.shape[0], 1))
        Category = np.reshape(pred_labels_test, (X_test.shape[0], 1))
        columns = np.hstack((Id, Category))
        np.savetxt('predictions.csv',
                   columns,
                   delimiter=',',
                   header='Id, Category',
                   fmt='%d')

    neural_net.print_results()
    util.plot_error(x_axis, loss, accuracy, neural_net.get_params())
Code Example #11
def main():
    # training sets for the XOR problem only have 4 cases
    training_sets = [
        [[0, 0], [0]],
        [[0, 1], [1]],
        [[1, 0], [1]],
        [[1, 1], [0]]
    ]
    # declare the module-level activation variable; use sigmoid for this problem
    global activation
    activation = "sigmoid"
    nn = NeuralNetwork(len(training_sets[0][0]), 5, len(training_sets[0][1]), False)
    # can use large learning rate due to simple training
    nn.LEARNING_RATE = 1
    temp1, temp2 = [], []
    # bigger range = longer training = less error = better results
    for i in range(10000):
        training_inputs, training_outputs = random.choice(training_sets)
        nn.train(training_inputs, training_outputs)
        temp1.append(i)
        k = nn.calculate_total_error(training_sets)
        temp2.append(k)
        print(i, k)

    print()
    test1 = [0, 0]
    test2 = [0, 1]
    test3 = [1, 0]
    test4 = [1, 1]
    # verify performance
    print(nn.feed_forward(test1)[0])
    print(nn.feed_forward(test2)[0])
    print(nn.feed_forward(test3)[0])
    print(nn.feed_forward(test4)[0])
    print()

    plt.plot(temp1, temp2)
    nn.inspect()

    training_sets = []
    # much larger training set to account for many multiplications
    for i in range(1000):
        x = random.randint(-100, 100)
        y = random.randint(-100, 100)
        training_sets.append([[x / 100.0, y / 100.0], [(x * y) / 10000.0]])
    # reuse the module-level activation variable (declared above), now set to relu for this problem
    activation = "relu"
    nn = NeuralNetwork(len(training_sets[0][0]), 20, len(training_sets[0][1]), True)
    # smaller learning rate so error rate doesn't stagnate
    nn.LEARNING_RATE = .05
    temp1, temp2 = [], []
    # my computer's not fast enough to run a higher value so my error rate is high
    for i in range(10000):
        training_inputs, training_outputs = random.choice(training_sets)
        nn.train(training_inputs, training_outputs)
        temp1.append(i)
        k = nn.calculate_total_error(training_sets)
        temp2.append(k)
        print(i, k)

    plot_learning_curves(temp1, temp2)
    nn.inspect()
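
One caveat: with a non-interactive matplotlib backend, the plt.plot call in the XOR section draws nothing until the figure is flushed; depending on the environment a final call may be needed:

plt.show()  # display the XOR learning curve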
Code Example #12
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.1
epochs = 2

history_performance = []
training_data = tuple(load_mnist.load_training(output_nodes))
test_data = tuple(load_mnist.load_test())

for i in range(0, 100):
    scorecard = []
    network = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

    for row in training_data:
        network.train(*row)

    for row in test_data:
        correct_label = row[1]

        outputs = network.query(row[0])
        label = numpy.argmax(outputs)
        if label == correct_label:
            scorecard.append(1)
        else:
            scorecard.append(0)

    performance = sum(scorecard) / len(scorecard)
    print("ШАГ:", i, "/,", "Эффективность =", performance)
    history_performance.append(performance)
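
Since each of the 100 runs retrains the network from scratch, the collected history can be summarized afterwards; a small follow-on sketch (these summary lines are an addition, not part of the original):

print("mean performance:", sum(history_performance) / len(history_performance))
print("best performance:", max(history_performance))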
Code Example #13
MNIST_train_ans_set = one_hot_encode(labels=MNIST_train[:, 0:1])

# Pre-processing the images: scale pixel values from [0, 255] down to [0, 1], then binarize with np.ceil (any non-zero pixel becomes 1).
MNIST_train_inp_set_temp = MNIST_train[:, 1:] / 255  # input matrix; X
MNIST_train_inp_set = np.ceil(MNIST_train_inp_set_temp)

# Creating a neural network object.
MNIST_nn = NeuralNetwork(num_inputs=784, num_hidden=50, num_outputs=10)

# Defining a learning rate.
eta = 1

# Training the neural network.
MNIST_nn.train(ans_set=MNIST_train_ans_set,
               inp_set=MNIST_train_inp_set,
               epochs=10,
               eta=eta,
               threshold=.02)

# Using the trained neural network to generate predictions on the test set.
# First, reading in the test set images, and doing the same pre-processing as on the training set.
MNIST_test_df = pd.read_csv('MNIST_test.csv')
MNIST_test = MNIST_test_df.values  # no labels in the test set
MNIST_test_inp_set_temp = MNIST_test / 255  # input matrix; X
MNIST_test_inp_set = np.ceil(MNIST_test_inp_set_temp)

# Transposing the input set so X is a collection of column vectors, each column representing an image.
MNIST_test_inp_set_transpose = np.transpose(MNIST_test_inp_set)

# Loop through and feed forward every image to generate the prediction.
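
The snippet cuts off before that loop; a hypothetical completion (the feedforward method name and its output shape are assumptions about this custom NeuralNetwork class, not confirmed by the source):

predictions = []
for col in range(MNIST_test_inp_set_transpose.shape[1]):
    # feed one column vector (one image) through the trained network
    output = MNIST_nn.feedforward(MNIST_test_inp_set_transpose[:, col])  # assumed method
    predictions.append(int(np.argmax(output)))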
Code Example #14
epochs = 1

for e in range(epochs):
    # go through all records in the training data set
    for i, record in enumerate(training_data_list):
        print(str(((i + 1) / len(training_data_list) * 100) / epochs) + "%")
        # split the record by the ',' commas
        all_values = record.split(',')
        # scale and shift the inputs
        inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # create the target output values (all 0.01, except the desired label which is 0.99)
        targets = np.zeros(output_nodes) + 0.01
        # all_values[0] is the target label for this record
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)

# load the mnist test data CSV file into a list
test_data_file = open("data/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()

# test the neural network

# scorecard for how well the network performs, initially empty
scorecard = []

# go through all the records in the test data set
for record in test_data_list:
Code Example #15
import pandas as pd

# Importing time to time execution - https://pythonhow.com/measure-execution-time-python-code/
import time

# Starting off the timer
start = time.time()

# Importing the excel data for the XOR problem. Pandas dataframe.
df = pd.read_excel('XOR_dataset.xlsx', 'Sheet1')

# Numpy array - https://stackoverflow.com/questions/13187778/convert-pandas-dataframe-to-numpy-array-preserving-index
data = df.values
ans_set = data[:, 0:1]  # first column; Y
inp_set = data[:, 1:]  # input matrix; X

# Defining a learning rate
eta = 0.5

# Creating a neural network object.
nn = NeuralNetwork(2, 6, 1)

# Training the neural network.
nn.train(ans_set, inp_set, 15000, eta)

# Validating the neural network.
print('\nValidation Success rate: ', nn.validate(ans_set, inp_set))

# Printing the execution time
end = time.time()
print('Execution time (s): ', format(end - start, '.10f'))
Code Example #16
expected_outputs = np.array([
    [1],
    [0],
    [0],
    [0],
    [0],
    [0],
    [0],
    [0],
])

# This learning rate is extremely high, yet yields very good results.
# This is likely because the perfect output values are 0 and 1, which the sigmoid only reaches at its extremes,
# i.e. for infinite input; chasing those targets, the network keeps pushing its weights toward infinity.
nn.train(inputs, expected_outputs, learning_rate, epochs)

print('After training:')
MSE = 0.0
for input, expected_output in zip(inputs, expected_outputs):
    output = nn.activate(input)
    print('{} -> {}'.format(input, output))
    MSE += ((expected_output - output)**2).sum()
print('MSE: {}\n'.format(MSE / len(inputs)))

# 4.3 C. XOR and Back-propagation
# Neural Network Settings
learning_rate = 20
epochs = 1000
weight_lower_bound = -1
weight_upper_bound = 1
Code Example #17
INPUTS  = 784
OUTPUTS = 10
HIDDEN  = 131
LAYERS  = 4
LR      = 0.0595
ACT     = "sigmoid"

EPOCH   = 65
DATA    = 100
TEST    = 500

ds = Dataset()

nn = NeuralNetwork(INPUTS, OUTPUTS, HIDDEN, LAYERS, LR, ACT)

nn.train(ds.train_data, ds.train_labels_arr, EPOCH, DATA)

# counters
correct = 0
false   = 0
for i in range(TEST):
    output = nn.feedforward(ds.test_data[i], isRound=False, isSoftmax=True)
    output_digit = np.where(output == np.amax(output))
    if output_digit[0][0] == ds.test_labels[i]:
        correct += 1
    else:
        false += 1
# calc accuracy
accuracy = (correct * 100) / (correct + false)
# log accuracy
# set file path