Example #1
# importing necessary packages
from neuralnetwork import NeuralNetwork
import numpy as np

# construct the XOR dataset
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([[0], [1], [1], [0]])

# define our 2-2-1 neural network and train it
nn = NeuralNetwork([2, 2, 1], alpha=0.5)
nn.fit(X, Y, epochs=20000)

# now that our network is trained, loop over the XOR data points
for (x, targets) in zip(X, Y):
    # make the prediction on the data point and display the result
    # to the console
    pred = nn.predict(x)[0][0]
    step = 1 if pred > 0.5 else 0
    print(
        "[INFO] data = {}, ground truth = {}, pred = {:.4f}, step = {}".format(
            x, targets[0], pred, step))
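
For orientation, a minimal sketch of the kind of sigmoid forward pass a small 2-2-1 network like this typically computes; the logistic activation and the bias being folded into each weight matrix are assumptions of the sketch, not the library's documented internals.

import numpy as np

def sigmoid(x):
    # logistic activation, assumed throughout this sketch
    return 1.0 / (1.0 + np.exp(-x))

def forward(x, weight_matrices):
    # push the input through one weight matrix per layer
    activation = np.atleast_2d(x)
    for W in weight_matrices:
        activation = sigmoid(activation.dot(W))
    return activation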
Example #2

from neuralnetwork import NeuralNetwork
import random
import math
# build a population of ten candidate networks
networks = []
for i in range(10):
    networks.append(NeuralNetwork([2, 4, 3, 3], -1, 1, -1, 1))

yeet = None

losses = []

amount = 25


class West:
    def __init__(self, network):
        self.network = network
        self.ammo = 0

    def run(self, enemyAmmo):
        # feed our ammo count and the enemy's into the network, then
        # take the index of the strongest output (a manual argmax)
        result = self.network.run([self.ammo, enemyAmmo])
        highestIndex = -1
        highestVal = 0
        for index, output in enumerate(result):
            if highestIndex < 0 or output > highestVal:
                highestVal = output
                highestIndex = index
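
The manual scan above is a hand-rolled argmax; assuming result is a plain sequence of floats, the same index falls out of one line:

highestIndex = max(range(len(result)), key=lambda i: result[i])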
Example #3
# pandas and datetime are needed below; Scraper, Segment, Profit,
# Transform, NeuralNetwork and ExponentialSmoothing are project-local
# classes assumed to be defined or imported elsewhere
import datetime

import pandas as pd


def main():
    out = []
    tickers = pd.read_excel('./tickers.xlsx')
    #tickers = pd.DataFrame({'Ticker': ['GUSH', 'AAPL']})
    print(tickers)
    for tick in tickers['Ticker']:
        print(tick)
        try:
            st = Scraper()
            df = st.scrape(tick)
            #df.to_csv('./output/df.csv')
            ind = df['Date'].iloc[-50:]
            ind = ind.reset_index()
            #ind.to_csv('./output/ind.csv')
            del df['Date']
            points = []
            for record in df['Adj Close']:
                points.append(float(record))
                print(record)
            best_case = 0
            #for w in [0.8, 0.9, 1, 1.1, 1.2, 1.5, 2, 3, 4, 5]:
            for w in [5]:
                segment = Segment(points, 0, len(points), multiplier=w)
                segment.get_turning_points()
                index = sorted(segment.turning_points)
                print(index)
                closes = []
                for i in index:
                    closes.append(points[i])
                profit = Profit.profit(closes)
                # verify logic below: best_index/best_closes stay unset
                # if no weighting yields a positive profit
                if profit > best_case:
                    print("weight: ", w)
                    best_case = profit
                    print("profit: ", profit)
                    best_index = index
                    best_closes = closes
            t = Transform()
            transform = t.trend(best_index, best_closes)
            #pd.DataFrame(transform).to_excel('./output/transformed.xlsx')
            df['transformed'] = transform
            sra = t.correlation(df)
            significant_sra = t.significant(sra)
            df_norm = t.normalize(df[significant_sra['Field']])
            #df_norm.to_csv('./output/normalizedinputs.csv')
            n = NeuralNetwork()
            neural, actual = n.network(df_norm, transform)
            pd.DataFrame({'Neural': neural}).to_csv('./output/neural.csv')
            esf = ExponentialSmoothing()
            smoothed = esf.es(neural, actual)
            smoothed['Closes'] = points[-50:]
            smoothed['Date'] = ind['Date']
            smoothed['Ticker'] = tick
            if len(out) == 0:
                out = smoothed
            else:
                # DataFrame.append was removed in pandas 2.x; use concat
                out = pd.concat([out, smoothed])
        except Exception as e:
            # skip tickers that fail to scrape or fit, but say why
            print("[WARN] skipping {}: {}".format(tick, e))
    today = datetime.date.today()
    pd.DataFrame(out).to_excel('./output/smoothed %s.xlsx' % today)
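
An alternative to growing the output frame inside the loop is to collect each ticker's result in a list and concatenate once at the end; a minimal sketch of that pattern:

import pandas as pd

frames = []
# inside the loop: frames.append(smoothed)
# after the loop, a single concat builds the final table
out = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()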
Example #4

                       [green_bin[5] * 1.0],
                       [green_bin[6] * 1.0],
                       [green_bin[7] * 1.0],
                       [blue_bin[0] * 1.0],
                       [blue_bin[1] * 1.0],
                       [blue_bin[2] * 1.0],
                       [blue_bin[3] * 1.0],
                       [blue_bin[4] * 1.0],
                       [blue_bin[5] * 1.0],
                       [blue_bin[6] * 1.0],
                       [blue_bin[7] * 1.0]]

        training_data.append(input_data)
        targets.append(target_data)

nn = NeuralNetwork([3, 8, 24], [nnet.logsigmoid, nnet.purelin])

chromo_size = 0

for i in range(0, len(nn.layers)):
    chromo_size += nn.layers[i].weights.size
    chromo_size += nn.layers[i].bias.size

# INIT
toolbox = base.Toolbox()

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))

# Defining the structure of the individual
IND_SIZE = chromo_size  # individual size: one gene per weight and bias
INT_MIN, INT_MAX = 1, 25500
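
From here a typical DEAP setup registers an individual of IND_SIZE genes and a population built from it; a hedged sketch continuing the snippet above (creator.FitnessMin and toolbox are already defined there, and the gene range reuses INT_MIN/INT_MAX):

import random

from deap import creator, tools

creator.create("Individual", list, fitness=creator.FitnessMin)

# each gene is a random integer in [INT_MIN, INT_MAX]
toolbox.register("attr_int", random.randint, INT_MIN, INT_MAX)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_int, n=IND_SIZE)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)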
Example #5
    lr = 0.1  # learning rate
    i_from = 1001
    i_to = 10000

    # Read directory from config.ini
    inifile = configparser.ConfigParser()
    inifile.read('config.ini')
    PATH = os.path.join(
        Path(__file__).resolve().parents[1], inifile['GENERAL']['TEST_DIR'])
    file_r = '20181108_01_c_0200-ergebnis.csv'
    file_qb = '20181108_01_c_0200_query_before_#8b.csv'
    file_qa = '20181108_01_c_0200_query_after_#8b.csv'
    #file_w = 'weight.csv'

    # initialise NeuralNetwork
    nn = NeuralNetwork(x, y, lr)

    # Set weights from csv file
    #nn.weight = pd.read_csv(os.path.join(PATH, file_w), sep=';', header=None)

    # Read csv file and split to input and output
    csv_all = pd.read_csv(os.path.join(PATH, file_r), sep=';')
    csv_input = (csv_all[['input1', 'input2']] - i_from) / (i_to - i_from)
    csv_output = csv_all[['output1', 'output2']]

    # Query (predict) outputs for each input row, then stack them;
    # DataFrame.append was removed in pandas 2.x, so concatenate once
    rows = []
    for index, line in csv_input.iterrows():
        rows.append(pd.DataFrame(data=np.array(nn.query(line), ndmin=2).T))
    csv_query = pd.concat(rows, ignore_index=True)
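
Because the inputs were min-max scaled into [0, 1], mapping a value back to the original units just inverts that transform; a one-line helper under the same i_from/i_to bounds:

def denormalize(value, i_from=1001, i_to=10000):
    # invert (x - i_from) / (i_to - i_from)
    return value * (i_to - i_from) + i_from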
Example #6
                       [red_bin[2] * 1.0], [red_bin[3] * 1.0],
                       [red_bin[4] * 1.0], [red_bin[5] * 1.0],
                       [red_bin[6] * 1.0], [red_bin[7] * 1.0],
                       [green_bin[0] * 1.0], [green_bin[1] * 1.0],
                       [green_bin[2] * 1.0], [green_bin[3] * 1.0],
                       [green_bin[4] * 1.0], [green_bin[5] * 1.0],
                       [green_bin[6] * 1.0], [green_bin[7] * 1.0],
                       [blue_bin[0] * 1.0], [blue_bin[1] * 1.0],
                       [blue_bin[2] * 1.0], [blue_bin[3] * 1.0],
                       [blue_bin[4] * 1.0], [blue_bin[5] * 1.0],
                       [blue_bin[6] * 1.0], [blue_bin[7] * 1.0]]

        training_data.append(input_data)
        targets.append(target_data)

nn = NeuralNetwork([6, 24], [nnet.purelin])

# input_samples = [[[0], [0]], [[0], [2]], [[2], [1]], [[3], [2]]]
# targets = [[0], [0], [1], [1]]

perceptron.learn(nn, training_data, targets, epoches=2)

print(nn.layers[0].weights)
print(nn.layers[0].bias)

mse = 0

image = cv2.imread(target_image)
rows, cols, _ = image.shape  # Size of background Image

new_image = np.zeros((rows, cols, 3))
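
The 24 network outputs appear to encode three 8-bit colour channels; a hedged helper for packing eight thresholded outputs back into a 0-255 channel value (the 0.5 cutoff and most-significant-bit-first order are assumptions):

def bits_to_channel(outputs):
    # threshold each output, then read the eight bits as one byte
    value = 0
    for out in outputs:
        value = (value << 1) | (1 if out > 0.5 else 0)
    return value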
Example #7
import numpy as np
from neuralnetwork import NeuralNetwork

# Training data set
training_inputs = []
training_inputs.append(np.array([1, 1]))
training_inputs.append(np.array([1, 0]))
training_inputs.append(np.array([0, 1]))
training_inputs.append(np.array([0, 0]))

# AND gate: the output is 1 only when both inputs are 1
labels = np.array([1, 0, 0, 0])

nn = NeuralNetwork(2)
nn.train(training_inputs, labels, True)
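
A quick sanity check is to run each input back through the trained model and compare against the AND-gate targets; predict is an assumption here, since only train appears above:

for inputs, label in zip(training_inputs, labels):
    # each prediction should match the corresponding AND-gate label
    print(inputs, "->", nn.predict(inputs), "(expected {})".format(label))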
Example #8

# scikit-learn provides the digits dataset and evaluation helpers;
# NeuralNetwork is assumed to come from the same module as above
from neuralnetwork import NeuralNetwork
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import datasets

# load the digits dataset and min-max scale the pixels to [0, 1]
digits = datasets.load_digits()
data = digits.data.astype("float")
data = (data - data.min()) / (data.max() - data.min())
print("[INFO] samples: {}, dim: {}".format(data.shape[0], data.shape[1]))

# construct the training and testing splits
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  digits.target,
                                                  test_size=0.25)

# convert the labels from integers to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

# train the network
print("[INFO] training network...")
nn = NeuralNetwork([trainX.shape[1], 32, 16, 10])
print("[INFO] {}".format(nn))
epochs = 1000
losses = nn.fit(trainX, trainY, epochs=epochs)

# evaluate the network
print("[INFO] evaluating network...")
predictions = nn.predict(testX)
predictions = predictions.argmax(axis=1)
print(classification_report(testY.argmax(axis=1), predictions))
# construct a figure that plots the loss over time (left disabled)
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, epochs), losses)
# plt.title("Training Loss")
Example #9
from tkinter import *
import numpy
import matplotlib.pyplot as plot
from neuralnetwork import NeuralNetwork

input_nodes = 784
hidden_nodes = 300
output_nodes = 10
learning_rate = .2

nn = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

training_data_file = open('data/mnist_train.csv', 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()

epochs = 4

# one full pass over the training rows per epoch
for e in range(epochs):
    for record in training_data_list:
        # each CSV row is the label followed by 784 pixel values
        all_values = record.split(',')
        # scale pixels from 0-255 into the range 0.01-1.0
        inputs = (numpy.asfarray(all_values[1:]) / 255 * .99) + .01
        # targets are 0.01 everywhere except 0.99 at the true digit
        targets = numpy.zeros(output_nodes) + .01
        targets[int(all_values[0])] = .99
        nn.train(inputs, targets)
test_data_file = open('data/mnist_test.csv', 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
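
The excerpt stops after loading the test rows; the usual next step in this style of MNIST script is a scorecard loop. A hedged sketch, assuming the class exposes a query method returning the ten output activations:

scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    inputs = (numpy.asfarray(all_values[1:]) / 255 * .99) + .01
    # the predicted digit is the index of the strongest output
    label = numpy.argmax(nn.query(inputs))
    scorecard.append(1 if label == correct_label else 0)

print("performance =", sum(scorecard) / len(scorecard))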
Example #10
        labels = np.argmax(y, axis=1)
        y_train[fold] = labels[train_obs]
        y_test[fold] = labels[test_obs]

        # train and test perceptron
        perc = Perceptron(num_feats=num_feats, learn_rate=learn_rate)
        perc.train(x[train_obs], y[train_obs])
        yp_train[fold] = np.argmax(perc.predict(x[train_obs]), axis=1)
        yp_test[fold] = np.argmax(perc.predict(x[test_obs]), axis=1)
        perc.close()

        for node in range(num_nodes):
            print("NODE:", 3 * node + 1)

            # train and test neural network
            nn = NeuralNetwork(num_feats=num_feats, num_nodes=3*node + 1, learn_rate=learn_rate)
            nn.train(x[train_obs], y_train[fold].reshape(num_tr_obs, 1))
            yn_train[fold, node] = nn.predict(x[train_obs]).reshape(num_tr_obs,)
            yn_test[fold, node] = nn.predict(x[test_obs]).reshape(num_te_obs,)
            nn.close()



    test = {}
    test['y_train'] = y_train
    test['y_test'] = y_test
    test['yp_train'] = yp_train
    test['yp_test'] = yp_test
    test['yn_train'] = yn_train
    test['yn_test'] = yn_test
Example #11
            test_labels.append(classes[name])


load_training_data()
load_test_data()

print("Performing Principal Component Analysis...")
# Perform Principal Component Analysis on the train and test data
for j in range(len(train_data)):
    train_data[j] = pca.fit_transform(train_data[j]).ravel()
for j in range(len(test_data)):
    test_data[j] = pca.fit_transform(test_data[j]).ravel()

accuracy = 0
# create an instance of the network
nn = NeuralNetwork(1000, 500, 3, 0.01)

print("Training...")
# train for 100 epochs, measuring training accuracy after each pass
for epoch in range(100):
    for j in range(len(train_data)):
        nn.train(train_data[j], labels[j])
    accuracy = nn.accuracy(train_data, labels)
    print("\tepoch {i}, accuracy = {accuracy}".format(i=epoch + 1,
                                                      accuracy=accuracy))

# ask the user whether to save the trained model
save = input("Save current model with accuracy %.4f? [Y/N]: " % accuracy)
if save.strip().upper() == "Y":
    model_file_path = "models/model.json"
    nn.write_to_file(model_file_path)
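
Fitting PCA separately on every image, as the loop above does, gives each sample its own projection; the more common pattern is to fit one projection on the stacked training set and reuse it for the test set. A hedged scikit-learn sketch (the component count is an assumption):

import numpy as np
from sklearn.decomposition import PCA

pca = PCA(n_components=50)
X_train = pca.fit_transform(np.vstack([d.ravel() for d in train_data]))
X_test = pca.transform(np.vstack([d.ravel() for d in test_data]))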
Example #12
# Split into training and test sets (80/20)
trainN = int(0.8 * N)

X_train = x_encoded[0:trainN]
y_train = y_target[0:trainN]

X_test = x_encoded[trainN:]
y_test = y_target[trainN:]

# Training the model with softmax layer
N, M = X_train.shape

print("Training with softmax layer with activation function as tanh")
# tanh activation_function
nn = NeuralNetwork(M, np.array([100, 50]), 2)
nn.train(X_train,
         y_train,
         X_test,
         y_test,
         epochs=300,
         learning_rate=2e-1,
         learning_rate_decay=0.98,
         method="two")
nn.plot()

print("Training with softmax layer with activation function as sigmoid")
# Sigmoid activation function
nn2 = NeuralNetwork(M,
                    np.array([100, 50]),
                    2,
Example #13
import sys

import normalizer
import numpy as np
import PIL
from ui import init
from neuralnetwork import NeuralNetwork

init()

img = PIL.Image.open("image.png").convert("L")
imgarr = np.array(img)

print("imagearray created")

np.set_printoptions(threshold=np.nan)
normalized_input = normalizer.normalizeInput(imgarr)

# np.ndarray(shape=(10)) would allocate uninitialized memory; zeros
# gives a defined placeholder for the ten output slots
nn = NeuralNetwork(normalized_input, np.zeros(10))

nn.feedforward()

print(nn.result())
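
If result() returns the ten output activations, the predicted digit is simply the strongest one; a minimal readout under that assumption:

prediction = int(np.argmax(nn.result()))
print("predicted digit:", prediction)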
Example #14
    "Accuracy score of implementation of logistic regression by gradient descent: ",
    accuracy)

# Neural network

runs = 50
my_acc = 0
skl_acc = 0

print("averaging over ", runs, " neural networks...")

for i in range(runs):
    testNet = NeuralNetwork(X_train_scaled,
                            y_trainv,
                            n_hidden_neurons=[50],
                            n_categories=1,
                            epochs=100,
                            batch_size=100,
                            eta=0.1,
                            lmbd=0.01)
    testNet.train()
    y_fit = testNet.predict(X_test_scaled)
    indicator = 0
    for fit, test in zip(y_fit, y_test):
        if fit == test:
            indicator += 1
    accuracy = indicator / len(y_fit)
    from sklearn.neural_network import MLPClassifier
    dnn = MLPClassifier(hidden_layer_sizes=(50, ),
                        activation='logistic',
                        alpha=0.01,
                        learning_rate_init=0.1,
Example #15
validation_file = open("data/validation.dat", "r")
for line in validation_file:
    # every token except the trailing label (and newline) is a feature
    validation_data.append([float(x) for x in line[:-2].split()])
    validation_labels.append(int(line.split()[-1]))
validation_file.close()

# load test data
test_data = []
test_labels = []
test_file = open("data/test.dat", "r")
for line in test_file:
    test_data.append([float(x) for x in line[:-2].split()])
    test_labels.append(int(line.split()[-1]))
test_file.close()

# create np.arrays for data/labels
train_data = np.array(train_data)
train_labels = np.array(train_labels).reshape(len(train_labels), 1)

validation_data = np.array(validation_data)
validation_labels = np.array(validation_labels).reshape(
    len(validation_labels), 1)

test_data = np.array(test_data)
test_labels = np.array(test_labels).reshape(len(test_labels), 1)

# create neural network with 50 inputs, 30 hidden units, and 3 classes
nn = NeuralNetwork([50, 30, 3])
nn.set_data(train_data)
nn.set_labels(train_labels)
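
The manual parsing above can also be done in one call per file; a hedged numpy.loadtxt sketch, assuming whitespace-separated rows whose last column is the integer label:

import numpy as np

def load_split(path):
    # features are every column but the last, which holds the label
    arr = np.loadtxt(path)
    return arr[:, :-1], arr[:, -1].astype(int).reshape(-1, 1)

test_data, test_labels = load_split("data/test.dat")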
Example #16
    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(
            np.all(network.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))
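
A natural companion check under the same sigmoid assumption: the logistic function is exactly 0.5 at the origin.

    def test_activation_at_zero(self):
        # the logistic sigmoid evaluates to 0.5 at zero
        network = NeuralNetwork(3, 2, 1, 0.5)
        self.assertAlmostEqual(float(network.activation_function(0)), 0.5)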