Code Example #1
File: pong.py  Project: chaostal/ai-pong
    def __init__(self, x, ball):
        Player.__init__(self, x)
        self.y_change = 5
        self.ball = ball
        input_nodes = 3
        hidden_nodes = 5
        output_nodes = 3

        learning_rate = 0.3

        self.neural = nn.neuralNetwork(input_nodes, hidden_nodes, output_nodes,
                                       learning_rate)

        self.data = []

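        # pre-train on a few hand-labelled input triples with one-hot movement targets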
        for a in range(0, 100):
            self.neural.train([1, 30, 100], [1, 0, 0])
            self.neural.train([1, 30, 400], [1, 0, 0])
            self.neural.train([1, 130, 30], [0, 0, 1])
            self.neural.train([1, 50, 50], [0, 1, 0])
            self.neural.train([1, 430, 30], [0, 0, 1])
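
The inputs in these training calls are triples (apparently a bias term plus two coordinates) and the targets one-hot encode one of three moves. A hypothetical per-frame handler for this player (purely illustrative: the ball's attribute names and the meaning of each output index are assumptions, not shown in this excerpt):

    # numpy assumed to be imported at module level
    def update(self):
        # query with the same three-value input layout used during training
        outputs = self.neural.query([1, self.ball.x, self.ball.y])
        choice = numpy.argmax(outputs)
        if choice == 0:
            self.y -= self.y_change  # e.g. move up
        elif choice == 2:
            self.y += self.y_change  # e.g. move down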
Code Example #2
import nn

input_nodes = 4
hidden_nodes = 3
output_nodes = 3

learning_rate = 0.3

n = nn.neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

for a in range(0, 1000):
    n.train([1, 2, 3, 7], [0.2, 0.8, 0.4])

#r = n.query([1.0,0.5,-1.5])
r = n.query([1, 2, 3, 0])
print(r)
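
Most of these snippets assume the same nn.neuralNetwork interface: construct it with (input_nodes, hidden_nodes, output_nodes, learning_rate), then call train(inputs, targets) and query(inputs). Code Example #5 credits Tariq Rashid's book, so the class behind these projects is presumably close to the book's three-layer network. A minimal sketch along those lines (an assumption; each project's nn module may differ in detail):

import numpy
import scipy.special

class neuralNetwork:
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate
        # weights drawn from a normal distribution centred on zero
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                       (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5),
                                       (self.onodes, self.hnodes))
        self.activation_function = lambda x: scipy.special.expit(x)  # sigmoid

    def query(self, inputs_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        return self.activation_function(numpy.dot(self.who, hidden_outputs))

    def train(self, inputs_list, targets_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        # backpropagate: output error, then the error attributed to the hidden layer
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # gradient-descent updates for both weight matrices
        self.who += self.lr * numpy.dot(
            output_errors * final_outputs * (1.0 - final_outputs), hidden_outputs.T)
        self.wih += self.lr * numpy.dot(
            hidden_errors * hidden_outputs * (1.0 - hidden_outputs), inputs.T)

query returns a column vector of output activations, which is why Code Example #4 reads it back with .T[0].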
Code Example #3
File: main.py  Project: lucasausbury/CIS678
import sys
import config
import parse
import nn

if len(sys.argv) < 2:
    sys.exit('Usage: %s directory-name' % sys.argv[0])

d = sys.argv[1]
#try:
translate = parse.buildTranslate(d)
data = parse.getData(d, "training", True)

data['translate'] = translate
net = nn.neuralNetwork(data)  # renamed from `nn` to avoid shadowing the module
#print(net)

#data = parse.getData(d, "test")

#for row in data['inputs']:
#    print(net.predict(row))
#except Exception as error:
#    print(error)
Code Example #4
import numpy as np
import gym, sys, os
from nn import neuralNetwork
import random

env = gym.make("CartPole-v1")
Qmodel = neuralNetwork(env.observation_space.shape[0], 15, env.action_space.n, 0.3)
# Tmodel = neuralNetwork()
state = env.reset()
print(Qmodel.query(state).T[0])

EPISODES = 500
ALPHA = 0.6
GAMMA = 0.99
EPSILON = 0.6

MEMORY = []
BATCH_SIZE = 256
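
# --- hypothetical training loop, not part of the original excerpt ---
# ALPHA, GAMMA, EPSILON, MEMORY and BATCH_SIZE are unused in the code shown;
# a sketch of the epsilon-greedy Q-learning loop they suggest:
def train_episode():
    state = env.reset()
    done = False
    while not done:
        # explore with probability EPSILON, otherwise exploit the Q-network
        if random.random() < EPSILON:
            action = env.action_space.sample()
        else:
            action = np.argmax(Qmodel.query(state).T[0])
        next_state, reward, done, _ = env.step(action)
        MEMORY.append((state, action, reward, next_state, done))
        state = next_state
    # replay: sample a minibatch and move each Q-value toward its TD target
    batch = random.sample(MEMORY, min(BATCH_SIZE, len(MEMORY)))
    for s, a, r, s2, d in batch:
        target = Qmodel.query(s).T[0]
        td = r if d else r + GAMMA * np.max(Qmodel.query(s2).T[0])
        target[a] = (1 - ALPHA) * target[a] + ALPHA * td
        Qmodel.train(s, target)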

def main():
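    # play a few episodes greedily, always taking the action with the highest Q-value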
    for i in range(10):
        state = env.reset()
        done = False
        rew = 0
        while not done:
            env.render()
            next_state, reward, done, _ = env.step(np.argmax(Qmodel.query(state).T[0]))
            state = next_state
            rew += reward
        print(f"Testing Episode {i} has {rew} rewards")
Code Example #5
## @package ocr_mnist_digits
#  This module is a demonstration of character recognition based on a neural network
#
#  It uses the MNIST database for the training and testing data. This project
#  is based upon the book by Tariq Rashid (O'Reilly: "Neurale Netzwerke selbst programmieren").

import nn

## CREATE INSTANCE OF NEURAL NETWORK
# Input is a 28-by-28 pixel image, with 500 hidden nodes.
# Output is a digit indicator 0...9. Learning rate is 0.1.
n = nn.neuralNetwork((28 * 28), 500, 10, 0.1)

## TRAIN WITH MNIST DATABASE (60'000 samples)
# Training data: http://www.pjreddie.com/media/files/mnist_train.csv
#
# Training process is repeated 5 times (epochs)
# Training does not have to be done every time, since the resulting weights are saved in a .csv
# file and automatically restored before testing
#
# vvv Uncomment next line to train neural network vvv
#
# nn.train.train(n,"C:/Users/stefa/Desktop/mnist_train.csv",5) # <---- Change this path

## TEST WITH SEPARATE TEST DATA SET (10'000 samples)
# Testing data: http://pjreddie.com/media/files/mnist_test.csv
#
#
nn.test.test(n,
             "C:/Users/stefa/Desktop/mnist_test.csv")  # <---- Change this path
Code Example #6
import numpy
from nn import neuralNetwork
import codecs, json

# create the network object
n = neuralNetwork()

# load the previously generated weights

n.loadconfig()

xor_possibilities = [[0, 0, 0], [1, 0, 1], [1, 1, 0], [0, 1, 1]]

# train the network
for i in range(0, 100000):
    index = numpy.random.randint(4)
    record = xor_possibilities[index]
    # scale the 0/1 inputs into the range 0.01 to 1.0
    inputs = (numpy.asfarray(record[1:]) * 0.99) + 0.01
    targets = numpy.zeros(n.onodes) + 0.01
    targets[int(record[0])] = 0.99
    n.train(inputs, targets)

# save the updated weights
n.saveconfig("xor")

# predict the output of the 4 XOR possibilities,
# scaling the inputs the same way as during training
xor_00 = n.query(numpy.asfarray(xor_possibilities[0][1:]) * 0.99 + 0.01)
xor_01 = n.query(numpy.asfarray(xor_possibilities[1][1:]) * 0.99 + 0.01)
xor_10 = n.query(numpy.asfarray(xor_possibilities[2][1:]) * 0.99 + 0.01)
xor_11 = n.query(numpy.asfarray(xor_possibilities[3][1:]) * 0.99 + 0.01)
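
The queries above return raw output activations rather than class labels; the index of the strongest output node is the network's predicted XOR value. For example:

for record, result in zip(xor_possibilities, [xor_00, xor_01, xor_10, xor_11]):
    print(record[1:], "->", numpy.argmax(result))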
Code Example #7
def run_experiment(
    data_path,
    model_path,
    experiment_title,
    results_dir,
    report_format='latex',
    test_size=0.1,
    reinfer=False,
    input_nodes=50,
    hidden_nodes=30,
    lr=0.005,
    epochs=40,
):
    """
    Run a classification experiment.
    Requires a pre-trained doc2vec model trained on the same data.

    Parameters
    ----------
    data_path: str
        path to a pkl file with the data
    model_path: str
        path to a file with a pre-trained doc2vec model
    experiment_title: str
        unique name for the experiment (to be used for storing results)
    results_dir: str
        path to the directory where results are to be stored
    report_format: {'latex', 'json'}
        format in which the classification report is to be stored
    test_size: float
        percentage of data to be saved for testing (default: 0.1)
    reinfer: bool
        if True, vectors will be re-inferred; if False, the vectors that are already in the model will be used (default: False)
    input_nodes: int
        number of neurons in the input layer; should match the input vector size (default: 50)
    hidden_nodes: int
        number of neurons in the hidden layer (default: 30)
    lr: float
        learning rate of the neural network (default: 0.005)
    epochs: int
        number of iterations over the training data (default: 40)

    Returns
    -------
    None
    """
    ############## PATHS ##############
    data_path = Path(data_path)
    results_dir = Path(results_dir)

    # model_path = Path(model_path)
    # commented out since it was causing trouble on Windows

    ############## PREPARE DATA ##############
    df = pd.read_pickle(data_path)
    print(f"Data loaded: {df.shape[0]} rows.")

    # convert the genre tags to vectors, where
    # each vector has 6 dimensions (6 genres), where
    # all values are 0.01 except for the gold class which is 0.99
    print("Converting tags to arrays...", flush=True, end=' ')
    tags_index = {
        'action': 0,
        'animation': 1,
        'comedy': 2,
        'fantasy': 3,
        'romance': 4,
        'sci-fi': 5,
    }
    df['out_vec'] = df['tag'].apply(
        lambda x: tag2vec(x, convert_dict=tags_index))
    print("done!")

    # split
    print("Splitting to train / test...", flush=True, end=' ')
    train_data, test_data = train_test_split(
        df,
        test_size=test_size,
        random_state=19,
    )
    print("done!")

    ############## PREPARE VECTORS ##############
    # load doc2vec model
    print(f"Loading doc2vec model: {model_path} ...", flush=True, end=' ')
    model = Doc2Vec.load(model_path)
    print("done!")

    # tag docs (lists of tokens) with their index
    print("Tagging documents...", flush=True, end=' ')
    train_tagged = train_data.apply(
        lambda x: TaggedDocument(words=x['tokens'], tags=[x.name]), axis=1)
    test_tagged = test_data.apply(
        lambda x: TaggedDocument(words=x['tokens'], tags=[x.name]), axis=1)
    print("done!")

    # prep input vectors
    print(f"Preparing input vectors (reinfer={reinfer})...",
          flush=True,
          end=' ')
    if reinfer:
        X_train = [
            model.infer_vector(tagged_doc.words) for tagged_doc in train_tagged
        ]
        X_test = [
            model.infer_vector(tagged_doc.words) for tagged_doc in test_tagged
        ]
    else:
        X_train = [
            model.docvecs[tagged_doc.tags[0]] for tagged_doc in train_tagged
        ]
        X_test = [
            model.docvecs[tagged_doc.tags[0]] for tagged_doc in test_tagged
        ]
    print("done!")

    # prep output vectors (gold labels):
    print("Preparing output vectors...", flush=True, end=' ')
    y_train = train_data['out_vec'].to_list()
    y_test = test_data['out_vec'].to_list()
    print("done!")

    ############## TRAIN NEURAL NETWORK ##############
    # create an instance of neural network
    n = neuralNetwork(inputnodes=input_nodes,
                      hiddennodes=hidden_nodes,
                      outputnodes=6,
                      learningrate=lr)

    # train the neural network
    print(f"Training the neural network ({epochs} epochs)...")
    for e in range(epochs):
        print(f"epoch {e}...")
        for idx, record in enumerate(X_train):
            n.train(record, y_train[idx])
    print("done!")

    ############## TEST NEURAL NETWORK ##############
    print("Testing the neural network...", flush=True, end=' ')
    y_pred = list()
    for record in X_test:
        pred_label = n.query(record)
        y_pred.append(pred_label)
    print("done!")

    ############## EVALUATE ##############
    # save classification report to file
    print("Generating classification report...", flush=True, end=' ')
    classif_report(
        y_test,
        y_pred,
        convert_dict=tags_index,
        out_dir=results_dir,
        title=f"report_{experiment_title}",
        format=report_format,
    )
    print("done!")

    # save confusion matrix plot to file
    print("Generating confusion matrix plot...", flush=True, end=' ')
    plot_confusion_matrix(
        y_test,
        y_pred,
        convert_dict=tags_index,
        out_dir=results_dir,
        title=f"cm_plot_{experiment_title}",
    )
    print("done!")

    return None
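
The tag2vec helper used above is not defined in this excerpt. From the comment in the code ("all values are 0.01 except for the gold class which is 0.99"), a plausible reconstruction (hypothetical; the project's actual helper may differ) is:

import numpy as np

def tag2vec(tag, convert_dict):
    # 6-dimensional target vector: 0.01 everywhere, 0.99 at the gold genre's index
    vec = np.zeros(len(convert_dict)) + 0.01
    vec[convert_dict[tag]] = 0.99
    return vec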