Example #1
    # Constructor of a model-wrapper class (the enclosing class definition is not
    # shown in this snippet); optionally restores previously saved weights.
    def __init__(self, load_from_filename=None, batch_size=16):
        self.batch_size = batch_size
        wordvec_dim = word_vector.get_dimensionality()
        self.neural_network = neural_network.build_model(
            wordvec_dim, INPUT_WORD_COUNT)
        if load_from_filename:
            self.neural_network.load_weights(load_from_filename)
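# A hypothetical instantiation of the class this constructor belongs to; the class
# name "SignalModel" and the weights filename below are illustrative assumptions only:
#
#     wrapper = SignalModel(load_from_filename="weights.h5", batch_size=32)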
def train_action(window_size, hidden_layers):
    print("Fetching data...", file=sys.stderr)
    data = data_manager.fetch_protein_data("../data/training_data")
    print("Done! Preparing for training...", file=sys.stderr)
    input_data, signal_data = data_manager.prepare_dataset(data, window_size, loading_screen)  # loading_screen is presumably defined elsewhere in the original module
    print("Done! Train and test splitting...", file=sys.stderr)
    train_input, test_input = data_manager.train_test_split(input_data, signal_data)
    print("Done", file=sys.stderr)

    print("Building signal model", file=sys.stderr)

    # Input layer
    architecture = [input_data[0].shape[0]]
    # Hidden layer
    architecture.extend(hidden_layers)
    # Output layer
    architecture.append(2)

    model = nn.build_model(architecture)

    directory = MODEL_PATH + get_directory_name()
    try:
        os.makedirs(directory)
    except OSError as e:
        print("There was an error while creating the model's sub-directory:", e, file=sys.stderr)
        sys.exit(1)

    start_time = time.time()
    history = nn.train_model(model, train_input[0], train_input[1],
                             filename=directory + MODEL_NAME)
    exec_time = time.time() - start_time

    test_eval = model.evaluate(test_input[0], test_input[1])

    results, precision = test_sequences(data, model, window_size, eval=True)
    save_history(history, directory + HISTORY_NAME)
    save_log(window_size, architecture, test_eval, precision, exec_time, directory + LOG_NAME)
    print("Done.", file=sys.stderr)
Example #3
import tensorflow as tf

from neural_network import build_model
from trash.autoencoder_preproc import build_autoencoder, load_autoencoder, save_autoencoder

m1 = build_model(1, 3)
m1.save("m1")
build_autoencoder()

save_autoencoder()

# TF 1.x: clear the default graph before rebuilding and reloading the models.
tf.reset_default_graph()

m1 = build_model(1, 3)
m2 = build_autoencoder()
m1.load("m1")
load_autoencoder()
Example #4
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from neural_network import build_model, predict  # assumed to come from the project's local module


def plot_decision_boundary(pred_func, X, y):
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01

    # Generate a grid of points with a distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)


np.random.seed(0)
X, y = make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)

plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(5, 2, i + 1)
    plt.title("HiddenLayerSize % d" % nn_hdim)
    model = build_model(X, y, nn_hdim, print_loss=False)
    plot_decision_boundary(lambda x: predict(model, x), X, y)
    # Sanity check: prediction for a single hand-picked point.
    sample_point = np.array([1.6, -0.4])
    print(predict(model, sample_point))
plt.show()
Example #5
import win32api
import win32con
import tensorflow as tf

from neural_network import build_model
from trash.autoencoder_preproc import build_autoencoder, load_autoencoder
# grab_screen, preproc_img and key_check are assumed to come from the project's
# screen-capture and keyboard helper modules, which are not shown in this snippet.


def mouse_down(x, y):
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)


def mouse_up(x, y):
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)


LR = 1e-3
MODEL_NAME = 'models/wrm13-encoded-1e-04-15-ep-407k-data.model'

tf.reset_default_graph()

model2 = build_model(LR, 4)
model2.load(MODEL_NAME)
build_autoencoder()
load_autoencoder()

print("Start/Pause action with Alt+T")
paused = True

while True:
    img = grab_screen()

    scr_W, scr_H = len(img[0]), len(img)
    img = preproc_img(img)

    keys = key_check()
    if "ALT" in keys and "T" in keys:
Example #6
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import neural_network as nn  # local module assumed to provide build_model and predict


def plot_decision_boundary(pred_func, X, y):
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)


#X = np.array([[-2, 1], [1, 1], [1.5, -0.5], [-2, -1], [-1, -1.5], [2, -2]])
#Y = np.array([[0, 1], [0, 1], [0, 1], [1, 0], [1, 0], [1, 0]])
#b = nn.build_model(X, Y, 4, print_loss=True)

np.random.seed(0)
X, y = make_moons(200, noise=0.20)
#nn.build_model(X, y, 4, 200000,print_loss=True)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)

plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(5, 2, i + 1)
    plt.title('HiddenLayerSize%d' % nn_hdim)
    model = nn.build_model(X, y, nn_hdim, 200000, print_loss=True)
    #nn.plot_decision_boundary(lambda x: nn.predict(model, x), X, y)
    plot_decision_boundary(
        lambda pts: np.array([nn.predict(model, x) for x in pts]), X, y)

plt.savefig('foo.png')
Example #7
# Authors: Ruilin Lin (Ryan), Jared Knutson
# Course:  CS491 (Topics) - Machine Learning
# Prof:    Dr. Emily Hand
#

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import neural_network as nn
import random

# This is the dataset used to test our network.
np.random.seed(0)
X, y = make_moons(200, noise=0.20)

nn.build_model(X, y, 1)
nn.build_model(X, y, 2)
nn.build_model(X, y, 3)

plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)

# Generate outputs; use this code.
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(5, 2, i + 1)
    plt.title('HiddenLayerSize%d' % nn_hdim)
    model = nn.build_model(X, y, nn_hdim)
    nn.plot_decision_boundary(
        lambda P: np.array([nn.predict(model, x) for x in P]), X, y)
plt.show()
Example #8
def mouse_down(x, y):
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)


def mouse_up(x, y):
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)


LR = 1e-3
MODEL_NAME = 'models/wrm13-encoded-1e-04-15-ep-407k-data.model'

tf.reset_default_graph()

model2 = build_model(1, 3)
model2.load("m1")
build_autoencoder()
load_autoencoder()

print("Start/Pause action with Alt+T")
paused = True

while True:
    img = grab_screen()

    scr_W, scr_H = len(img[0]), len(img)
    img = preproc_img(img)

    keys = key_check()
    if "ALT" in keys and "T" in keys:
Example #9

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
import neural_network as nn  # local module assumed to provide build_model and predict


def plot_decision_boundary(pred_func, X, y):
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = .01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)


X, y = make_moons(200, noise=0.20)

plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
plt.show()

plt.figure()
hidden_layer_dimensions = [1, 2, 4, 5]
# hidden_layer_dimensions = [10, 20, 50, 100]

for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(2, 2, i + 1, aspect='equal')
    plt.title('Hidden Layer Size : ' + str(nn_hdim))
    model = nn.build_model(X, y, nn_hdim, 30000, True)
    plot_decision_boundary(lambda x: nn.predict(model, x), X, y)

plt.show()
import os
import numpy as np
from neural_network import build_model
from screen_consts import WIDTH, HEIGHT
from CV_helpfile import get_rotated_samples
import random

LR = 1e-4
EPOCHS = 15
batch_size = 4
MODEL_NAME = 'wrm13-encoded-{}-{}-ep-407k-data.model'.format('%.e'%LR,EPOCHS)
save_path = "models/"

folder = 'preprocessed_encoded_notshuffled/'

model = build_model(LR, batch_size)
#model.load(save_path + MODEL_NAME)

for i in range(EPOCHS):
    listdir = os.listdir(folder)
    random.shuffle(listdir)
    for file_num, train_data_file in enumerate(listdir):
        train_data = np.load(folder + train_data_file)

        # Sliding windows of batch_size consecutive frames ending at index i2.
        # Note: for the first batch_size - 1 positions, the negative indices wrap
        # around to the end of the array.
        batches = [[train_data[i2 + j2 - (batch_size - 1)][0] for j2 in range(batch_size)]
                   for i2 in range(len(train_data))]

        batches = np.array(batches).reshape(-1, batch_size, 2080)
        actions = np.array(train_data[:, 1].tolist()).reshape(-1, 3)