Example #1
def test_00(memory, learning_rate=0.001):
    # Configurations
    mark = 'test'
    D = memory

    # Initiate model
    model = NeuralNet(memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory]))

    nn.add(Linear(output_dim=2 * D))
    nn.add(Activation('relu'))
    nn.add(Linear(output_dim=2 * D))
    nn.add(Activation('relu'))
    nn.add(Linear(output_dim=2 * D))
    nn.add(Polynomial(order=3))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
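A minimal usage sketch (hypothetical values; assumes test_00 is importable from its module):

# Hypothetical call: a 40-tap memory with the default learning rate.
model = test_00(memory=40)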
Example #2
def res_00(th, activation='relu'):
    assert isinstance(th, NlsHub)
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=Predictor)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add blocks
    nn.add(Input([th.memory_depth]))
    nn.add(
        Linear(output_dim=th.hidden_dim,
               weight_regularizer=th.regularizer,
               strength=th.reg_strength))
    nn.add(Activation(activation))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(
            Linear(output_dim=th.hidden_dim,
                   weight_regularizer=th.regularizer,
                   strength=th.reg_strength))
        net.add(Activation(activation))
        net.add_shortcut()

    for _ in range(th.num_blocks):
        add_res_block()
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(th.learning_rate)

    # Return model
    return model
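The th argument is an NlsHub configuration object. A hypothetical setup, inferring the fields from the attributes the function reads (the no-argument NlsHub constructor is an assumption):

th = NlsHub()  # assumption: constructible without arguments
th.memory_depth = 40
th.mark = 'res'
th.hidden_dim = 80
th.regularizer = 'l2'
th.reg_strength = 1e-4
th.num_blocks = 3
th.learning_rate = 0.001
model = res_00(th, activation='relu')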
Example #3
def bres_net_res0(th, activation='relu'):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    th.mark = '{}-{}'.format(th.mark, 'res')
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=BResNet)
    nn = model.nn
    assert isinstance(nn, BResNet)
    nn.strict_residual = False

    # Add layers
    nn.add(Input([th.memory_depth]))
    nn.add(Linear(output_dim=th.hidden_dim))
    nn.add(Activation(activation))
    branch = nn.add_branch()
    branch.add(Linear(output_dim=1))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(Linear(output_dim=th.hidden_dim))
        net.add(Activation(activation))
        net.add_shortcut()
        branch = nn.add_branch()
        branch.add(Linear(output_dim=1))

    for _ in range(th.num_blocks - 1):
        add_res_block()
    nn.add(Linear(output_dim=1))

    # Build
    model.default_build(th.learning_rate)

    return model
Example #4
def mlp_00(mark,
           memory_depth,
           layer_dim,
           layer_num,
           learning_rate,
           activation='relu'):

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    for _ in range(layer_num):
        nn.add(Linear(output_dim=layer_dim))
        nn.add(Activation(activation))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
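A hypothetical call using only the parameters visible in the signature:

# Three hidden layers of width 80 on top of a 40-tap input.
model = mlp_00(mark='mlp', memory_depth=40, layer_dim=80,
               layer_num=3, learning_rate=0.001)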
Example #5
def get_model(n_features=29,
              n_classes=2,
              learning_rate=LEARNING_RATE,
              verbose=VERBOSE,
              batch_size=BATCH_SIZE,
              n_steps=N_STEPS,
              width=WIDTH,
              save_step=100,
              cuda=True,
              preprocessing=None,
              skew=None):
    def data_augment(X, y, W, training=True):
        # Note: the noise scale is the module-level WIDTH (not the `width`
        # argument), and `skew` must be a callable supplied by the caller.
        if training:
            z = np.random.normal(loc=0, scale=WIDTH, size=(X.shape[0]))
            X = skew(X, z)
        if preprocessing is not None:
            X, y, W = preprocessing(X, y, W)
        return X, y, W

    net = Net(n_features, n_classes)
    model = NeuralNet(net,
                      n_classes=n_classes,
                      learning_rate=learning_rate,
                      preprocessing=data_augment,
                      verbose=verbose,
                      batch_size=batch_size,
                      n_steps=n_steps,
                      width=width,
                      save_step=save_step,
                      cuda=cuda)
    model.name = 'DataAugmentNeuralNet'
    return model
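The skew callable is supplied by the caller. A toy stand-in, purely for illustration (the real transform is application-specific), shifts each sample by its per-sample noise draw:

import numpy as np

def toy_skew(X, z):
    # Hypothetical transform: add the per-sample noise z to every feature.
    return X + z[:, np.newaxis]

model = get_model(skew=toy_skew)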
Example #6
def res_00(memory,
           blocks,
           order1,
           order2,
           activation='relu',
           learning_rate=0.001):
    # Configurations
    mark = 'res'
    D = memory

    # Initiate model
    model = NeuralNet(memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([D]))

    def add_res_block():
        net = nn.add(ResidualNet())
        net.add(Linear(output_dim=D))
        net.add(Activation(activation))
        net.add(Linear(output_dim=D))
        net.add_shortcut()
        net.add(Activation(activation))

    def add_res_block_poly():
        net = nn.add(ResidualNet())
        net.add(Linear(output_dim=D))
        net.add(Polynomial(order=order1))
        net.add(Linear(output_dim=D))
        net.add_shortcut()
        net.add(Polynomial(order=order2))

    if activation == 'poly':
        for _ in range(blocks):
            add_res_block_poly()
    else:
        for _ in range(blocks):
            add_res_block()

    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
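Hypothetical calls covering both branches of the activation switch:

# ReLU residual blocks (order1/order2 are ignored in this branch):
model = res_00(memory=40, blocks=3, order1=2, order2=3)
# Polynomial residual blocks (order1 inside the block, order2 after the shortcut):
model_poly = res_00(memory=40, blocks=3, order1=2, order2=3, activation='poly')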
Example #7
def svn_01(memory_depth, mark, hidden_dim, order1, learning_rate=0.001):
    strength = 0
    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Polynomial(order=order1))
    nn.add(
        Linear(output_dim=1,
               weight_regularizer='l2',
               strength=strength,
               use_bias=False))

    # Build model
    nn.build(loss='euclid',
             metric='rms_ratio',
             metric_name='RMS(err)%',
             optimizer=tf.train.AdamOptimizer(learning_rate))

    # Return model
    return model
Example #8
def get_model(n_features=29, n_classes=2, learning_rate=LEARNING_RATE,
              verbose=VERBOSE, batch_size=BATCH_SIZE, n_steps=N_STEPS,
              save_step=100, cuda=True, preprocessing=None):
    net = Net(n_features, n_classes)
    model = NeuralNet(net, n_classes=n_classes, learning_rate=learning_rate,
                      preprocessing=preprocessing, verbose=verbose,
                      batch_size=batch_size, n_steps=n_steps,
                      save_step=save_step, cuda=cuda)
    return model
Example #9
def mlp_00(learning_rate=0.001, memory_depth=80):
    """
  Performance on WH:
    [0] depth = 80
  """
    # Configuration
    hidden_dims = [2 * memory_depth] * 4
    strength = 0
    activation = 'lrelu'

    mark = 'mlp_D{}_{}_{}'.format(memory_depth, hidden_dims, activation)

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    lc._add_fc_relu_layers(nn, hidden_dims, activation, strength=strength)
    nn.add(Linear(output_dim=1, weight_regularizer='l2', strength=strength))

    # Build model
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    nn.build(loss='euclid',
             metric='rms_ratio',
             metric_name='RMS(err)%',
             optimizer=optimizer)

    # Return model
    return model
Example #10
def train_model(request_dict: dict = None):
    """
    train model among options specified in project_conf.json.
    :param request_dict: request posted via API
    :return: mae, after saving updated model
    """

    model = None
    if request_dict:
        data = pd.DataFrame(request_dict["bitcoin_last_minute"], index=[0])
    else:
        logging.info("Train mode.")

    model_name = conf_object.project_conf["model"]

    if model_name == 'rfregressor':
        from models.rfregressor import RFregressor
        model = RFregressor()

    if model_name == 'neuralnet':
        from models.neural_net import NeuralNet
        model = NeuralNet(data=data)

    if model_name == 'lstm':
        from models.lstm import LSTM
        model = LSTM(data=data)

    if model is None:
        raise ValueError("Unknown model name: {}".format(model_name))
    mae = model.eval()

    # save model
    with open(os.path.join(fix_path(), 'models/model.pkl'), 'wb') as f:
        pickle.dump(model, f)

    return mae
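A hypothetical invocation; the payload schema is defined by the API caller, so the inner keys below are placeholders:

# Scalar fields are wrapped by pd.DataFrame(..., index=[0]) as above.
mae = train_model({"bitcoin_last_minute": {"price": 0.0, "volume": 0.0}})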
Example #11
def pet(memory, hidden_dim, order, learning_rate, mark='pet'):
    # Initiate a predictor
    model = NeuralNet(memory_depth=memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory]))
    nn.add(Linear(output_dim=hidden_dim, use_bias=False))
    nn.add(inter_type=pedia.sum)
    for i in range(1, order + 1):
        nn.add_to_last_net(Homogeneous(order=i))

    # Build model
    model.default_build(learning_rate=learning_rate)

    return model
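Here inter_type=pedia.sum appears to sum the outputs of the Homogeneous sub-layers of degree 1..order, i.e. a polynomial of the linear features. A hypothetical call:

model = pet(memory=40, hidden_dim=20, order=3, learning_rate=0.001)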
Example #12
def rnn0(th):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    nn_class = lambda mark: Predictor(mark=mark, net_type=Recurrent)
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=nn_class)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input(sample_shape=[th.memory_depth]))
    for _ in range(th.num_blocks):
        nn.add(BasicRNNCell(state_size=th.hidden_dim))
    nn.add(Linear(output_dim=1))

    # Build
    model.default_build(th.learning_rate)

    return model
Example #13
def get_model(n_features=29,
              n_classes=2,
              learning_rate=LEARNING_RATE,
              verbose=VERBOSE,
              batch_size=BATCH_SIZE,
              n_steps=N_STEPS,
              width=WIDTH,
              save_step=100,
              cuda=True,
              preprocessing=None,
              skew=None):
    def data_augment(X, y, W, training=True):
        # Training-time augmentation: stack five skewed copies onto the
        # original batch; labels and weights are tiled to match below.
        if training:
            z_list = [
                np.random.normal(loc=0, scale=WIDTH, size=(X.shape[0]))
                for _ in range(5)
            ]
            X = np.concatenate([X] + [skew(X, z) for z in z_list], axis=0)
            y = np.concatenate([y] * 6, axis=0)
            if W is not None:
                W = np.concatenate([W] * 6, axis=0)
        if preprocessing is not None:
            X, y, W = preprocessing(X, y, W)
        return X, y, W

    net = Net(n_features, n_classes)
    model = NeuralNet(net,
                      n_classes=n_classes,
                      learning_rate=learning_rate,
                      preprocessing=data_augment,
                      verbose=verbose,
                      batch_size=batch_size,
                      n_steps=n_steps,
                      width=width,
                      save_step=save_step,
                      cuda=cuda)
    model.name = 'TrueDataAugmentNeuralNet'
    return model
Example #14
def tlp(memory_depth, hidden_dim, mark='tlp'):
    # Hyper-parameters
    learning_rate = 0.001

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Activation('sigmoid'))
    nn.add(Linear(output_dim=1, use_bias=False))

    # Build model
    model.default_build(learning_rate=learning_rate)

    return model
Example #15
def get_model(self, num_features, num_targets):
    if self.model_type == 'rf_class':
        model = RfClass(self.model_params)
    elif self.model_type == 'log_reg':
        model = LogReg(self.model_params)
    elif self.model_type == 'nn':
        self.model_params['num_features'] = num_features
        self.model_params['num_targets'] = num_targets
        model = NeuralNet(self.model_params)
    else:
        return None
    return model
Example #16
def svn(memory_depth, order, hidden_dim, mark='svn'):
    # Hyper-parameters
    learning_rate = 0.001

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Polynomial(order=order))
    nn.add(Linear(output_dim=1, use_bias=False))

    # Build model
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    model.default_build(optimizer=optimizer, learning_rate=learning_rate)

    return model
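A hypothetical call (the Adam optimizer is constructed inside, so only structural parameters are needed):

model = svn(memory_depth=40, order=2, hidden_dim=20)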
Example #17
def mlp_00(th):
    assert isinstance(th, NlsHub)
    # Initiate a predictor
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=Predictor)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([th.memory_depth]))
    for _ in range(th.num_blocks):
        nn.add(
            Linear(output_dim=th.hidden_dim,
                   weight_regularizer=th.regularizer,
                   strength=th.reg_strength))
        nn.add(Activation(th.actype1))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(th.learning_rate)

    # Return model
    return model
Example #18
def bres_net_wid0(th, activation='relu'):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    th.mark = '{}-{}'.format(th.mark, 'wid')
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=BResNet)
    nn = model.nn
    assert isinstance(nn, BResNet)

    # Add layers
    nn.add(Input([th.memory_depth]))
    nn._inter_type = pedia.fork
    for _ in range(th.num_blocks):
        branch = nn.add()
        branch.add(Linear(output_dim=th.hidden_dim))
        branch.add(Activation(activation))
        branch.add(Linear(output_dim=1))

    # Build
    model.default_build(th.learning_rate)

    # Return model
    return model
Example #19
def bres_net_dep0(th, activation='relu'):
    assert isinstance(th, NlsHub)
    # Initiate a neural net model
    th.mark = '{}-{}'.format(th.mark, 'dep')
    model = NeuralNet(th.memory_depth, mark=th.mark, nn_class=BResNet)
    nn = model.nn
    assert isinstance(nn, BResNet)

    # Add layers
    nn.add(Input([th.memory_depth]))
    for _ in range(th.num_blocks):
        nn.add(
            Linear(output_dim=th.hidden_dim,
                   weight_regularizer=th.regularizer,
                   strength=th.reg_strength))
        nn.add(Activation(activation))
        branch = nn.add_branch()
        branch.add(Linear(output_dim=1))
    # Build
    model.default_build(th.learning_rate)

    # Return model
    return model
Example #20
def svn_00(memory, learning_rate=0.001):
    # Configuration
    D = memory
    hidden_dims = [2 * D] * 3
    p_order = 2
    mark = 'svn_{}_{}'.format(hidden_dims, p_order)

    # Initiate a predictor
    model = NeuralNet(memory, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([D]))
    for dim in hidden_dims:
        nn.add(Linear(output_dim=dim))
        nn.add(Polynomial(p_order))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    return model
Example #21
def net_00(memory_depth, learning_rate=0.001):
    # Configuration
    hidden_dim = 10
    homo_order = 4
    mark = 'net_h{}_homo{}'.format(hidden_dim, homo_order)

    # Initiate a predictor
    model = NeuralNet(memory_depth, mark=mark)
    nn = model.nn
    assert isinstance(nn, Predictor)

    # Add layers
    nn.add(Input([memory_depth]))
    nn.add(Linear(output_dim=hidden_dim))
    nn.add(inter_type=pedia.sum)
    for i in range(1, homo_order + 1):
        nn.add_to_last_net(Homogeneous(i))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
Example #22
def mlp02(mark,
          memory_depth,
          layer_num,
          hidden_dim,
          learning_rate,
          activation,
          identity_init=True):
    # Initiate a neural net
    if identity_init:
        model = NeuralNet(memory_depth,
                          mark=mark,
                          bamboo=True,
                          identity_initial=True)
    else:
        model = NeuralNet(memory_depth, mark=mark, bamboo=True)
    nn = model.nn
    assert isinstance(nn, Bamboo)

    # Add layers
    nn.add(Input([memory_depth]))

    for _ in range(layer_num):
        nn.add(Linear(output_dim=hidden_dim))
        nn.add(Activation(activation))
        branch = nn.add_branch()
        branch.add(Linear(output_dim=1))

    nn.add(Linear(output_dim=hidden_dim))
    nn.add(Activation(activation))
    nn.add(Linear(output_dim=1))

    # Build model
    model.default_build(learning_rate)

    # Return model
    return model
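A hypothetical call; identity_init=True routes through the identity_initial branch wired above:

model = mlp02(mark='bamboo', memory_depth=40, layer_num=3, hidden_dim=80,
              learning_rate=0.001, activation='relu')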
Example #23
import os
import pandas as pd
import numpy as np
from pathlib import Path
from utils.utils import *
import time
from models.neural_net import NeuralNet

test_number = 1
save_path = "./tmp/" + str(test_number)
Config = load_config("config.yaml", save_path)

X, Y = load_dataset(year=1, shuffle=True)

imputer = Imputer(strategy='mean')
normalizer = Normalizer(strategy="l2", norm_axis=1)
processor = Processor(X, Y, 15, 10, "regularize", 64, True)

X = imputer.fit_transform(abstracts=X)
X = normalizer.transform(abstracts=X)

# x_train, y_train, x_dev, y_dev, x_test, y_test = processor.split_data()
# print(x_train.shape, y_train.shape, x_dev.shape, y_dev.shape, x_test.shape, y_test.shape)

neural_net = NeuralNet(**Config["HyperParameters"], **Config["Processor"],
                       **Config["Progress"])

neural_net.initialize_params(X, Y, 2)
neural_net.train()
Example #24
    print(model_tag)

    if model_tag == 'rfc':
        print("creating model")
        model = RandomForestModel(dataset, labels)
        model.train()
        accuracy = model.test()

    elif model_tag == 'dtc':
        model = DecisionTreeModel(dataset, labels)
        model.train()
        accuracy = model.test()

    elif model_tag == 'nn':
        model = NeuralNet(dataset, labels)
        accuracy = model.test()

    elif model_tag == 'cnn':
        model = ConvNet(conv_dataset, labels)
        accuracy = model.test()

    elif model_tag == 'xgb':
        model = XGB(dataset, labels)
        model.train()
        accuracy = model.test()
        model.crossval()
        # model.gridSearch()
Example #25
from computer import Computer
from flask import Flask, render_template, request, Response
import time
import keras
from tensorflow.python.keras.backend import set_session
from models.neural_net import NeuralNet

import tensorflow as tf

sess = keras.backend.get_session()
graph = tf.get_default_graph()
set_session(sess)

ml_model_file = 'test_10000_bsize64_epochs50'
ml_model = keras.models.load_model(f'models/trained_models/{ml_model_file}')
model = NeuralNet()
model.model = ml_model

app = Flask(__name__)

@app.route("/", methods=['GET', 'POST'])
def create_chess():
    print(game.board.state.legal_moves)
    return '''
        <img src="/board">
        <p><form action="/move"><input type=text name=move><input type=submit value="Make move"></form></p>
        <p><form action="/reset"><input type=submit value="Reset"></form></p>
        <p><form action="/undo"><input type=submit value="Undo Move"></form></p>
        <p> Latest computer move: {} </p>
    '''.format(game.board.state.peek().uci() if game.board.state.move_stack else "Null")