Example #1
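    # Dense network: 6 inputs, three 128-unit ReLU hidden layers, one linear output per action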
    def create_model(self):
        model = nn.Model()

        model.add(nn.Layer_Dense(6, 128))
        model.add(nn.Activation_ReLU())

        model.add(nn.Layer_Dense(128, 128))
        model.add(nn.Activation_ReLU())
        model.add(nn.Layer_Dense(128, 128))
        model.add(nn.Activation_ReLU())

        model.add(nn.Layer_Dense(128, env.ACTION_SPACE_SIZE))
        model.add(nn.Activation_Linear())

        model.set(loss=nn.Loss_MeanSquaredError(),
                  optimizer=nn.Optimizer_Adam(learning_rate=0.0005),
                  accuracy=nn.Accuracy_Regression())
        model.finalize()
        return model
Example #2
def main():
    path = get_path(file)
    male, female = loader.read_bmi(path)

    dataset = male
    plot(dataset, title='Data', show=True)
    n_in = len(dataset.inputs[0])
    n_out = len(dataset.outputs[0])

    train_data, test_data = split(dataset, test_size=0.4)

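    # Fully connected network with sigmoid activations: n_in -> 32 -> 8 -> n_out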
    model = nn.Model(nn.Linear(n_in, 32, nn.sigmoid),
                     nn.Linear(32, 8, nn.sigmoid),
                     nn.Linear(8, n_out, nn.sigmoid))

    bmi = nn.Classifier(model)
    bmi.train(train_data, test_data, target_acc=0.92)

    res = result(bmi, dataset)

    plot(dataset, categories, n_cols=2, title='Data')
    plot(res, categories, title='Prediction', show=True, num=2, n_cols=2)
Example #3
import gym
import math
import matplotlib.pyplot as plt
import numpy as np

import nn


agent = nn.Model()

env = gym.make('CartPole-v0')
state = env.reset()
done = False

scores = []


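# run 200 CartPole episodes (up to 100 steps each), training the agent after every step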
for i in range(200):
    score = 0
    state = env.reset()
    for j in range(100):
        env.render()

        action = agent.predict(state)
        state_, r, done, info = env.step(action)

        if score > 10:
            agent.remember(state, r, action, done, state_)
        agent.train()

        score += 1
        state = state_

        if done:
            break

    scores.append(score)
Example #4
import matplotlib.pyplot as plt 
import numpy as np 
import gym 

import multiprocessing as mp 
import sys

import nn

env = gym.make('CarRacing-v0')

# plt.ion() 

# define network architecture 
x = i = nn.Input((2*96//8*96//8*3//3,)) 
x = nn.Dense(20)(x) 
x = nn.Dense(3)(x) 
net = nn.Model(i, x) 
del x, i 

# vectorized weights and original shape information 
outw, outs = nn.get_vectorized_weights(net) 

# run car racing problem 
def fitness_car_race(w, render: bool=False, steps=1000): 
    score = 0

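    # load candidate weight vector w into the shared network before evaluating it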
    nn.set_vectorized_weights(net, w, outs) 

    n = 2

    for _ in range(n): 
        # env._max_episode_steps = steps
Example #5
    def ask(self):
        cppns = self.neat.ask()
        self.gen = self.neat.gen
        return [self.create_network(cppn) for cppn in cppns]

    def tell(self, scores: list):
        self.neat.tell(scores)
        self.gen = self.neat.gen


if __name__ == "__main__":
    i = x = nn.Input((2, ))
    x = nn.Dense(2, activation='sigmoid')(x)
    x = nn.Dense(1, activation='sigmoid')(x)
    x = nn.Model(i, x)
    m_cfg = x.get_config()
    del i, x

    pop = None
    fit = None

    attempts = 100
    success = 0
    gens = 0

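    # run repeated independent HyperNeat trials against the same network config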
    for i in range(attempts):
        hn = HyperNeat(
            m_cfg, {
                'n_pop': 500,
                'max_species': 100,
Example #6
def generatePredictionScore(model, X, Y):
    predictions = model.predict(X)
    return getPredFrameAndScore(Y, predictions)


train, label, test = load_data(data_dir)

X = scale_data(train)
y = onehot_labels(label)

Xtrain = X[:59000]
Ytrain = y[:59000]

Xtest = X[59000:60000]  #unseen
Ytest = np.array(label)[59000:60000]  #unseen

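# layer widths for the network; the final 10 matches the number of one-hot label classes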
layers_dim = [128, 64, 32, 16, 8, 8, 10]

model = nn.Model(layers_dim)
model.train(Xtrain,
            Ytrain,
            num_passes=3001,
            epsilon=0.0002,
            reg_lambda=0.01,
            print_loss=True)

testUnseenFrame, testUnseenAcc = generatePredictionScore(model, Xtest, Ytest)

# Verify it works by testing a subset of the training data
testSeenFrame, testSeenAcc = generatePredictionScore(model, Xtrain[:5000],
                                                     np.array(label)[:5000])
Example #7
import numpy as np
import nn
from DataPreProcessing import DataPreProcessing as Data

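# five dense layers mapping 4 input features down to a single output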
model = nn.Model(nn.Layer(size=(4, 5), activation='ReLU'),
                 nn.Layer(size=(5, 3), activation='ReLU'),
                 nn.Layer(size=(3, 10), activation='sigmoid'),
                 nn.Layer(size=(10, 6), activation='ReLU'),
                 nn.Layer(size=(6, 1), activation='ReLU'))

# import and preprocess data
x, label = Data.get_data("data_banknote_authentication.csv")
x = Data.normalize(x)
X_train, X_test, label_train, label_test = Data.split_data(x, label)

# Train the model
model.fit(X_train,
          label_train,
          'SGD',
          'MSE',
          alpha=0.0001,
          epoch=15,
          graph_on=True)

# evaluate the model
accuracy, f1_score, confusion_matrix = model.evaluate(
    X_test,
    label_test,
    metric=['accuracy', 'f1 score', 'confusion matrix'])
print(f"accuracy: {accuracy}")
print(f"f1_score: {f1_score}")
Example #8
epoch_size = 1000

print("Reading data")

# random sample input data
X = ds.read_train_images(epoch_size)

y = ds.read_train_labels(epoch_size)

X_test_images = ds.read_validation_images()
y_test_labels = ds.read_validation_labels()

print("Processing data")
p = []
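# pair each flattened 28x28 image with its 10x1 one-hot label vector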
for col in range(epoch_size):
    entry = (X[:, col].reshape(28 * 28, 1), y[:, col].reshape(10, 1))
    p.append(entry)

validation_data = []
for col in range(10000):
    entry = (X_test_images[:, col].reshape(28 * 28, 1),
             y_test_labels[:, col].reshape(10, 1))
    validation_data.append(entry)

model = nn.Model([30], 28 * 28, 10)

print("Fitting model")
model.fit(p, 3, 5, 10, validation_data)