Example #1
    def train(self):
        # Tell the neural network that all integers in (1..input) should map
        # to output.
        dataset = []
        n_inputs = 0
        for i in range(1, self.input + 1):
            example = self.make_input(i)
            n_inputs = len(example)
            dataset.append(Instance(example, self.output))

        settings = {
            "initial_bias_value": self.bias,
            "n_inputs": n_inputs,

            # The neural network in the challenge has two layers.
            "layers": [(3, sigmoid_function),
                       (len(self.output), sigmoid_function)]
        }

        network = NeuralNet(settings)
        training_set = dataset
        test_set = dataset
        cost_function = binary_cross_entropy_cost
        backpropagation(
            network,  # the network to train
            training_set,  # specify the training set
            test_set,  # specify the test set
            cost_function,  # specify the cost function to calculate error
            max_iterations=20000)

        self.network = network
        self.weights = self.network.weights
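
A hedged follow-up sketch for the same class: querying the trained network on a single integer, assuming make_input produces the same encoding used during training (predict is nimblenet's standard inference call, as used in Example #13):

    def predict_for(self, i):
        # Sketch: encode the integer and run it through the trained network.
        return self.network.predict([Instance(self.make_input(i))])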
Example #2
def initialize_network():
    # Train
    data, target = get_train_data(dataset_location)

    if data.dtype != float:
        data = StandardScaler().fit_transform(data)
        print('Dataset scaled')

    train = []
    for i in range(len(data)):
        temp = np.array(data[i], dtype='float64')
        train.append(Instance(temp, [float(target[i])]))

    # Evaluation
    data, target = get_validation_data(dataset_location)

    if data.dtype != float:
        data = StandardScaler().fit_transform(data, target)
        print('Dataset scaled')

    evaluation = []
    for i in range(len(data)):
        temp = np.array(data[i], dtype='float64')
        evaluation.append(Instance(temp, [float(target[i])]))

    settings = {
        "n_inputs": len(data[0]),
        "layers": [(80, tanh_function), (70, ReLU_function), (60, tanh_function),
                   (50, ReLU_function), (40, tanh_function), (30, ReLU_function),
                   (20, tanh_function), (10, tanh_function), (1, ReLU_function)]
    }

    temp_network = NeuralNet(settings)
    training_set = train
    test_set = evaluation
    cost_function = cross_entropy_cost

    scaled_conjugate_gradient(
        temp_network,  # the network to train
        training_set,  # specify the training set
        test_set,  # specify the test set
        cost_function,  # specify the cost function to calculate error
        print_rate=1,
        save_trained_network=True)

    return temp_network
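
A hedged caller sketch: train once via initialize_network, then predict. The zero vector below is a placeholder; a real input must be an already-scaled feature row whose width equals the n_inputs used during training.

network = initialize_network()
row = np.zeros(10, dtype='float64')  # placeholder width; must match the training n_inputs
print(network.predict([Instance(row)]))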
Example #3
def main():
    training_dataset = produce_dataset(
        os.path.join(os.path.dirname(os.path.realpath(__file__)),
                     'training_data', 'pickle', 'training_playlists',
                     'training_4.txt'))
    test_dataset = produce_dataset(
        os.path.join(os.path.dirname(os.path.realpath(__file__)),
                     'training_data', 'pickle', 'test_playlists',
                     'test_4.txt'))

    settings = {
        # Required settings
        "n_inputs": 8,  # Number of input signals
        "layers": [
            (8, sigmoid_function),  # First Hidden Layer (number of nodes, activation function)
            (1, sigmoid_function)   # Output layer
        ],

        # Optional settings
        "initial_bias_value": 0.0,
        "weights_low": -0.1,
        "weights_high": 0.1
    }

    network = NeuralNet.load_network_from_file("%s.pkl" % "training54point1")
    training_set = training_dataset
    test_set = test_dataset
    cost_function = cross_entropy_cost

    RMSprop(network,
            training_set,
            test_set,
            cost_function,
            ERROR_LIMIT=0.1,
            max_iterations=100000,
            batch_size=400)
    network.save_network_to_file("%s.pkl" % "training2")
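
The retrained network is written to training2.pkl; a later run can restore it with the same loader used above for training54point1.pkl:

# Reload the snapshot saved above and reuse it without retraining.
network = NeuralNet.load_network_from_file("training2.pkl")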
Example #4
test_set = []  # collect labelled test instances parsed from the CSV
g = open('test.csv', 'rt')
try:
    reader = csv.reader(g)
    for row in reader:
        X = list(map(int, row[:625]))
        Y = list(map(int, row[625:]))
        test_set.append(Instance(X, Y))
finally:
    g.close()

settings = {
    "n_inputs": col_len,
    "layers": [(666, softmax_function), (row_len, softmax_function)],
}

network = NeuralNet(settings)
cost_function = softmax_neg_loss

RMSprop2(network,
         training_set,
         test_set,
         cost_function,
         learning_rate=0.01,
         print_rate=10,
         ERROR_LIMIT=0.2)

#print_test( network, training_set, cost_function )
"""
Prediction Example
"""
prediction_set = test_set
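
The docstring above announces a prediction example, but the snippet is cut off there. Under nimblenet's predict call (used verbatim in Example #13), the continuation would plausibly be:

# Run the trained classifier over the held-out instances.
predictions = network.predict(prediction_set)
print(predictions)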
Example #5
    "layers": [(int(inputLayerSize * 0.4), sigmoid_function),
               (outputLayerSize, sigmoid_function)],
    # [ (number_of_neurons, activation_function) ]
    # The last pair in the list dictate the number of output signals

    # Optional settings
    "initial_bias_value":
    0.0,
    "weights_low":
    -0.1,  # Lower bound on the initial weight value
    "weights_high":
    0.1,  # Upper bound on the initial weight value
}

# initialize the neural network
network = NeuralNet(settings)
network.check_gradient(training_data, cost_function)
network.save_network_to_file("res/network_1.pckl")
## load a stored network configuration
# network           = NeuralNet.load_network_from_file( "res/network_1.pckl")

# Train the network using backpropagation
RMSprop(
    network,  # the network to train
    training_data,  # specify the training set
    test_data,  # specify the test set
    cost_function,  # specify the cost function to calculate error
    ERROR_LIMIT=0.1,  # define an acceptable error limit
    # max_iterations = 100,  # continues until the error limit is reached if this argument is skipped
    batch_size=0,  # 1 := no batch learning, 0 := entire training set as a batch, anything else := batch size
)
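
Once training stops, the network can be persisted next to the pre-training snapshot written earlier in this snippet, using the same save call:

# Persist the trained weights; the earlier save captured the untrained network.
network.save_network_to_file("res/network_1.pckl")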
Example #6
f = ord('f') / 256.0
l = ord('l') / 256.0
a = ord('a') / 256.0
g = ord('g') / 256.0

dataset = [
    Instance(ins, [f, l, a, g])
]

settings = {
    "n_inputs" : 3,
    "layers" : [(4, sigmoid_function)] * 1
}

network        = NeuralNet(settings)
training_set   = dataset
test_set       = dataset
cost_function  = sum_squared_error

scipyoptimize(
    network,           # the network to train
    training_set,      # specify the training set
    test_set,          # specify the test set
    cost_function,     # specify the cost function to calculate error
)

layer = network.weights[0]

conn = remote('chal1.swampctf.com', 1900)
conn.sendline('1')
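
The first layer's weight matrix is presumably what the remote challenge service asks for next; a hedged sketch of reporting it (the exact wire format is an assumption, not taken from the challenge):

# Hypothetical: send each learned weight to the service, one per line.
for weight in layer.flatten():
    conn.sendline(str(weight))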
Example #7
        trainingset, training_classes = wfdb.get_training_set()
        testset, test_classes = wfdb.get_organized(100)
        testset = np.transpose(testset)

        # create the network
        settings = {
            # Required settings
            "n_inputs": data_reduction,                                              # Number of network input signals
            "layers": [(hidden_nodes, sigmoid_function),
                       (4, sigmoid_function)],   # [ (number_of_neurons, activation_function) ]
            # Optional settings
            "initial_bias_value": 0.0,
            "weights_low": -0.1,                                        # Lower bound on the initial weight value
            "weights_high": 0.1,                                        # Upper bound on the initial weight value
        }
        network = NeuralNet(settings)
        expected_output = [
            [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]
        ]
        # Training the net
        test_set = [Instance(testset[i, :], expected_output[test_classes[i]]) for i in range(0, len(testset))]
        training_set = [Instance(trainingset[i, :], expected_output[training_classes[i]]) for i in range(0, len(trainingset))]
        cost_function = sum_squared_error
        print('Starting to train...')
        backpropagation(
            # Required parameters
            network,                     # the neural network instance to train
            training_set,                # the training dataset
            test_set,                    # the test dataset
            cost_function,               # the cost function to optimize
            # Optional parameters
        )
Example #8
    test_data           = dataset

    layers = [ (hiddenNodes, sigmoid_function) for i in range(settings.numLayers) ]
    layers.append((outputNodes, sigmoid_function))
    print("Layers: {}".format(layers))
    mysettings            = {
        "n_inputs"              : inputNodes,       # Number of network input signals
        "layers"                : layers,
        "initial_bias_value"    : 0.01,
        "weights_low"           : -0.3,     # Lower bound on the initial weight value
        "weights_high"          : 0.3,
    }


    # initialize the neural network
    network             = NeuralNet( mysettings )

    network.check_gradient( training_data, cost_function )

    # Train the network using backpropagation
    learningAlgorithm(
            network,                            # the network to train
            training_data,                      # specify the training set
            test_data,                          # specify the test set
            cost_function,                      # specify the cost function to calculate error

            ERROR_LIMIT             = errorCriterion,     # define an acceptable error limit
            max_iterations         = maxIterations,      # continues until the error limit is reached if this argument is skipped

            batch_size              = batch_size,        # 1 := no batch learning, 0 := entire trainingset as a batch, anything else := batch size
            print_rate              = printRate,     # print error status every `print_rate` epoch.
    )
Example #9
cost_function = cross_entropy_cost

settings = {
    # Required settings
    "n_inputs": 14,  # Number of network input signals
    "layers": [(5, sigmoid_function), (4, sigmoid_function)],
    # [ (number_of_neurons, activation_function) ]
    # The last pair in the list dictates the number of output signals

    # Optional settings
    "weights_low": -0.1,  # Lower bound on the initial weight value
    "weights_high": 0.4,  # Upper bound on the initial weight value
}

# initialize the neural network
network = NeuralNet(settings)
network.check_gradient(training_data, cost_function)

## load a stored network configuration
network = NeuralNet.load_network_from_file("redsk_00009.pkl")

ctx = Context()
ctx.init()

# Create the user generator
user = UserGenerator()
user.create(ctx)

# Get the image
depth = DepthGenerator()
depth.create(ctx)
Example #10
# Training the net
test_set = [Instance(testset[i, :], expected_output[test_classes[i]]) for i in range(0, len(testset))]
training_set = [Instance(trainingset[i, :], expected_output[training_classes[i]]) for i in range(0, len(trainingset))]

# create the network
settings = {
    # Required settings
    "n_inputs": data_reduction,                                              # Number of network input signals
    "layers": [(hidden_nodes, sigmoid_function),
               (4, sigmoid_function)],   # [ (number_of_neurons, activation_function) ]
    # Optional settings
    "initial_bias_value": 0.0,
    "weights_low": -0.1,                                        # Lower bound on the initial weight value
    "weights_high": 0.1,                                        # Upper bound on the initial weight value
}
network = NeuralNet(settings)

cost_function = sum_squared_error
print('Starting to train...')
backpropagation(
    # Required parameters
    network,                     # the neural network instance to train
    training_set,                # the training dataset
    test_set,                    # the test dataset
    cost_function,               # the cost function to optimize
    # Optional parameters
    ERROR_LIMIT=1e-3,                           # Error tolerance when terminating the learning
    max_iterations=(),                          # Regardless of the achieved error, terminate after max_iterations epochs. Default: infinite
    batch_size=0,                               # Set the batch size. 0 implies using the entire training_set as a batch, 1 equals no batch learning, and any other number dictate the batch size
    input_layer_dropout=input_layer_dropout,    # Dropout fraction of the input layer
    hidden_layer_dropout=hidden_layer_dropout,  # Dropout fraction of the hidden layer(s)
)
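
To sanity-check the trained model against the held-out set, nimblenet's print_test helper (imported in Example #16 and referenced in the commented-out call in Example #4) reports the error under the same cost function:

from nimblenet.tools import print_test

# Report the network's error on the test set.
print_test(network, test_set, cost_function)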
Example #11
cost_function       = cross_entropy_cost
settings            = {
    # Required settings
    "n_inputs"              : 2,       # Number of network input signals
    "layers"                : [  (3, sigmoid_function), (1, sigmoid_function) ],
                                        # [ (number_of_neurons, activation_function) ]
                                        # The last pair in the list dictates the number of output signals
    
    # Optional settings
    "weights_low"           : -0.1,     # Lower bound on the initial weight value
    "weights_high"          : 0.1,      # Upper bound on the initial weight value
}


# initialize the neural network
network             = NeuralNet( settings )
network.check_gradient( training_data, cost_function )



## load a stored network configuration
# network           = NeuralNet.load_network_from_file( "network0.pkl" )


# Train the network using backpropagation
backpropagation(
        network,                        # the network to train
        training_data,                  # specify the training set
        test_data,                      # specify the test set
        cost_function,                  # specify the cost function to calculate error
)
Example #12
def load_network():
    return NeuralNet.load_network_from_file(network_file)
Example #13
import sys
from Playlist import Playlist
from nimblenet.data_structures import Instance
from nimblenet.neuralnet import NeuralNet
from playlist_recommender_utilities import *

username = sys.argv[1]
playlist = sys.argv[2]

base_playlist = Playlist(username, playlist)
base_playlist.get_playlist()
base_playlist.generate_playlist_vector()
base_playlist.generate_normalized_aggregate_vector()

# Function to return the 100 songs closest to the playlist's normalized aggregate vector
hundred_song_set = gather_hundred_songs(base_playlist)

prediction_set = create_prediction_data(hundred_song_set)
network = NeuralNet.load_network_from_file("%s.pkl" % "training54point1")
recommended_values = network.predict(prediction_set)
returned_playlist = generate_top_twentyfive(recommended_values,
                                            hundred_song_set)

for song in returned_playlist:
    print(song.attributes['title'])
Example #14
from nimblenet.activation_functions import sigmoid_function
from nimblenet.cost_functions import cross_entropy_cost
from nimblenet.learning_algorithms import RMSprop
from nimblenet.data_structures import Instance
from nimblenet.neuralnet import NeuralNet

dataset        = [
	Instance( [0,0], [0] ), Instance( [1,0], [1] ), Instance( [0,1], [1] ), Instance( [1,1], [0] )
]

settings       = {
	"n_inputs" : 2,
	"layers"   : [  (2, sigmoid_function), (1, sigmoid_function) ]
}

network        = NeuralNet( settings )
# In practice, use ~80% of the dataset as training data
# and the remainder as test data; this simple XOR example
# reuses the full dataset for both.
training_set   = dataset
test_set       = dataset
cost_function  = cross_entropy_cost

# training neural nets
RMSprop(
	network,           # the network to train
	training_set,      # specify the training set
	test_set,          # specify the test set
	cost_function,     # specify the cost function to calculate error

	max_iterations = 5000, # specify the number of epochs
)
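
Once trained, the network can be queried with unlabeled Instances (targets omitted), mirroring the prediction step in Example #13:

# Query the trained XOR network on unlabeled inputs.
prediction_set = [Instance([0, 1]), Instance([1, 0])]
print(network.predict(prediction_set))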
Example #15
cost_function = cross_entropy_cost
settings = {
    # Required settings
    "n_inputs": 2,  # Number of network input signals
    "layers": [(3, sigmoid_function), (1, sigmoid_function)],
    # [ (number_of_neurons, activation_function) ]
    # The last pair in the list dictates the number of output signals

    # Optional settings
    "initial_bias_value": 0.0,
    "weights_low": -0.1,  # Lower bound on the initial weight value
    "weights_high": 0.1,  # Upper bound on the initial weight value
}

# initialize the neural network
network = NeuralNet(settings)
network.check_gradient(training_data, cost_function)

## load a stored network configuration
# network           = NeuralNet.load_network_from_file( "network0.pkl" )

# Train the network using backpropagation
RMSprop(
    network,  # the network to train
    training_data,  # specify the training set
    test_data,  # specify the test set
    cost_function,  # specify the cost function to calculate error
    ERROR_LIMIT=1e-2,  # define an acceptable error limit
    # max_iterations = 100,  # continues until the error limit is reached if this argument is skipped
    batch_size=0,  # 1 := no batch learning, 0 := entire training set as a batch, anything else := batch size
)
Example #16
from nimblenet.neuralnet import NeuralNet
from nimblenet.data_structures import Instance
from nimblenet.tools import print_test
import random
import smbus
import math
import time
import os
import RPi.GPIO as GPIO
import requests
import json
import datetime
import numpy as np
##-------------------BELLE--------------------

## Loads Belle's beautiful brain
network = NeuralNet.load_network_from_file("Belle_5.pkl")

# Print a network test
#print_test( network, training_data, cost_function )

##------------------LISA--------------------
# Power management registers

power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c

gyro_scale = 131.0
accel_scale = 16384.0

address = 0x68  # This is the address value read via the i2cdetect command
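
The registers above belong to the InvenSense MPU-6050 IMU, which powers up in sleep mode; before any gyro or accelerometer reads, the sleep bit in power_mgmt_1 must be cleared. A minimal wake-up sketch using the smbus module imported above (bus number 1 is an assumption; the earliest Pi revisions use bus 0):

bus = smbus.SMBus(1)  # I2C bus 1 on most Raspberry Pi models (assumption)
bus.write_byte_data(address, power_mgmt_1, 0)  # clear the sleep bit to wake the MPU-6050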
Example #17
def carvedToWritten(n):
    r = ui(0)
    for i in arange(64, dtype=ui):
        a = ui(0)
        for j in reversed(arange(n + 1, dtype=ui)):
            b = ui(0)
            for k in arange(i + 1, dtype=ui):
                c = a ^ (((i & n & ~j) | (i & ~n & j) & ONE) << k)
                a ^= (j & (ONE << k)) ^ b
                b = (((c & j) | (c & b) | (j & b)) & (ONE << k)) << ONE
        r |= (a & (ONE << i))
    return r


sample_x = []
sample_y = [ui(randint(1, (2**5) - 1)) for _ in range(SAMPLE_SIZE)]
# generate sample x
for instance in sample_y:
    sample_x.append(
        list(map(int, "{0:064b}".format(carvedToWritten(instance)))))
# adapt sample y
sample_y = [list(map(int, "{0:064b}".format(y))) for y in sample_y]
# generate dataset
dataset = [Instance(x, y) for x, y in zip(sample_x, sample_y)]
# config
settings = {
    "n_inputs": 64,
    "layers": [(2, sigmoid_function), (1, sigmoid_function)]
}

RMSprop(NeuralNet(settings), dataset, dataset, cross_entropy_cost)
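
As written, the final call discards the NeuralNet instance, so the trained weights cannot be inspected afterwards. A sketch that keeps a reference and queries it (predict as in Examples #13 and #14):

# Keep the network so it can be queried after training.
network = NeuralNet(settings)
RMSprop(network, dataset, dataset, cross_entropy_cost)
print(network.predict([Instance(sample_x[0])]))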