Example 1
def train_xor_network():
    # two training sets
    training_one = [
        Instance([0, 0], [0]),
        Instance([0, 1], [1]),
        Instance([1, 0], [1]),
        Instance([1, 1], [0])
    ]
    training_two = [
        Instance([0, 0], [0, 0]),
        Instance([0, 1], [1, 1]),
        Instance([1, 0], [1, 1]),
        Instance([1, 1], [0, 0])
    ]

    settings = {
        # Required settings
        "n_inputs": 2,  # Number of network input signals
        "n_outputs": 1,  # Number of desired outputs from the network
        "n_hidden_layers": 1,  # Number of nodes in each hidden layer
        "n_hiddens": 2,  # Number of hidden layers in the network
        "activation_functions": [
            tanh_function, sigmoid_function
        ],  # specify activation functions per layer eg: [ hidden_layer, output_layer ]

        # Optional settings
        "weights_low": -0.1,  # Lower bound on initial weight range
        "weights_high": 0.1,  # Upper bound on initial weight range
        "save_trained_network":
        False,  # Whether to write the trained weights to disk
        "input_layer_dropout": 0.0,  # dropout fraction of the input layer
        "hidden_layer_dropout": 0.1,  # dropout fraction in all hidden layers
        "batch_size":
        0,  # 1 := online learning, 0 := entire trainingset as batch, else := batch learning size
    }

    # initialize the neural network
    global network
    network = NeuralNet(settings)

    # load a stored network configuration
    # network = NeuralNet.load_from_file( "xor_trained_configuration.pkl" )

    # start training on test set one
    network.backpropagation(
        training_one,  # specify the training set
        ERROR_LIMIT=1e-6,  # define an acceptable error limit 
        learning_rate=0.03,  # learning rate
        momentum_factor=0.95  # momentum
    )

    # Test the network by looping through the specified dataset and print the results.
    for instance in training_one:
        print "Input: {features} -> Output: {output} \t| target: {target}".format(
            features=str(instance.features),
            output=str(network.update(np.array([instance.features]))),
            target=str(instance.targets))

    # save the trained network
    network.save_to_file("networks/XOR_Operator/XOR_Operator.obj")
Example 2
def train_xor_network():
    # two training sets
    training_one    = [ Instance( [0,0], [0] ), Instance( [0,1], [1] ), Instance( [1,0], [1] ), Instance( [1,1], [0] ) ]
    training_two    = [ Instance( [0,0], [0,0] ), Instance( [0,1], [1,1] ), Instance( [1,0], [1,1] ), Instance( [1,1], [0,0] ) ]

    settings = {
        # Required settings
        "n_inputs"              : 2,        # Number of network input signals
        "n_outputs"             : 1,        # Number of desired outputs from the network
        "n_hidden_layers"       : 1,        # Number of nodes in each hidden layer
        "n_hiddens"             : 2,        # Number of hidden layers in the network
        "activation_functions"  : [ tanh_function, sigmoid_function ], # specify activation functions per layer eg: [ hidden_layer, output_layer ]
        
        # Optional settings
        "weights_low"           : -0.1,     # Lower bound on initial weight range
        "weights_high"          : 0.1,      # Upper bound on initial weight range
        "save_trained_network"  : False,    # Whether to write the trained weights to disk
        
        "input_layer_dropout"   : 0.0,      # dropout fraction of the input layer
        "hidden_layer_dropout"  : 0.1,      # dropout fraction in all hidden layers
        
        "batch_size"            : 0,        # 1 := online learning, 0 := entire trainingset as batch, else := batch learning size
    }


    # initialize the neural network
    global network 
    network = NeuralNet( settings )

    # load a stored network configuration
    # network = NeuralNet.load_from_file( "xor_trained_configuration.pkl" )


    # start training on test set one
    network.backpropagation( 
                    training_one,           # specify the training set
                    ERROR_LIMIT     = 1e-6, # define an acceptable error limit 
                    learning_rate   = 0.03, # learning rate
                    momentum_factor = 0.95  # momentum
                )


    # Test the network by looping through the specified dataset and print the results.
    for instance in training_one:
        print "Input: {features} -> Output: {output} \t| target: {target}".format( 
                    features = str(instance.features), 
                    output   = str(network.update( np.array([instance.features]) )), 
                    target   = str(instance.targets)
                )

    # save the trained network
    network.save_to_file("networks/XOR_Operator/XOR_Operator.obj")
Example 3
    Instance([1, 1], [0, 0])
]

n_inputs = 2
n_outputs = 1
n_hiddens = 2
n_hidden_layers = 1

# specify activation functions per layer eg: [ hidden_layer_1, hidden_layer_2, output_layer ]
activation_functions = [tanh_function] * n_hidden_layers + [sigmoid_function]

# initialize the neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers,
                    activation_functions)

# start training on test set one
network.backpropagation(training_one,
                        ERROR_LIMIT=1e-4,
                        learning_rate=0.3,
                        momentum_factor=0.9)

# save the trained network
network.save_to_file("trained_configuration.pkl")

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

# print out the result
for instance in training_one:
    print instance.features, network.update(np.array(
        [instance.features])), "\ttarget:", instance.targets
Example 4
training_one = [
    Instance([0, 0], [0]),
    Instance([0, 1], [1]),
    Instance([1, 0], [1]),
    Instance([1, 1], [0])
]
training_two = [
    Instance([0, 0], [0, 0]),
    Instance([0, 1], [1, 1]),
    Instance([1, 0], [1, 1]),
    Instance([1, 1], [0, 0])
]

n_inputs = 2
n_outputs = 1
n_hiddens = 2
n_hidden_layers = 1

# specify activation functions per layer
activation_functions = [tanh_function] * n_hidden_layers + [sigmoid_function]

# initialize your neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers,
                    activation_functions)

# start training
network.backpropagation(training_one, ERROR_LIMIT=1e-4)

for instance in training_one:
    print instance.features, network.update(np.array(
        [instance.features])), "\ttarget:", instance.targets
Example 5
n_hidden_layers = 2  # number of hidden layers
# here: 2 hidden layers with 8 nodes each and 1 output layer with 3 nodes

#------------------------ Declaration of activation (transfer) functions at each layer ------------------------#
# specify activation functions per layer eg: [ hidden_layer_1, hidden_layer_2, output_layer ]
activation_functions = [
    symmetric_elliot_function,
] * n_hidden_layers + [sigmoid_function]

# initialize the neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers,
                    activation_functions)
# network is an instance of class NeuralNet

# start training on test set one
network.backpropagation(training_one,
                        ERROR_LIMIT=.05,
                        learning_rate=0.2,
                        momentum_factor=0.2)

# save the trained network
network.save_to_file("trained_configuration.pkl")

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

# print out the result
for instance in training_one:
    print instance.features, network.forwordProp(np.array(
        [instance.features])), "\ttarget:", instance.targets
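symmetric_elliot_function is the Elliott activation, a cheap rational approximation of tanh that needs no exponential. If the library import is unavailable, it can be sketched as follows; the (signal, derivative=False) signature is an assumption about how this library's activation functions are called:

import numpy as np

def symmetric_elliot_function(signal, derivative=False):
    # Elliott symmetric sigmoid: x / (1 + |x|), squashes into (-1, 1) like tanh
    s = 1.0  # steepness
    abs_signal = 1 + np.abs(signal * s)
    if derivative:
        return s / abs_signal**2
    return (signal * s) / abs_signal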
Example 6
    "input_layer_dropout": 0.0,  # dropout fraction of the input layer
    "hidden_layer_dropout": 0.0,  # dropout fraction in all hidden layers
}

# initialize the neural network
network = NeuralNet(settings)

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

## Train the network using backpropagation
network.backpropagation(
    training_one,  # specify the training set
    ERROR_LIMIT=1e-3,  # define an acceptable error limit 
    #max_iterations  = 100, # continues until the error limit is reached if this argument is omitted

    # optional parameters
    learning_rate=0.06,  # learning rate
    momentum_factor=0.9,  # momentum
)

# Train the network using SciPy
#network.scipyoptimize(
#                training_one,
#                method = "Newton-CG",
#                ERROR_LIMIT = 1e-4
#            )

## Train the network using Scaled Conjugate Gradient
#network.scg(
#                training_one,
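The SciPy-based alternative is spelled out fully in the comments above; uncommenting it gives a second trainer, exactly as shown:

# alternative: SciPy-based training, as the commented block above spells out
network.scipyoptimize(
                training_one,
                method = "Newton-CG",
                ERROR_LIMIT = 1e-4
            )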
Example 7
    "layers"                : [ (3, tanh_function), (3, sigmoid_function) ],
                                        # [ (number_of_neurons, activation_function) ]
                                        # The last pair in your list describes the number of output signals
    
    # Optional settings
    "weights_low"           : -0.1,     # Lower bound on initial weight range
    "weights_high"          : 0.1,      # Upper bound on initial weight range
    "save_trained_network"  : False,    # Whether to write the trained weights to disk
    
    "input_layer_dropout"   : 0.2,      # dropout fraction of the input layer
    "hidden_layer_dropout"  : 0.5,      # dropout fraction in all hidden layers
}


# initialize the neural network
network = NeuralNet( settings )

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )


# start training on test set one
network.backpropagation( 
                training_wine,           # specify the training set
                ERROR_LIMIT     = 1e-3,  # define an acceptable error limit 
                learning_rate   = 0.03,  # learning rate
                momentum_factor = 0.45,   # momentum
                #max_iterations  = 100,  # continues until the error limit is reached if this argument is omitted
            )

print "Final MSE:", network.test( training_wine )
Example 8
import numpy as np


class Instance:
    def __init__(self, features, target):
        self.features = np.array(features)
        self.targets = np.array(target)
#end Instance


# training set
training_one =  [ Instance( [0,0], [0] ), Instance( [0,1], [1] ), Instance( [1,0], [1] ), Instance( [1,1], [0] ) ]
training_two =  [ Instance( [0,0], [0,0] ), Instance( [0,1], [1,1] ), Instance( [1,0], [1,1] ), Instance( [1,1], [0,0] ) ]

n_inputs = 2
n_outputs = 1
n_hiddens = 2
n_hidden_layers = 1

# specify activation functions per layer
activation_functions = [ tanh_function ]*n_hidden_layers + [ sigmoid_function ]

# initialize your neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers, activation_functions)

# start training
network.backpropagation(training_one, ERROR_LIMIT=1e-4)


for instance in training_one:
    print instance.features, network.update( np.array([instance.features]) ), "\ttarget:", instance.targets
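These snippets assume NeuralNet and the activation functions are already in scope. In the nimblenet packaging of this library the imports would plausibly look like this; the module paths are an assumption, and older versions expose the same names elsewhere:

# plausible imports for the nimblenet packaging of this library (paths are an assumption)
from nimblenet.activation_functions import tanh_function, sigmoid_function
from nimblenet.neuralnet import NeuralNet
import numpy as np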
Example 9
    
    "batch_size"            : 0,        # 1 := online learning, 0 := entire trainingset as batch, else := batch learning size
}


# initialize the neural network
network = NeuralNet( settings )

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )


# start training on test set one
network.backpropagation( 
                training_one,           # specify the training set
                ERROR_LIMIT     = 1e-6, # define an acceptable error limit 
                learning_rate   = 0.03, # learning rate
                momentum_factor = 0.95  # momentum
            )


# Test the network by looping through the specified dataset and print the results.
for instance in training_one:
    print "Input: {features} -> Output: {output} \t| target: {target}".format( 
                features = str(instance.features), 
                output   = str(network.update( np.array([instance.features]) )), 
                target   = str(instance.targets)
            )


if settings.get("save_trained_network", False):
    # save the trained network
Example 10
    False,  # Whether to write the trained weights to disk
    "input_layer_dropout": 0.0,  # dropout fraction of the input layer
    "hidden_layer_dropout": 0.1,  # dropout fraction in all hidden layers
    "batch_size":
    0,  # 1 := online learning, 0 := entire trainingset as batch, else := batch learning size
}

# initialize the neural network
network = NeuralNet(settings)

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

# start training on test set one
network.backpropagation(
    training_one,  # specify the training set
    ERROR_LIMIT=1e-6,  # define an acceptable error limit 
    learning_rate=0.03,  # learning rate
    momentum_factor=0.95  # momentum
)

# Test the network by looping through the specified dataset and print the results.
for instance in training_one:
    print "Input: {features} -> Output: {output} \t| target: {target}".format(
        features=str(instance.features),
        output=str(network.update(np.array([instance.features]))),
        target=str(instance.targets))

if settings.get("save_trained_network", False):
    # save the trained network
    network.save_to_file("trained_configuration.pkl")
Example 11
n_hiddens = 300
n_hidden_layers = 2

print n_inputs

# specify activation functions per layer
activation_functions = [tanh_function] * n_hidden_layers + [sigmoid_function]

# initialize your neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers, activation_functions)

print network
print "...preparation is ready, start training..."

# start training
network.backpropagation(train, ERROR_LIMIT=1e-2, learning_rate=0.3, momentum_factor=0.9)

print "...modle is successfully trained..."


# pandas is required by the DataFrame handling below
import pandas as pd
from pandas import DataFrame


def output(input, target, ind):
    predict = []
    for i in range(ind):
        predict.append(int(round(network.update(input[i, :]))))
        print "predict:", predict[i], "\ttarget:", target[i]
    # float() avoids Python 2 integer division truncating the rate to 0 or 1
    cor = sum([int(predict[k] == target[k]) for k in range(len(predict))]) / float(len(predict))
    print "correct rate is:", cor
    target = DataFrame(target, columns=["target"])
    predict = DataFrame(predict, columns=["predict"])
    result = pd.concat([target, predict], axis=1)
    return result
Example 12
    training_one.append(Instance(inp[i][0], inp[i][1]))  # encapsulate an (input signal, output signal) pair
#------------------------------------------------------------------------------

n_inputs = 4            # number of input features
n_outputs = 3           # number of output neurons
n_hiddens = 8           # number of neurons in each hidden layer
n_hidden_layers = 2     # number of hidden layers
# here: 2 hidden layers with 8 nodes each and 1 output layer with 3 nodes

#------------------------ Declaration of activation (transfer) functions at each layer ------------------------#
# specify activation functions per layer eg: [ hidden_layer_1, hidden_layer_2, output_layer ]
activation_functions = [symmetric_elliot_function,]*n_hidden_layers + [ sigmoid_function ]

# initialize the neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers, activation_functions)
# network is an instance of class NeuralNet

# start training on test set one
network.backpropagation(training_one, ERROR_LIMIT=.05, learning_rate=0.2, momentum_factor=0.2  )

# save the trained network
network.save_to_file( "trained_configuration.pkl" )

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

# print out the result
for instance in training_one:
    print instance.features, network.forwordProp( np.array([instance.features]) ), "\ttarget:", instance.targets
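For reference, the truncated loop at the top of this example expects inp to hold (features, targets) pairs; a hypothetical layout for this 4-input, 3-output network, with made-up values:

# hypothetical data layout for the loop at the top of this example; values are invented
inp = [
    ([0.2, 0.1, 0.7, 0.0], [1, 0, 0]),  # (features, targets)
    ([0.9, 0.4, 0.1, 0.3], [0, 1, 0]),
]
training_one = []
for i in range(len(inp)):
    training_one.append(Instance(inp[i][0], inp[i][1]))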