Code Example #1
 def __init__(self, neuron_id, network):  # drop out
     self.neuron_id = neuron_id
     self.neuron_number = neuron_id[1]
     self.netinput = None
     self.output = None
     self.output_links = []
     self.network = network
     self.original_activation_function = LinearIO()  # drop out
     self.activation_function = self.original_activation_function
     self.probability_of_drop_out = 0.0  # drop out
     self.drop_out_activation_function = ConstantOutput()  # drop out
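The constructor stores both the original activation function and a ConstantOutput substitute, which suggests drop-out is realized by swapping the neuron's activation function per presentation. A minimal sketch of such a toggle, written as a method of the class above (the method name and the random draw are assumptions, not taken from the project's source):

 def set_drop_out_state(self):  # hypothetical helper; assumes "import random" at module level
     # Silence the neuron with probability probability_of_drop_out by
     # swapping in the constant-output function; otherwise restore the
     # original activation function.
     if random.random() < self.probability_of_drop_out:
         self.activation_function = self.drop_out_activation_function
     else:
         self.activation_function = self.original_activation_function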
Code Example #2
 def __init__(self, neuron_id, n_inputs, network, activation_function,
              weight_init_function, learning_rate_function):  # drop out
     self.neuron_id = neuron_id
     self.neuron_number = neuron_id[1]
     self.n_inputs = n_inputs
     self.network = network
     self.links = [
         Link(weight_init_function) for _ in range(0, n_inputs + 1)
     ]  # +1 for bias link
     self.output_links = []
     self.activation_function = activation_function
     self.original_activation_function = activation_function  # drop out
     self.probability_of_drop_out = 0.0  # drop out
     self.drop_out_activation_function = ConstantOutput()  # drop out
     self.weight_init_function = weight_init_function
     self.learning_rate_function = learning_rate_function
     self.error = None
     self.error_at_input = None
     self.netinput = None
     self.output = None
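Each neuron allocates n_inputs + 1 Link objects, the extra one acting as the bias. Under the assumption that a Link exposes a weight attribute, the net input would be the weighted sum of the inputs plus the bias weight; a sketch (the method name and Link.weight are assumptions, not confirmed by the snippet):

 def calc_netinput(self, inputs):  # hypothetical; the real method lives in jorg.neuralnet_v1
     # Pair each input with one of the first n_inputs links; zip stops
     # before the trailing bias link, which is added separately.
     total = sum(link.weight * x for link, x in zip(self.links, inputs))
     total += self.links[-1].weight  # bias link, input implicitly fixed at 1.0
     self.netinput = total
     return total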
Code Example #3
# TODO: implement weight-sharing

from __future__ import division
import math, random
import numpy as np
import matplotlib.pyplot as plt
from pandas import Series, DataFrame
import pandas as pd

from jorg.neuralnet_v1 import (
    NeuralNet, Instance, NetworkDataCollector, weight_init_function_random
)

from jorg.activation_classes import (
    SigmoidIO, LinearIO, ConstantOutput, GaussGauss, Gauss,
    STDNonMonotonicIOFunction
)
sigmoid = SigmoidIO()
linear = LinearIO()
constant = ConstantOutput()
nonmon = STDNonMonotonicIOFunction()

drop_out_prob = 0.5
#disable_2nd_output_neuron = True
n_hidden_neurons = 3
learning_rate = -0.1
rotation = 0.0
n_trials = 10
max_epochs = 3000
error_criterion = 0.01
hidden_neurons_io_function = sigmoid
output_neurons_io_function = sigmoid
data_collection_interval = 1000
n_inputs = 2
n_outputs = 1
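drop_out_prob = 0.5 means each hidden neuron is silenced on half of the training presentations. The effect on a layer's outputs can be illustrated with a self-contained NumPy sketch (an illustration of the idea only, not code from jorg):

import numpy as np

def apply_drop_out(layer_outputs, drop_out_prob=0.5):
    # Zero each output independently with probability drop_out_prob.
    mask = np.random.rand(len(layer_outputs)) >= drop_out_prob
    return layer_outputs * mask

hidden_outputs = np.array([0.7, 0.2, 0.9])   # e.g. three hidden sigmoid outputs
print(apply_drop_out(hidden_outputs))        # on average, half are zeroed

This is the plain zeroing form; inverted dropout would additionally scale the surviving outputs by 1 / (1 - drop_out_prob) to keep the expected activation constant.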
Code Example #4
# TODO: implement weight-sharing

from __future__ import division
import math, random
import numpy as np
import matplotlib.pyplot as plt
from pandas import Series, DataFrame
import pandas as pd

from jorg.neuralnet_v1 import (
    NeuralNet, Instance, NetworkDataCollector, weight_init_function_random
)

from jorg.activation_classes import (
    SigmoidIO, LinearIO, ConstantOutput, GaussGauss, Gauss,
    STDNonMonotonicIOFunction
)
sigmoid = SigmoidIO()
linear = LinearIO()
constant = ConstantOutput()
nonmon = STDNonMonotonicIOFunction()

disable_2nd_output_neuron = False
n_hidden_neurons = 2
learning_rate = -0.5
rotation = 0.0
n_trials = 3
max_epochs = 10000
error_criterion = 0.00001
hidden_neurons_io_function = sigmoid
output_neurons_io_function = sigmoid
data_collection_interval = 1000
n_inputs = 2
n_outputs = 2
n_neurons_for_each_layer = [n_inputs, n_hidden_neurons, n_outputs]
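With n_neurons_for_each_layer = [2, 2, 2] and the "+1 for bias link" per neuron seen in Code Example #2, the total number of trainable weights follows directly. A small, self-contained sketch of that bookkeeping:

n_neurons_for_each_layer = [2, 2, 2]  # [n_inputs, n_hidden_neurons, n_outputs]

# Each neuron carries one link per neuron in the previous layer plus one
# bias link, matching the "+1 for bias link" in the constructor above.
n_weights = sum(
    (n_prev + 1) * n_curr
    for n_prev, n_curr in zip(n_neurons_for_each_layer, n_neurons_for_each_layer[1:])
)
print(n_weights)  # (2+1)*2 + (2+1)*2 = 12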
Code Example #5
File: science_net.py  Project: noisyneurons/nn
    Instance([1.0, 0.0], [1.0, 0.0, 0.0]),
    Instance([1.0, 1.0], [1.0, 1.0, 1.0])
]

n_inputs = 2
n_outputs = 3
n_neurons_for_each_layer = [n_inputs, 6, n_outputs]

# Number of hidden layers: total layers minus the input and output layers.
n_hidden_layers = max(len(n_neurons_for_each_layer) - 2, 0)

# Specify neuron transfer functions, weight-initialization functions, and
# learning-rate functions per layer, e.g. [hidden_layer_1, hidden_layer_2, output_layer].
sigmoid = SigmoidIO()
linear = LinearIO()
constant = ConstantOutput()

neurons_ios = [None] + [sigmoid] * n_hidden_layers + [sigmoid]
weight_init_functions = [
    None
] + [weight_init_function_random] * n_hidden_layers + [
    weight_init_function_random
]
learning_rate_functions = [
    None
] + [learning_rate_function] * n_hidden_layers + [learning_rate_function]

results = []

for seed_value in range(10):
    print "seed = ", seed_value,