Code example #1
0
 def __init__(self, layers, input_size, cost_function):
     """Assemble the network from layer specifications.

     Each entry of ``layers`` is a ``(LayerClass, size, activation)``
     tuple.  All but the last layer are constructed with keyword
     arguments, chaining each layer's input size to the previous
     layer's width; the final layer uses the positional, list-wrapped
     input-size calling convention.
     """
     self.alpha = 0.1  # fixed learning rate
     self.input_size = input_size
     self.layers = []
     fan_in = input_size
     # Hidden layers: each consumes the previous layer's width.
     for LayerCls, width, act_fn in layers[:-1]:
         self.layers.append(
             LayerCls(hidden_size=width, input_size=fan_in, activation=act_fn))
         fan_in = width
     # Output layer: positional, list-wrapped input-size convention.
     LayerCls, width, act_fn = layers[-1]
     self.layers.append(LayerCls(width, [fan_in], activation=act_fn))
     self.cost_function = cost_function
     self.print_parameters()
Code example #2
0
File: ANN.py  Project: achpalaman/nnlib
 def __init__(self, list_sizes, input_size, loss_fn=CrossEntropy):
     """Build a feed-forward network from layer specifications.

     ``list_sizes`` holds ``(LayerClass, size, activation)`` tuples;
     layer input sizes are chained from ``input_size``.  ``loss_fn``
     is a loss *class* and is instantiated here.
     """
     self.alpha = 0.1  # fixed learning rate
     self.layers = []
     fan_in = input_size
     for LayerCls, width, act_fn in list_sizes:
         layer = LayerCls(width, [fan_in], activation=act_fn)
         self.layers.append(layer)
         fan_in = width
     self.loss_fn = loss_fn()
Code example #3
0
 def __init__(self, hidden_size, input_size, activation=ReLU):
     """Build a gated recurrent cell from three sub-layers.

     Every sub-layer receives ``[hidden_size, input_size]`` as its
     input sizes; the update and reset gates are always
     sigmoid-activated, while the candidate layer uses ``activation``.
     """
     self.hidden_size = hidden_size

     def make_sublayer(act):
         # Fresh size list per call, matching the original per-layer lists.
         return Layer(hidden_size, [hidden_size, input_size], activation=act)

     self.layer = make_sublayer(activation)  # candidate-state transform
     self.update = make_sublayer(Sigmoid)    # update gate
     self.reset = make_sublayer(Sigmoid)     # reset gate
     # presumably holds per-timestep state for backprop through time — TODO confirm
     self.stack = []
Code example #4
0
File: RNNLayer.py  Project: achpalaman/nnlib
 def __init__(self, hidden_size, input_size, activation=ReLU):
     """Set up the recurrent layer's core transform.

     NOTE(review): this definition may continue beyond the visible
     excerpt; only the lines shown here are documented.
     """
     self.hidden_size = hidden_size
     # Core transform takes [hidden_size, input_size] as its input sizes.
     self.layer = Layer(hidden_size, [hidden_size, input_size],
                        activation=activation)