Example #1
    def addLayers(self, neurons, activation_fun):
        self.epoch = 0
        log.logNN.info("neurons= " + str(neurons))
        self.nHidden = len(neurons)
        self.layers = []
        self.v = []        # per-layer [0, 0] accumulators for the weight update
        self.act_fun = []  # one activation callable per weight matrix
        # Pick an activation for each hidden layer plus the output layer;
        # every af.* activation takes (x, der), where der=True requests the
        # derivative instead of the function value, so the functions can be
        # appended directly without a wrapping lambda.
        for i in range(self.nHidden + 1):
            if activation_fun[i] == 'relu':
                self.act_fun.append(af.ReLU)
            elif activation_fun[i] == 'sigmoid':
                self.act_fun.append(af.sigmoid)
            elif activation_fun[i] == 'linear':
                self.act_fun.append(af.linear)
            elif activation_fun[i] == 'tanh':
                self.act_fun.append(af.tanh)
            elif activation_fun[i] == 'leakyrelu':
                self.act_fun.append(af.LReLU)

        # He-style random initialisation, scaled by 2 / numEx (the number of
        # training examples) rather than by each layer's fan-in.
        for i in range(self.nHidden):
            n = neurons[i]
            Wh = np.random.randn(N_FEATURES if i == 0 else neurons[i - 1],
                                 n) * math.sqrt(2.0 / self.numEx)
            bWh = np.random.randn(1, n) * math.sqrt(2.0 / self.numEx)
            self.layers.append([Wh, bWh])
            self.v.append([0, 0])
        # Output layer maps the last hidden width to N_CLASSES.
        Wo = np.random.randn(neurons[-1], N_CLASSES) * math.sqrt(
            2.0 / self.numEx)
        bWo = np.random.randn(1, N_CLASSES) * math.sqrt(2.0 / self.numEx)
        self.layers.append([Wo, bWo])
        self.v.append([0, 0])
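
Every activation above is called as af.f(x, der), which implies a shared (x, der) signature across the af module. The module itself is not shown in the source, so the following is only a minimal sketch of two such functions, assuming NumPy inputs and a der flag that defaults to False:

import numpy as np

def sigmoid(x, der=False):
    # Numerically stable logistic sigmoid: exp is only ever applied to
    # non-positive values, so large |x| cannot overflow.
    z = np.exp(-np.abs(x))
    s = np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
    return s * (1.0 - s) if der else s

def ReLU(x, der=False):
    # Rectifier; its derivative is the 0/1 step function.
    return np.where(x > 0, 1.0, 0.0) if der else np.maximum(x, 0.0)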
Example #2
File: NN.py  Project: giosumarin/nn_regr
    def addLayers(self, neurons, activation_fun, weights=None):
        self.epoch = 0
        log.logNN.info("neurons= " + str(neurons))
        self.nHidden = len(neurons)
        self.layers = []
        self.v = [[0, 0] for _ in range(self.nHidden + 1)]  # per-layer accumulators

        act_fun_factory = {
            "relu": lambda x, der: af.ReLU(x, der),
            "sigmoid": lambda x, der: af.sigmoid(x, der),
            "linear": lambda x, der: af.linear(x, der),
            "tanh": lambda x, der: af.tanh(x, der),
            "leakyrelu": lambda x, der: af.LReLU(x, der)
        }
        self.act_fun = [act_fun_factory[f] for f in activation_fun]

        if weights is None:
            # Chain the layer widths: N_FEATURES -> neurons[0] -> ... -> neurons[-1].
            weights_hidden_shapes = list(
                zip([N_FEATURES] + neurons[:-1], neurons))
            weights_hidden = [
                np.random.randn(row, col) * math.sqrt(2.0 / self.numEx)
                for row, col in weights_hidden_shapes
            ]
            #bias_hidden = [np.random.randn(1, n) * math.sqrt(2.0 / self.numEx) for n in neurons]
            #weights_hidden = [np.random.normal(scale=0.05, size=(row, col)) for row, col in weights_hidden_shapes]
            #bias_hidden = [np.random.normal(scale=0.05, size=(1, n)) for n in neurons]
            # Biases start at a small positive constant rather than random values.
            bias_hidden = [np.ones((1, n)) * 0.0001 for n in neurons]
            self.layers = [[w, b] for w, b in zip(weights_hidden, bias_hidden)]
            Wo = np.random.randn(neurons[-1], N_CLASSES) * math.sqrt(
                2.0 / self.numEx)
            #Wo = np.random.normal(scale=0.05,size=(neurons[-1], N_CLASSES))
            # bWo = np.random.randn(1, N_CLASSES) * math.sqrt(2.0 / self.numEx)
            bWo = np.ones((1, N_CLASSES)) * 0.0001
            self.layers.append([Wo, bWo])
        else:
            self.layers = weights
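
For orientation, a hypothetical call site follows. The class name NN and its constructor are assumptions, not taken from the repository; the only visible requirement is that numEx be set before addLayers runs, since both versions scale weights by math.sqrt(2.0 / self.numEx):

net = NN()
net.numEx = 60000  # number of training examples; used as the init scale
# Two hidden layers of 128 and 64 units. activation_fun needs one entry
# per weight matrix, i.e. len(neurons) + 1 entries including the output.
net.addLayers([128, 64], ['relu', 'relu', 'sigmoid'])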
Example #3
    def test_sigmoid_min(self):
        self.assertAlmostEqual(af.sigmoid(-1000), 0)
Example #4
    def test_sigmoid_max(self):
        self.assertAlmostEqual(af.sigmoid(1000), 1)
Example #5
    def test_sigmoid_zero(self):
        self.assertEqual(af.sigmoid(0), 0.5)
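
Standing alone, Examples #3-#5 are fragments of a test class. A self-contained version, assuming the activation module is importable as af and that sigmoid defaults der to False as in the sketch after Example #1:

import unittest
import activation_functions as af  # module name is an assumption

class TestSigmoid(unittest.TestCase):
    # The saturation tests exercise numerical stability at extreme inputs;
    # the zero test pins the exact midpoint value.
    def test_sigmoid_min(self):
        self.assertAlmostEqual(af.sigmoid(-1000), 0)

    def test_sigmoid_max(self):
        self.assertAlmostEqual(af.sigmoid(1000), 1)

    def test_sigmoid_zero(self):
        self.assertEqual(af.sigmoid(0), 0.5)

if __name__ == '__main__':
    unittest.main()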