Example #1
def train(x, y, iterations=10000, learning_rate=0.1):
    global W1, W2, W3, B1, B2, B3, error
    m = x.shape[0]
    error = []

    for _ in range(iterations):
        a0, z1, a1, z2, a2, z3, a3 = forward(x, predict=False)

        da3 = a3 - y.T
        dz3 = da3 * sigmoid(z3, derivative=True)
        dw3 = dz3.dot(a2.T) / m
        db3 = np.sum(dz3, axis=1, keepdims=True) / m

        da2 = W3.T.dot(dz3)
        dz2 = np.multiply(da2, sigmoid(z2, derivative=True))
        dw2 = dz2.dot(a1.T) / m
        db2 = np.sum(dz2, axis=1, keepdims=True) / m

        da1 = W2.T.dot(dz2)
        dz1 = np.multiply(da1, sigmoid(z1, derivative=True))
        dw1 = dz1.dot(a0.T) / m
        db1 = np.sum(dz1, axis=1, keepdims=True) / m

        W1 -= learning_rate * dw1
        B1 -= learning_rate * db1
        W2 -= learning_rate * dw2
        B2 -= learning_rate * db2
        W3 -= learning_rate * dw3
        B3 -= learning_rate * db3

        error.append(np.average(da3**2))  # track the output-layer error, not the hidden-layer term

    return error
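The training loop above calls a sigmoid helper with a derivative flag that is not shown in the listing. A minimal sketch of what that helper presumably looks like (the name and flag match the calls above; the body is an assumption):

import numpy as np

def sigmoid(z, derivative=False):
    # logistic function; with derivative=True, return sigma'(z) = sigma(z) * (1 - sigma(z))
    s = 1.0 / (1.0 + np.exp(-z))
    if derivative:
        return s * (1.0 - s)
    return s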
Example #2
def forward(nn, x):
    W1, W2, W3 = nn["W1"], nn["W2"], nn["W3"]
    b1, b2, b3 = nn["b1"], nn["b2"], nn["b3"]

    z1 = sigmoid(np.dot(x, W1) + b1)
    z2 = sigmoid(np.dot(z1, W2) + b2)
    z3 = np.dot(z2, W3) + b3
    return z3
Example #3
def forward(x, predict=True):
    a0 = x.T
    z1 = W1.dot(a0) + B1
    a1 = sigmoid(z1)
    z2 = W2.dot(a1) + B2
    a2 = sigmoid(z2)
    if predict is False:
        return a0, z1, a1, z2, a2
    return a2
Example #4
 def forward_propagation(self, X):
     #forward propagation through our network
     self.z = np.dot(
         X,
         self.W1)  # dot product of X (input) and first set of 14x9 weights
     self.z2 = sigmoid(self.z)  # activation function
     self.z3 = np.dot(
         self.z2, self.W2
     )  # dot product of hidden layer (z2) and second set of 9x1 weights
     output = sigmoid(self.z3)  # final activation function
     return output
Example #5
def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y  = identity(a3)
    return y
Example #6
def forward(network, x):
    Wait1, Wait2, Wait3 = network['Wait1'], network['Wait2'], network['Wait3']
    bias1, bias2, bias3 = network['bias1'], network['bias2'], network['bias3']

    a1 = np.dot(x, Wait1) + bias1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, Wait2) + bias2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, Wait3) + bias3
    y = softmax(a3)

    return y
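This forward pass ends with a softmax that is not defined in the snippet. A common, numerically stable sketch (an assumption, not the source's own implementation):

import numpy as np

def softmax(a):
    # subtract the row-wise max before exponentiating to avoid overflow
    a = a - np.max(a, axis=-1, keepdims=True)
    exp_a = np.exp(a)
    return exp_a / np.sum(exp_a, axis=-1, keepdims=True)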
Example #7
def forward(x, predict=True):
    a0 = x.T
    z1 = W1.dot(a0) + B1
    a1 = sigmoid(z1)
    z2 = W2.dot(a1) + B2
    a2 = sigmoid(z2)
    z3 = W3.dot(a2) + B3
    a3 = sigmoid(z3)

    if predict is False:
        return a0, z1, a1, z2, a2, z3, a3
    return a3
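Examples #1 and #7 operate on global parameters W1..W3 and B1..B3 that are initialized elsewhere. A hypothetical setup and call, purely for illustration (the layer sizes and toy dataset are assumptions):

import numpy as np

rng = np.random.default_rng(0)

# illustrative 2-16-16-1 architecture; rows of x are samples, columns are features
W1, B1 = 0.1 * rng.standard_normal((16, 2)), np.zeros((16, 1))
W2, B2 = 0.1 * rng.standard_normal((16, 16)), np.zeros((16, 1))
W3, B3 = 0.1 * rng.standard_normal((1, 16)), np.zeros((1, 1))

x = rng.standard_normal((200, 2))                # 200 samples, 2 features
y = (x[:, :1] * x[:, 1:] > 0).astype(float)      # toy XOR-like targets, shape (200, 1)

error = train(x, y, iterations=1000, learning_rate=0.5)
predictions = forward(x)                         # shape (1, 200)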
Example #8
def forward_propagate(parameters, X, L, dropout=False):

    """ computes the forward propagation of the neural network """

    caches = {}
    keep_prob = 0.8  # keep probability for the inverted-dropout masks below

    caches["Z1"] = parameters["W1"].dot(X) + parameters["b1"]
    caches["a1"] = a.relu(caches["Z1"])

    if dropout:
        caches["D1"] = np.random.rand(caches["a1"].shape[0], caches["a1"].shape[1]) < keep_prob
        caches["a1"] *= caches["D1"]
        caches["a1"] /= keep_prob  # rescale so the expected activation is unchanged

    caches["Z2"] = parameters["W2"].dot(caches["a1"]) + parameters["b2"]
    caches["a2"] = a.relu(caches["Z2"])

    if dropout:
        caches["D2"] = np.random.rand(caches["a2"].shape[0], caches["a2"].shape[1]) < keep_prob
        caches["a2"] *= caches["D2"]
        caches["a2"] /= keep_prob

    # on the last layer we compute the sigmoid for each example
    caches["Z3"] = parameters["W3"].dot(caches["a2"]) + parameters["b3"]
    caches["a3"] = a.sigmoid(caches["Z3"])

    return caches["a3"], caches
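The dropout forward pass references an activation module imported as a (a.relu, a.sigmoid), which is not part of the snippet. A minimal stand-in under that assumption:

# activations.py -- assumed module, imported in the snippet as `import activations as a`
import numpy as np

def relu(z):
    return np.maximum(0, z)

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))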
Example #9
 def activation_function(self, Z):
     if self.activation_function_name == 'relu':
         return relu(Z)
     elif self.activation_function_name == 'sigmoid':
         return sigmoid(Z)
     else:
         return tanh(Z)
Example #10
 def feedForward(self, inputs):
     if len(inputs) != self.input-1:
         raise ValueError('Wrong number of inputs')
     
     # input activations
     self.ai = np.append(inputs, [1]) # add bias node
     
     # hidden activations
     self.ah = sigmoid(self.ai.dot(self.wi))
     # self.ah = relu(self.ai.dot(self.wi))
     
     # output activations
     self.ao = sigmoid(self.ah.dot(self.wo))
     # self.ao = relu(self.ah.dot(self.wo))
     
     return softmax(self.ao)
Example #11
def backpropagation(x, y, wo, wh):
    mses = []
    while True:
        # x == > 515 * 8

        net_h = np.dot(x, wh.T)  # == > 515 * 10

        net_h_active = sigmoid(net_h)  # == > 515 * 10

        net_o = np.dot(net_h_active, wo.T)  # == > 515 * 1

        net_o_active = net_o  # == > 515 * 1

        sigma = y - net_o_active  # == > 515 * 1

        error = sum_error(sigma)

        mses.append(error)
        if mses[-1] < 10:
            return mses, wo, wh

        sigma_o = np.dot(sigma.T, net_h_active)  # gradient w.r.t. wo uses the hidden activations  == > 1 * 10

        sigma_h = np.dot((np.dot(sigma, wo) * sigmoid_dash(net_h_active)).T, x)  # == > 10 * 8

        wo = wo + 0.0001 * sigma_o  # == > 1 * 10
        wh = wh + 0.0001 * sigma_h  # == > 10 * 8
Example #12
    def activation_forward(self, A_prev, W, b, activation):

        if activation == 'sigmoid':
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = sigmoid(Z)

        elif activation == "tanh":
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = tanh(Z)

        elif activation == "relu":
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = relu(Z)

        elif activation == "leaky_relu":
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = leaky_relu(Z)

        else:
            raise ValueError('unknown activation function: %s' % activation)

        assert (A.shape == (W.shape[0], A_prev.shape[1]))
        cache = (linear_cache, activation_cache)

        return A, cache
Example #13
    def backprop(self, x, y):
        new_biase = [np.zeros(b.shape) for b in self.biases]
        new_weight = [np.zeros(w.shape) for w in self.weights]

        activation = x
        activations = [x]  # list to store all the activations, layer by layer

        zs = []  # list to store all the z vectors, layer by layer

        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(
            zs[-1])
        new_biase[-1] = delta
        new_weight[-1] = np.dot(delta, activations[-2].transpose())

        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            new_biase[-l] = delta
            new_weight[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (new_biase, new_weight)
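The per-example gradients returned by backprop are typically accumulated over a mini-batch and applied in one averaged SGD step. A sketch of such a companion method (update_mini_batch and eta are illustrative names, not part of the snippet):

    def update_mini_batch(self, mini_batch, eta):
        # accumulate the per-example gradients from backprop, then take one averaged step
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_b, delta_w = self.backprop(x, y)
            nabla_b = [nb + db for nb, db in zip(nabla_b, delta_b)]
            nabla_w = [nw + dw for nw, dw in zip(nabla_w, delta_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]
        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]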
Example #14
 def testName(self):
     
     # read data
     data = reader.read("../data/log_1_fixed.txt", jstartline=15000, maxlines=5000)
             
     # preprocess data
     preprocess.preproc(data)
     
     # initialize model
     layers = [13, 8, 1]
     activf = [activation.linear(), activation.tanh(), activation.sigmoid()] #  ,  activation.tanh(1.75, 3./2.),
     net = ffnet.FFNet(layers, activf)
     net.initw(0.1) 
     
     # create training options
     opts = trainb.options()
     
     # write function
     f = open("../output/trainb_test.txt", "w+")
     writefcn = lambda s: f.write(s)
             
     # training
     trainb.train(data, opts, net, writefcn)
     
     # close file
     f.close()
Example #15
 def forwardPropagate(self, inputs):
     outputs = [inputs]
     for weights in self.network:
         outputs[-1] = np.c_[outputs[-1], np.ones(len(outputs[-1]))]
         outputs.append(act.sigmoid(np.dot(outputs[-1], weights)))
     print('outputs', outputs, '\n')
     return outputs
Example #16
    def testName(self):

        # read data
        data = reader.read("../data/log_1_fixed.txt",
                           jstartline=15000,
                           maxlines=5000)

        # preprocess data
        preprocess.preproc(data)

        # initialize model
        layers = [13, 8, 1]
        activf = [
            activation.linear(),
            activation.tanh(),
            activation.sigmoid()
        ]  # activation.tanh(1.75, 3./2.),
        net = ffnet.FFNet(layers, activf)
        net.initw(0.1)

        # create training options
        opts = trainsg.options()

        # write function
        f = open("../output/trainsg_test.txt", "w+")
        writefcn = lambda s: f.write(s)

        # training
        trainsg.train(data, opts, net, writefcn)

        # close file
        f.close()
Example #17
def train(x, y, iterations=50000, learning_rate=0.001):
    global W1, W2, B1, B2, error
    m = x.shape[0]
    error = []

    for _ in range(iterations):
        a0, z1, a1, z2, a2 = forward(x, predict=False)

        da2 = a2 - y.T
        dz2 = da2 * linear(z2, derivative=True)
        dw2 = dz2.dot(a1.T) / m
        db2 = np.sum(dz2, axis=1, keepdims=True) / m

        da1 = W2.T.dot(dz2)
        dz1 = np.multiply(da1, sigmoid(z1, derivative=True))
        dw1 = dz1.dot(a0.T) / m
        db1 = np.sum(dz1, axis=1, keepdims=True) / m

        W1 -= learning_rate * dw1
        B1 -= learning_rate * db1
        W2 -= learning_rate * dw2
        B2 -= learning_rate * db2

        error.append(np.average(da2**2))

    return error
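This variant uses a linear activation on the output layer via a linear(z, derivative=...) helper that the listing does not include. A plausible sketch under that assumption:

import numpy as np

def linear(z, derivative=False):
    # identity activation; its derivative is 1 everywhere
    if derivative:
        return np.ones_like(z)
    return z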
Example #18
def _forward(z: np.ndarray, W: np.ndarray, b: np.ndarray,
             activation: str) -> np.ndarray:
    """Propagate the signal from the previous layer to the next layer."""
    a = np.dot(z, W) + b
    if activation == 'sigmoid':
        return sigmoid(a)
    elif activation == 'identity':
        return identity(a)
    raise ValueError("unsupported activation: %s" % activation)
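identity is the other activation this helper dispatches to; if it is not defined elsewhere, a one-line pass-through is enough:

def identity(a: np.ndarray) -> np.ndarray:
    # pass-through activation for linear output layers
    return a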
Example #19
    def backPropagate(self, inputs, predicted, log=True):
        outputs = self.forwardPropagate(inputs)
        errors = [predicted - outputs[-1]]
        deltas = [
            act.sigmoid(outputs[-1], d=True) * errors[-1] * self.learningRate
        ]
        if log:
            print(errors[0])
            print(deltas[0])
        for i in range(len(self.network)):
            errors.insert(0, deltas[0].dot(self.network[1].T))
            deltas.insert(0, errors[0] * act.sigmoid(outputs[i + 1], d=True))
            if log:
                print()
                print(errors[0])
                print(deltas[0])

        for i in range(len(deltas)):
            self.network[i] += deltas[i] * outputs[i]
Example #20
 def calc_output(self):
     """
     Compute the node's output according to Equation 1.
     """
     output = reduce(
         lambda ret, conn: ret + conn.upstream_node.output * conn.weight,
         self.upstream, 0)
     self.output = sigmoid(output)
Example #21
def linear_activation_forward(A_prev, W, b, activation):
    Z = np.dot(W, A_prev) + b
    if activation == "sigmoid":
        A = sigmoid(Z)
    elif activation == "relu":
        A = relu(Z)
    elif activation == "tanh":
        A = tanh(Z)
    else:
        raise ValueError("unknown activation: %s" % activation)
    return A, Z
Example #22
    def test_sigmoid_deriv():
        """Test sigmoid activation function derivative"""

        x = np.array([[0, 1, 3], [-1, 0, -5], [1, 0, 3], [10, -9, -7]])

        y = np.array([[0.25, 0.19661, 0.04518], [0.19661, 0.25, 0.00665],
                      [0.19661, 0.25, 0.04518], [0.00005, 0.00012, 0.00091]])

        assert np.allclose(sigmoid(x, deriv=True), y, atol=0.00001)
Example #23
    def test_sigmoid():
        """Test sigmoid activation function"""

        x = np.array([[0, 1, 3], [-1, 0, -5], [1, 0, 3], [10, -9, -7]])

        y = np.array([[0.5, 0.73106, 0.95257], [0.26894, 0.5, 0.00669],
                      [0.73106, 0.5, 0.95257], [0.99995, 0.00012, 0.00091]])

        assert np.allclose(sigmoid(x), y, atol=0.00001)
Example #24
    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        return y
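A typical way to use this predict method is to take the argmax of the returned class probabilities; the names below (net, x_batch) are illustrative only:

probs = net.predict(x_batch)        # shape (n_samples, n_classes)
labels = np.argmax(probs, axis=1)   # predicted class index per sample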
Example #25
 def test_sigmoid(self):
     rtol = 1e-5
     size = 10
     for _ in range(1000):
         x = np.random.uniform(low=-1000., high=1000., size=size).flatten()
         test_buffer = list_2_swig_float_pointer(x.tolist(), size)
         y_numpy = np.array(tf.keras.activations.sigmoid(x)).tolist()
         y_nn4mc = activation.sigmoid(test_buffer.cast(), size)
         y_nn4mc = swig_py_object_2_list(y_nn4mc, size)
         assert np.allclose(y_nn4mc, y_numpy, rtol)
     print("sigmoid passed!")
Example #26
 def test_sigmoid(self):
     self.assertEqual(
         list(a.sigmoid([1, 2, 3, 4, 5])),
         [
             0.7310585786300049,
             0.8807970779778823,
             0.9525741268224334,
             0.9820137900379085,
             0.9933071490757153
         ]
     )
Example #27
    def step(z_t, zi_t, zf_t, zo_t, c_tm1, h_tm1):
        # new information
        Z_t = tanh(z_t + T.dot(h_tm1, U))

        # input gate
        Zi_t = sigmoid(zi_t + T.dot(h_tm1, Ui) + T.dot(c_tm1, Vi))

        # forget gate
        Zf_t = sigmoid(zf_t + T.dot(h_tm1, Uf) + T.dot(c_tm1, Vf))

        # new plus old/unforgetten memory
        c_t = Z_t * Zi_t + c_tm1 * Zf_t

        # output gate
        Zo_t = sigmoid(zo_t + T.dot(h_tm1, Uo) + T.dot(c_t, Vo))

        # output information
        h_t = tanh(c_t) * Zo_t

        return c_t, h_t
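The step function above is a peephole LSTM written with Theano's T.dot; the recurrent matrices U* and peephole matrices V* come from the enclosing scope. For reference, a plain-NumPy sketch of the same update with the matrices passed in explicitly (shapes and names are assumptions):

import numpy as np

def lstm_step(z_t, zi_t, zf_t, zo_t, c_tm1, h_tm1, U, Ui, Vi, Uf, Vf, Uo, Vo):
    sigmoid = lambda v: 1.0 / (1.0 + np.exp(-v))
    Z_t = np.tanh(z_t + h_tm1 @ U)                      # candidate memory
    Zi_t = sigmoid(zi_t + h_tm1 @ Ui + c_tm1 @ Vi)      # input gate
    Zf_t = sigmoid(zf_t + h_tm1 @ Uf + c_tm1 @ Vf)      # forget gate
    c_t = Z_t * Zi_t + c_tm1 * Zf_t                     # new plus unforgotten memory
    Zo_t = sigmoid(zo_t + h_tm1 @ Uo + c_t @ Vo)        # output gate peeks at c_t
    h_t = np.tanh(c_t) * Zo_t                           # hidden state
    return c_t, h_t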
Example #28
    def test_apply(self):

        # create a simple network
        net = ffnet.FFNet(
            [k, q, m],
            [activation.linear(),
             activation.tanh(),
             activation.sigmoid()])

        # some input
        x = [1] * k
        net.apply(x)
Example #29
    def forwardPropagation(self, row, count):
        listInputs = row
        for layer in self.network:
            listHidLayerInputs = []
            for neuron in layer:
                activation = self.activate(neuron,
                                           listInputs) + self.bias[count]
                output = sigmoid(activation)
                listHidLayerInputs.append(output)
            listInputs = listHidLayerInputs

        return listInputs
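forwardPropagation relies on a self.activate helper that is not shown; presumably it returns the neuron's weighted sum of its inputs. A minimal sketch under that assumption (the neuron['weights'] layout is hypothetical):

    def activate(self, neuron, inputs):
        # weighted sum of the incoming activations for this neuron
        return sum(w * i for w, i in zip(neuron['weights'], inputs))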
Example #30
def backprop(Theta1, Theta2, X, y):
    N = X.shape[0]
    K = Theta2.shape[0]

    J = 0

    Delta2 = np.zeros(Theta2.shape)
    Delta1 = np.zeros(Theta1.shape)

    for i in range(N):
        # Forward propagation, saving intermediate results
        a1 = np.concatenate(([1], X[i]))  # Input layer

        z2 = np.dot(Theta1, a1)
        a2 = np.concatenate(([1], sigmoid(z2)))  # Hidden Layer

        z3 = np.dot(Theta2, a2)
        a3 = sigmoid(z3)  # Output layer

        y0 = one_hot(K, y[i])

        # Cross entropy
        J -= np.dot(y0.T, np.log(a3))+np.dot((1-y0).T, np.log(1-a3))

        # Calculate the weight deltas
        delta_3 = a3-y0
        delta_2 = np.dot(Theta2.T, delta_3)[1:]*sigmoidGradient(z2)

        Delta2 += np.outer(delta_3, a2)
        Delta1 += np.outer(delta_2, a1)

    J /= N

    Theta1[:, 0] = np.zeros(Theta1.shape[0])
    Theta2[:, 0] = np.zeros(Theta2.shape[0])

    Theta1_grad = Delta1/N
    Theta2_grad = Delta2/N

    return [J, Theta1_grad, Theta2_grad]
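backprop assumes sigmoid, sigmoidGradient and one_hot helpers that the listing omits. Minimal sketches consistent with how they are called above:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoidGradient(z):
    s = sigmoid(z)
    return s * (1.0 - s)

def one_hot(K, label):
    # K-dimensional one-hot vector with a 1 at index `label`
    v = np.zeros(K)
    v[label] = 1.0
    return v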
Example #31
def get_precision(weights, bias, test_data):
    '''
    Compute the accuracy from the given weights and biases; the test set is test_data.
    '''
    test_alpha = Normalize(test_data[0])
    for w, b in zip(weights, bias):
        #print(test_alpha.shape,w.shape)
        test_alpha = sigmoid(np.dot(test_alpha, w) + b.T)
    test_result = np.array(
        [np.argmax(test_alpha[i, :]) for i in range(len(test_alpha))])
    # tally the prediction results
    return sum(int(i == j) for (i, j) in zip(test_result, test_data[1])) / len(
        test_data[1])
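Normalize is an external preprocessing helper; one common choice is z-score scaling, sketched here as an assumption:

import numpy as np

def Normalize(data):
    # scale each feature to zero mean and unit variance (assumed behaviour)
    data = np.asarray(data, dtype=float)
    mu = data.mean(axis=0)
    sigma = data.std(axis=0) + 1e-8   # avoid division by zero
    return (data - mu) / sigma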
Example #32
    def test_sigmoid(self):

        sigm = activation.sigmoid(-2.3, 10.4)

        x = 0.2
        dx = 1.e-7

        f = sigm.f(x)

        df_a = sigm.df(f)
        df_n = (sigm.f(x + dx) - sigm.f(x - dx)) / 2 / dx

        print("Analytical: %e, Numerical: %e" % (df_a, df_n))

        self.assertAlmostEqual(df_a, df_n, 8, "Numerical derivative is not equal to analytical")
Example #33
def train(k, data):
    
    # initialize new model
    layers = [13, 8, 1]
    activf = [activation.linear(), activation.tanh(), activation.sigmoid()]  
    net = ffnet.FFNet(layers, activf)
    net.initw(0.1) 
    
    # use default training options
    opts = trainsg.options()
    opts.rate = 2.e-4
    
    # write function
    f = open("../output/train-%s.txt" % k, "w+")
    writefcn = lambda s: f.write(s)

    # training
    net = trainsg.train(data, opts, net, writefcn)

    # close file
    f.close()
    
    # return trained network
    return net
Example #34
 def forward(self, prev_layer):
     if prev_layer.layer_property == Layer_Property['conv']:
         self.neurons = act.sigmoid(self.weight.dot(prev_layer.neurons.flatten()))
     else:
         self.neurons = act.sigmoid(self.weight.dot(prev_layer.neurons))
Example #35
 def forward(self, prev_layer):
     self.neurons = act.sigmoid(self.weight.dot(prev_layer.neurons))
Example #36
 def forward(self, prev_layer):
     prev_matrix = prev_layer.neurons
     for cur_feature in range(self.map_num):
         for prev_feature in range(prev_layer.map_num):
             self.neurons[cur_feature] += signal.convolve2d(prev_matrix[prev_feature], np.rot90(self.conv_filter[prev_feature][cur_feature], 2), mode='valid')
             self.neurons[cur_feature] = act.sigmoid(self.neurons[cur_feature])