Example #1
    def train(self, debug=False, interval=1000):
        # The lambda ignores its argument: numerical_derivative perturbs the
        # weight array in place and re-evaluates the loss via feed_forward().
        f = lambda x: self.feed_forward()
        start_time = datetime.now()

        if debug:
            print("Test System :")
            print("learning rate:", self.learning_rate, "iteration count:",
                  self.iteration_count)
            print(self.name, "initial loss value :", self.loss_val(), "\n")

        for step in range(self.iteration_count + 1):
            self.W2 -= self.learning_rate * numerical_derivative(f, self.W2)
            self.b2 -= self.learning_rate * numerical_derivative(f, self.b2)
            self.W3 -= self.learning_rate * numerical_derivative(f, self.W3)
            self.b3 -= self.learning_rate * numerical_derivative(f, self.b3)
            if debug and step % interval == 0:
                print(self.name, "step :", step, ", loss_value :",
                      self.loss_val())

        if debug:
            print(self.name, 'train result:')
            print("final loss value :", self.loss_val())
            print("final W2 =", self.W2)
            print("final b2 =", self.b2)
            print("final W3 =", self.W3)
            print("final b3 =", self.b3)
            print(self.name, "Elapsed time :",
                  datetime.now() - start_time, "\n")
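None of the examples on this page show the numerical_derivative helper they all call. A minimal sketch of what such a helper usually looks like in this style of tutorial, assuming a central-difference approximation with step size 1e-4 (the step size and the nditer-based loop are assumptions, not taken from the examples):

import numpy as np

def numerical_derivative(f, x):
    # Central-difference gradient of f with respect to every element of x.
    # f reads x through a closure, so each element is perturbed in place
    # and the loss is re-evaluated; the argument passed to f is ignored.
    delta_x = 1e-4
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp = x[idx]

        x[idx] = tmp + delta_x
        fx1 = f(x)                    # f(x + h)

        x[idx] = tmp - delta_x
        fx2 = f(x)                    # f(x - h)

        grad[idx] = (fx1 - fx2) / (2 * delta_x)
        x[idx] = tmp                  # restore the original value
        it.iternext()

    return grad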
Example #2
    def train(self, input_data, target_data):
        # Perform one gradient-descent step on a single (input, target) pair.
        self.input_data = input_data
        self.target_data = target_data

        f = lambda x: self.feed_forward()
        self.W2 -= self.learning_rate * numerical_derivative(f, self.W2)
        self.b2 -= self.learning_rate * numerical_derivative(f, self.b2)
        self.W3 -= self.learning_rate * numerical_derivative(f, self.W3)
        self.b3 -= self.learning_rate * numerical_derivative(f, self.b3)
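Since this variant performs only a single gradient step per call, the caller is expected to supply the epoch and sample loops itself. A minimal sketch of such a driver, assuming a host class named NeuralNetwork and NumPy arrays xdata/tdata (all of these names are placeholders, not taken from the example):

nn = NeuralNetwork()                    # hypothetical class exposing train()
for epoch in range(10000):              # assumed epoch count
    for x, t in zip(xdata, tdata):      # iterate over training pairs
        nn.train(x, t)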
Example #3
    def train(self, debug=False, interval=5):
        for step in range(self.epochs):
            for i in range(len(self.xdata)):
                self.input_data = self.xdata[i]
                self.target_data = self.tdata[i]

                f = lambda x: self.feed_forward()
                # Update each layer's weights and biases, using a distinct
                # index so the sample index i is not shadowed.
                for layer in range(len(self.W)):
                    self.W[layer] -= self.learning_rate * numerical_derivative(
                        f, self.W[layer])
                    self.b[layer] -= self.learning_rate * numerical_derivative(
                        f, self.b[layer])
            if debug and step % interval == 0:
                print("epoch:", step, "loss_val:", self.loss_val())
Example #4
    def train(self, debug=False, interval=1000):
        if debug:
            print("initial error value :", self.error_val(), "initial W =",
                  self.W, "\nb =", self.b)

        start_time = datetime.now()
        f = lambda x: self.loss_func()
        for step in range(self.iteration_count + 1):
            self.W -= self.learning_rate * numerical_derivative(f, self.W)
            self.b -= self.learning_rate * numerical_derivative(f, self.b)
            if debug and step % interval == 0:
                print("step :", step, "error value :", self.error_val(), "W =",
                      self.W, "b =", self.b)

        if debug:
            print("final error value :", self.error_val(), "final W =", self.W,
                  "\nb =", self.b)
            print("Elapsed time :", datetime.now() - start_time)
    xdata = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    and_tdata = np.array([0, 0, 0, 1]).reshape(4, 1)
    or_tdata = np.array([0, 1, 1, 1]).reshape(4, 1)
    nand_tdata = np.array([1, 1, 1, 0]).reshape(4, 1)
    xor_tdata = np.array([0, 1, 1, 0]).reshape(4, 1)

    test_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

    input_node_num = 2
    hidden_node_num = 4
    output_node_num = 1

    W2 = np.random.rand(input_node_num, hidden_node_num)
    b2 = np.random.rand(hidden_node_num)
    W3 = np.random.rand(hidden_node_num, output_node_num)
    b3 = np.random.rand(output_node_num)

    # XOR problem solved!
    learning_rate = 1e-2
    f = lambda x: feed_forward(xdata, xor_tdata)
    for step in range(30000):
        W2 -= learning_rate * numerical_derivative(f, W2)
        b2 -= learning_rate * numerical_derivative(f, b2)
        W3 -= learning_rate * numerical_derivative(f, W3)
        b3 -= learning_rate * numerical_derivative(f, b3)
        if step % 1000 == 0:
            print("step :", step, "loss_value :", loss_val(xdata, xor_tdata))

    for data in test_data:
        print('predict', data, ':', predict(data))
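The XOR driver above also assumes module-level feed_forward, loss_val, and predict helpers. A sketch consistent with the 2-4-1 shapes used here, assuming sigmoid activations and a cross-entropy loss (both assumptions; the page does not show these functions):

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def feed_forward(xdata, tdata):
    # Forward pass through the 2-4-1 network; returns cross-entropy loss.
    delta = 1e-7                            # guards against log(0)
    a2 = sigmoid(np.dot(xdata, W2) + b2)    # hidden layer
    y = sigmoid(np.dot(a2, W3) + b3)        # output layer
    return -np.sum(tdata * np.log(y + delta) +
                   (1 - tdata) * np.log(1 - y + delta))

loss_val = feed_forward                     # same computation, second name

def predict(x):
    a2 = sigmoid(np.dot(x, W2) + b2)
    y = sigmoid(np.dot(a2, W3) + b3)
    return (y > 0.5).astype(int)            # assumed 0.5 decision threshold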
# error_val computes exactly the same thing as loss_func.
def error_val(x, t):
    y = np.dot(x, W) + b
    return np.sum((t - y)**2) / len(x)


def predict(x):
    # Linear model prediction: y = x.W + b.
    y = np.dot(x, W) + b
    return y


if __name__ == "__main__":
    # try learning rates between 1e-2 and 1e-8
    learning_rate = 1e-5
    f = lambda x: loss_func(x_data, t_data)
    print("initial error value :", error_val(x_data, t_data), "W =", W,
          "\nb =", b)
    # try iteration counts between 8,000 and 500,000
    for step in range(20001):
        W -= learning_rate * numerical_derivative(f, W)
        b -= learning_rate * numerical_derivative(f, b)

        if step % 10000 == 0:
            print("step :", step, "error value :", error_val(x_data, t_data),
                  " W =", W, "\nb =", b)

    print("final error value :", error_val(x_data, t_data), " W =", W, "\nb =",
          b)
    test_data = [[4, 4, 4, 4], [-3, 0, 9, -1], [7, -9, 2, -8]]
    for data in test_data:
        print('predict', data, ':', predict(np.array(data)))
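This last snippet assumes x_data, t_data, W, b, and loss_func already exist at module level. A plausible setup consistent with the four-feature test data (the data values are placeholders; only the shapes are implied by the snippet):

import numpy as np

x_data = np.array([[1, 2, 3, 4],            # placeholder training inputs
                   [2, 3, 4, 5],
                   [3, 4, 5, 6]])
t_data = np.array([[10], [14], [18]])       # placeholder targets

W = np.random.rand(4, 1)                    # one weight per input feature
b = np.random.rand(1)

def loss_func(x, t):
    # Mean squared error; identical to error_val above.
    y = np.dot(x, W) + b
    return np.sum((t - y) ** 2) / len(x)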