Example #1
def create_layer():
    layers = layer.Layers()

    layers.append_affine(tensor.create_randomly([18, 13]),
                         tensor.create_zeros([13]))
    layers.append_batchnormalization(tensor.create_ones([13]),
                                     tensor.create_zeros([13]))
    layers.append_sigmoid()

    layers.append_affine(tensor.create_randomly([13, 7]),
                         tensor.create_zeros([7]))
    layers.append_batchnormalization(tensor.create_ones([7]),
                                     tensor.create_zeros([7]))
    layers.append_sigmoid()

    layers.append_affine(tensor.create_randomly([7, 2]),
                         tensor.create_randomly([2]))
    layers.append_softmax()

    layers.set_train_mode(True)

    loss_list = []

    with open("ttt_layers.bin", 'wb') as f:
        pickle.dump(layers, f)
    with open("ttt_loss_list.bin", 'wb') as f:
        pickle.dump(loss_list, f)
    print("done")
Example #2
    def init(self, batch_size):
        x_batch_shape = self._x.shape.copy()
        y_batch_shape = self._y.shape.copy()
        x_batch_shape[0] = batch_size
        y_batch_shape[0] = batch_size
        self.x_batch = tensor.create_zeros(x_batch_shape)
        self.y_batch = tensor.create_zeros(y_batch_shape)
        self.batch_size = batch_size
        self.batch_count = self._x.shape[0] // self.batch_size
        return self
Example #3
def affine_forward(x_shape, w_shape):
    x = tensor.create_gauss(x_shape)
    w = tensor.create_gauss(w_shape)
    b = tensor.create_gauss([w_shape[-1]])
    standard_out = tensor.create_matrix_product(x, w)
    new_out = tensor.create_zeros(standard_out.shape.copy(), float)
    compare_out = tensor.create_zeros(standard_out.shape.copy(), bool)

    tensor.matmul(x, w, standard_out)
    tensor.add(standard_out, b, standard_out)
    affine.forward(x.array, w.array, b.array, new_out.array)

    tensor.function_element_wise(standard_out, new_out, isSame, compare_out)

    print(compare_out)
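For reference, the affine forward pass being verified here is just a matrix product plus a broadcast bias, out = x·w + b. A minimal NumPy sketch of the same check (illustrative names, not this project's API):

import numpy as np

x = np.random.randn(4, 3)
w = np.random.randn(3, 2)
b = np.random.randn(2)

# reference result that an affine forward implementation should reproduce
standard_out = x @ w + b

# an alternative implementation would then be compared element-wise,
# e.g. with np.allclose(standard_out, new_out)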
Example #4
    def forward(self, x):
        # optimization trick: the closure captures self.momentum
        def multiply_momentum_and_add(left, right):
            return left * self.momentum + (1 - self.momentum) * right

        if (self.out.shape[0] != x.shape[0]):
            self.xc = x.copy()
            self.xn = x.copy()
            self.out = x.copy()
            self.tmp_out_shape = x.copy()
            self.batch_size = x.shape[0]

        if self.running_mean is None:
            D = len(x.array) // x.shape[0]
            self.running_mean = tensor.create_zeros([D])
            self.running_var = tensor.create_zeros([D])
            self.std = self.running_mean.copy()
            #self.tmp_sum_axis = self.running_mean.copy()
            self.dbeta = self.running_mean.copy()
            self.dgamma = self.running_mean.copy()

        if self.train_flg:
            # revisit this if tmp_sum_axis is added later and the values come out wrong
            tensor.mean_axis(x, 0, self.std)  # std reused as a temporary (holds mu)
            # numpy version: self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mu
            tensor.function_element_wise(self.running_mean, self.std,
                                         multiply_momentum_and_add,
                                         self.running_mean)
            tensor.sub(x, self.std, self.xc)
            tensor.function(self.xc, BatchNormalization.jegop,
                            self.xn)  # xn also reused as a temporary
            tensor.mean_axis(self.xn, 0, self.std)  # std reused as a temporary (holds var)
            # numpy version: self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var
            tensor.function_element_wise(self.running_var, self.std,
                                         multiply_momentum_and_add,
                                         self.running_var)
            tensor.function(self.std, BatchNormalization.sqrt,
                            self.std)  # the value std should finally hold
            tensor.div(self.xc, self.std, self.xn)  # the value xn should finally hold
        else:
            tensor.sub(x, self.running_mean, self.xc)
            tensor.function_element_wise(self.xc, self.running_var,
                                         BatchNormalization.sqrt_and_div,
                                         self.xn)
        tensor.mul(self.gamma, self.xn, self.out)
        tensor.add(self.out, self.beta, self.out)
        return self.out
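The closure multiply_momentum_and_add above implements the running-statistics update (running = momentum·running + (1 − momentum)·batch_stat), and the training branch reuses std and xn as scratch buffers. In plain NumPy the training-mode path corresponds roughly to the textbook batch-normalization forward pass; a sketch under that assumption (eps added for numerical stability, names illustrative):

import numpy as np

def batchnorm_forward_train(x, gamma, beta, running_mean, running_var,
                            momentum=0.9, eps=1e-7):
    mu = x.mean(axis=0)                 # per-feature batch mean
    xc = x - mu
    var = (xc ** 2).mean(axis=0)        # per-feature batch variance
    xn = xc / np.sqrt(var + eps)        # normalized activations
    # exponential moving averages kept for inference mode
    running_mean[:] = momentum * running_mean + (1 - momentum) * mu
    running_var[:] = momentum * running_var + (1 - momentum) * var
    return gamma * xn + beta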
Example #5
    def initForward(self, x):
        if (self.out.shape[0] != x.shape[0]):
            self.out = tensor.create_zeros(
                computing.create_shape(x.shape, self.filter.shape, self.stride,
                                       self.pad))
        self.x = x
        return self.out
Example #6
def test_compare_alg1(data_shape, filter_shape, stride, pad, padding):
    x = tensor.create_gauss(data_shape)
    bias = tensor.create_ones([filter_shape[0]])
    filter = tensor.create_gauss(filter_shape)
    out1 = tensor.create_zeros(
        conv3d_module.create_shape(x.shape, filter.shape, stride, pad))
    out2 = tensor.create_zeros(
        conv3d_module.create_shape(x.shape, filter.shape, stride, pad))
    time1 = time.time_ns()
    #conv3d_module.forward_test(x.array, x.shape, filter.array, filter.shape, bias.array,stride, pad, padding, out1.array, out1.shape)
    time2 = time.time_ns()
    conv3d_module.forward(x.array, x.shape, filter.array, filter.shape,
                          bias.array, stride, pad, padding, out2.array,
                          out2.shape)
    time3 = time.time_ns()
    print('{0}, {1}'.format(time2 - time1, time3 - time2))
    print('{0}'.format(tensor.isSame(out1, out2)))
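create_shape presumably derives the output volume from the usual convolution arithmetic. As a generic reference (not necessarily this module's exact layout), the output extent along one spatial axis is:

def conv_output_size(input_size, filter_size, stride, pad):
    # standard convolution output size along one axis
    return (input_size + 2 * pad - filter_size) // stride + 1

# e.g. a 28-wide axis with a 5-wide filter, stride 1, pad 2 stays 28 wide
assert conv_output_size(28, 5, 1, 2) == 28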
Example #7
def test_softmax(y_shape, one_hot_table, error=0.001):
    tensor_y = tensor.create_randomly(y_shape)
    numpy_y = parseNumpy(tensor_y)

    if (one_hot_table):
        tensor_t = tensor.create_zeros(y_shape, int)

        for i in range(y_shape[0]):
            tensor_t.array[i * y_shape[-1] +
                           rand.randint(0, y_shape[-1] - 1)] = 1

    else:
        tensor_t = tensor.create_zeros([y_shape[0], 1], int)
        for i in range(y_shape[0]):
            tensor_t.array[i] = rand.randint(0, y_shape[-1] - 1)

    numpy_t = parseNumpy(tensor_t)
    print(numpy_t)

    tensor_layer = layer.Softmax()
    numpy_layer = common.layers.SoftmaxWithLoss()

    tensor_layer.forward(tensor_y)
    numpy_loss = numpy_layer.forward(numpy_y, numpy_t)

    tensor_y = tensor_layer.out.copy()
    numpy_y = numpy_layer.y

    tensor_layer.init_table(tensor_t)
    tensor_dout = tensor_layer.backward(1)
    numpy_dout = numpy_layer.backward()

    print("loss")
    print(tensor_layer.loss)
    print(numpy_loss)
    print("forward")
    print(tensor_y)
    print(numpy_y)
    print(compare(tensor_y, numpy_y, error))
    print("backward")
    print(tensor_dout)
    print(numpy_dout)
    print(compare(tensor_dout, numpy_dout, error))
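The reference SoftmaxWithLoss layer this test compares against follows the standard softmax-with-cross-entropy forward/backward. A condensed NumPy sketch of that behaviour, assuming one-hot labels (illustrative, not the exact reference code):

import numpy as np

def softmax(y):
    y = y - y.max(axis=1, keepdims=True)    # shift for numerical stability
    e = np.exp(y)
    return e / e.sum(axis=1, keepdims=True)

def softmax_with_loss(y, t_onehot):
    p = softmax(y)
    batch = y.shape[0]
    loss = -np.sum(t_onehot * np.log(p + 1e-7)) / batch
    dout = (p - t_onehot) / batch           # gradient sent backward
    return p, loss, dout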
Example #8
def test_conv3d_forward(data_shape, filter_shape, stride, pad, padding):
    x = tensor.create_gauss(data_shape)
    b = tensor.create_ones([filter_shape[0]])
    filter = tensor.create_gauss(filter_shape)
    out = tensor.create_zeros(
        conv3d_module.create_shape(x.shape, filter.shape, stride, pad))
    conv3d_module.forward(x.array, x.shape, filter.array, filter_shape,
                          b.array, stride, pad, padding, out.array, out.shape)
    print(x)
    print(filter)
    print(b)
    print(out)
Example #9
def affine_backward(x_shape, w_shape):
    """backward에서 뒷쪽 노드에 보내줄 dout을 테스트합니다."""
    x = tensor.create_gauss(x_shape)
    w = tensor.create_gauss(w_shape)
    w_t = tensor.create_transpose(w)
    forward_out = tensor.create_matrix_product(x, w)
    forward_out = tensor.create_gauss(forward_out.shape)
    standard_out = tensor.create_matrix_product(forward_out, w_t)
    new_out = tensor.create_zeros(standard_out.shape.copy(), float)
    compare_out = tensor.create_zeros(standard_out.shape.copy(), bool)

    # well-known method
    tensor.transpose(w, w_t)
    tensor.matmul(forward_out, w_t, standard_out)

    # new method
    affine.backward(forward_out.array, w.array, w.shape, new_out.array)

    tensor.function_element_wise(standard_out, new_out, isSame, compare_out)

    print(compare_out)
Example #10
    def append_affine(self, *output):
        """Defines the output neuron structure and appends an Affine layer at the end."""
        tmp = self.input.pop()
        while (len(self.input) != 0):
            tmp *= self.input.pop()
        self.input.append(tmp)
        self.input.extend(output)
        w = tensor.create_gauss(self.input)
        self.input = list(output)
        b = tensor.create_zeros(self.input)
        self.input = self.input.copy()
        return self.append(nn.layer.Affine(w, b))
Example #11
def affine_backward_variables(x_shape, w_shape):
    """backward에서 업데이트를 위한 변수들의 미분값 테스트입니다."""
    x = tensor.create_gauss(x_shape)
    w = tensor.create_gauss(w_shape)
    b = tensor.create_gauss([w_shape[-1]])
    x_t = tensor.create_transpose(x)
    forward_out = tensor.create_matrix_product(x, w)
    forward_out = tensor.create_gauss(forward_out.shape)
    standard_dw = tensor.create_zeros(w.shape, float)
    new_dw = tensor.create_zeros(w.shape, float)
    compare_dw = tensor.create_zeros(w.shape, bool)
    standard_db = tensor.create_zeros(b.shape, float)
    new_db = tensor.create_zeros(b.shape, float)
    compare_db = tensor.create_zeros(b.shape, bool)

    # well-known method
    tensor.transpose(x, x_t)
    tensor.matmul(x_t, forward_out, standard_dw)
    tensor.sum_axis(forward_out, 0, standard_db)

    # new method
    affine.backward_variables(x.array, forward_out.array, new_dw.array,
                              new_db.array)

    tensor.function_element_wise(standard_dw, new_dw, isSame, compare_dw)
    tensor.function_element_wise(standard_db, new_db, isSame, compare_db)

    print(compare_dw)
    print(compare_db)
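For reference, the "well-known method" above is the usual affine backward pass. In NumPy terms, with upstream gradient dout for out = x·w + b (illustrative names):

import numpy as np

x = np.random.randn(5, 3)
w = np.random.randn(3, 2)
dout = np.random.randn(5, 2)

dw = x.T @ dout          # gradient w.r.t. the weights
db = dout.sum(axis=0)    # gradient w.r.t. the bias
dx = dout @ w.T          # gradient passed to the previous layer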
Example #12
def init():
    array = []
    table = []
    for line in urlopen(
            "https://archive.ics.uci.edu/ml/machine-learning-databases/tic-tac-toe/tic-tac-toe.data"
    ):
        decoded_line = line.decode('UTF-8').lower().strip()
        for i in range(0, 17, 2):
            if (decoded_line[i] == 'x'):
                array.extend([1., 0., 0.])
            elif (decoded_line[i] == 'o'):
                array.extend([0., 1., 0.])
            else:
                array.extend([0., 0., 0.])

        if (decoded_line[18] == 'p'):
            table.extend([1, 0])  # one-hot
        else:
            table.extend([0, 1])

    data_count = len(table) // 2
    if (len(table) // 2 != len(array) // 27):  # sanity check: one label pair per board
        print("error")
    train_count = data_count * 4 // 5
    test_count = data_count - train_count

    table = tensor.Tensor(table, [data_count, 2])
    data = tensor.Tensor(array, [data_count, 27])

    train_data = tensor.create_zeros([train_count, 27])
    train_table = tensor.create_zeros([train_count, 2])

    test_data = tensor.create_zeros([test_count, 27])
    test_table = tensor.create_zeros([test_count, 2])

    choice_list = tensor.create_arange(0, data_count)
    tensor.set_shuffle(choice_list)

    train_choice = tensor.create_zeros([train_count], int)
    test_choice = tensor.create_zeros([test_count], int)

    tensor.copy(choice_list, 0, train_count, train_choice)
    tensor.copy(choice_list, train_count, test_count, test_choice)

    tensor.copy_row(data, train_choice, train_data)
    tensor.copy_row(table, train_choice, train_table)
    tensor.copy_row(data, test_choice, test_data)
    tensor.copy_row(table, test_choice, test_table)

    with open('ttt_train_data.bin', 'wb') as f:
        pickle.dump(train_data, f)
    with open('ttt_train_table.bin', 'wb') as f:
        pickle.dump(train_table, f)
    with open('ttt_test_data.bin', 'wb') as f:
        pickle.dump(test_data, f)
    with open('ttt_test_table.bin', 'wb') as f:
        pickle.dump(test_table, f)
    print("done")
Example #13
    def initBackward(self, dout, t):
        if (self.dx.shape[0] != t.shape[0]):
            self.dx = tensor.create_zeros(t.shape)

        if (type(dout) == tensor.Tensor):
            if (len(dout.array) == len(t.array)):
                self.backward = self._backward2
                self.partialBackward = self._partialBackward2
        else:
            self.backward = self._backward1
            self.partialBackward = self._partialBackward1

        self.dout = dout
        self.t = t
        return self.dx
Example #14
from tkinter import *
import tensor
import layer
import pickle

with open('ttt_layers.bin', 'rb') as f:
    layers = pickle.load(f)
    layers.set_train_mode(False)

ttt_map = tensor.create_zeros([1, 18])
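# 다음수 = "next moves", 선택수 = "selected move", 제외수 = "excluded moves" (literal translations)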
다음수 = tensor.create_zeros([9, 18])
선택수 = tensor.create_zeros([1, 2])
제외수 = tensor.create_zeros([9, 2])
phase = 0


def click_player_vs_player(event):
    global player_sw
    event.widget.configure(background=buttons_color_player1 * (1 - player_sw) +
                           buttons_color_player2 * player_sw)
    player_sw = (player_sw + 1) % 2
    print(event.widget.winfo_name())


def click_player_vs_ai(event):
    event.widget.configure(background=buttons_color_player1)
    select = int(event.widget.winfo_name())
    ttt_map.array[select * 2] = 1.0
    ttt_map.shape = [9, 2]
    print(ttt_map)
    ttt_map.shape = [1, 18]
Example #15
def test(data_shape, w_shape):
    x = tensor.create_gauss(data_shape)
    w = tensor.create_gauss(w_shape)
    b = tensor.create_gauss([w_shape[-1]])

    out_affine = tensor.create_matrix_product(x, w)
    t = tensor.create_zeros(out_affine.shape.copy(), int)
    for i in range(t.shape[0]):
        t.array[i * t.shape[1] + random.randint(0, t.shape[1] - 1)] = 1

    out_sigmoid = out_affine.copy()

    dw1 = w.copy()
    dw2 = w.copy()
    db1 = b.copy()

    dout_sigmoid = out_affine.copy()

    dout1 = x.copy()
    dout2 = x.copy()

    NN.layer.computing.affine_module.forward(x.array, w.array, b.array,
                                             out_affine.array)
    NN.layer.computing.sigmoid_module.forward(out_affine.array,
                                              out_sigmoid.array)
    NN.layer.computing.cross_entropy_module.backward(out_sigmoid.array,
                                                     t.array,
                                                     dout_sigmoid.array)
    NN.layer.computing.sigmoid_module.backward(dout_sigmoid.array,
                                               out_sigmoid.array)
    NN.layer.computing.affine_module.backward(out_sigmoid.array, w.array,
                                              w.shape, dout1.array)
    #NN.layer.computing.affine_module.backward_variables(x.array, out_sigmoid.array, dw1.array, db1.array)
    NN.layer.computing.affine_module.backward_dw(x.array, x.shape,
                                                 out_sigmoid.array, dw1.array,
                                                 dw1.shape)

    for i in range(len(x.array)):
        origen = x.array[i]
        x.array[i] = origen + 0.0001
        NN.layer.computing.affine_module.forward(x.array, w.array, b.array,
                                                 out_affine.array)
        NN.layer.computing.sigmoid_module.forward(out_affine.array,
                                                  out_sigmoid.array)
        out1 = NN.layer.computing.cross_entropy_module.forward(
            out_sigmoid.array, t.array)

        x.array[i] = origen - 0.0001
        NN.layer.computing.affine_module.forward(x.array, w.array, b.array,
                                                 out_affine.array)
        NN.layer.computing.sigmoid_module.forward(out_affine.array,
                                                  out_sigmoid.array)
        out2 = NN.layer.computing.cross_entropy_module.forward(
            out_sigmoid.array, t.array)
        dout2.array[i] = (out1 - out2) / (2 * 0.0001)
        x.array[i] = origen

    for i in range(len(dw2.array)):
        origen = w.array[i]
        w.array[i] = origen + 0.0001
        NN.layer.computing.affine_module.forward(x.array, w.array, b.array,
                                                 out_affine.array)
        NN.layer.computing.sigmoid_module.forward(out_affine.array,
                                                  out_sigmoid.array)
        out1 = NN.layer.computing.cross_entropy_module.forward(
            out_sigmoid.array, t.array)

        w.array[i] = origen - 0.0001
        NN.layer.computing.affine_module.forward(x.array, w.array, b.array,
                                                 out_affine.array)
        NN.layer.computing.sigmoid_module.forward(out_affine.array,
                                                  out_sigmoid.array)
        out2 = NN.layer.computing.cross_entropy_module.forward(
            out_sigmoid.array, t.array)
        dw2.array[i] = (out1 - out2) / (2 * 0.0001)
        w.array[i] = origen
    """
    print("최종 역전파 결과")
    print(dout1)
    print("편미분 최정 결과")
    print(dout2)
    print('최종 w 역전파 결과')
    print(dw1)
    print('최종 w 편미분 결과')
    print(dw2)
    """
    print(tensor.isSame(dout1, dout2))
    print(tensor.isSame(dw1, dw2))
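The two loops above are a central-difference gradient check: each parameter is nudged by ±0.0001 and the resulting loss difference approximates the analytic gradient computed by the backward modules. A compact, generic version of the same idea (numerical_gradient is a hypothetical helper, not part of the project):

def numerical_gradient(loss_fn, params, h=1e-4):
    """Central-difference gradient of loss_fn() w.r.t. a flat list of floats."""
    grads = []
    for i in range(len(params)):
        orig = params[i]
        params[i] = orig + h
        loss_plus = loss_fn()
        params[i] = orig - h
        loss_minus = loss_fn()
        params[i] = orig                 # restore the parameter
        grads.append((loss_plus - loss_minus) / (2 * h))
    return grads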
Example #16
def test_layer_with_batchNormalization(data_shape,
                                       layer1_shape,
                                       layer2_shape,
                                       loop_count,
                                       error=0.001):
    tensor_x = tensor.create_randomly(data_shape)
    numpy_x = parseNumpy(tensor_x)

    tensor_t = tensor.create_zeros([data_shape[0], layer2_shape[-1]], int)
    for i in range(data_shape[0]):
        tensor_t.array[i * layer2_shape[-1] +
                       rand.randint(0, layer2_shape[-1] - 1)] = 1

    numpy_t = parseNumpy(tensor_t)

    tensor_w1 = tensor.create_randomly(layer1_shape)
    tensor_b1 = tensor.create_randomly([layer1_shape[-1]])

    tensor_w2 = tensor.create_randomly(layer2_shape)
    tensor_b2 = tensor.create_randomly([layer2_shape[-1]])

    tensor_gamma1 = tensor.create_ones([layer1_shape[-1]])
    tensor_beta1 = tensor.create_zeros([layer1_shape[-1]])

    numpy_w1 = parseNumpy(tensor_w1)
    numpy_b1 = parseNumpy(tensor_b1)

    numpy_w2 = parseNumpy(tensor_w2)
    numpy_b2 = parseNumpy(tensor_b2)

    numpy_gamma1 = parseNumpy(tensor_gamma1)
    numpy_beta1 = parseNumpy(tensor_beta1)

    #layer
    import layer
    tensor_layer = layer.Layers()
    tensor_layer.append_affine(tensor_w1, tensor_b1)
    tensor_layer.append_batchnormalization(tensor_gamma1, tensor_beta1)
    tensor_layer.append_sigmoid()
    tensor_layer.append_affine(tensor_w2, tensor_b2)
    tensor_layer.append_softmax()

    numpy_layers = []
    numpy_layers.append(common.layers.Affine(numpy_w1, numpy_b1))
    numpy_layers.append(
        common.layers.BatchNormalization(numpy_gamma1, numpy_beta1))
    numpy_layers.append(common.layers.Sigmoid())
    numpy_layers.append(common.layers.Affine(numpy_w2, numpy_b2))
    numpy_last_layer = common.layers.SoftmaxWithLoss()

    tensor_layer.set_train_mode(True)

    for i in range(loop_count):
        #forward
        t = time.time()
        tensor_forward = tensor_layer.forward(tensor_x).copy()
        print("tensor forward time : ", time.time() - t)
        t = time.time()
        numpy_X = numpy_x
        for layer in numpy_layers:
            numpy_X = layer.forward(numpy_X)
        numpy_loss = numpy_last_layer.forward(numpy_X, numpy_t)
        numpy_forward = numpy_last_layer.y
        print("numpy forward time : ", time.time() - t)
        #print("pre_batch : ", tensor_layer.layers[0].out)
        #print("batch : ", tensor_layer.layers[1].out)

        #backward
        t = time.time()
        tensor_dout = tensor_layer.backward(tensor_t)
        tensor_loss = tensor_layer.layers[-1].loss
        print("tensor backward time : ", time.time() - t)

        t = time.time()
        numpy_dout = numpy_last_layer.backward(1)
        for j in range(len(numpy_layers)):
            numpy_dout = numpy_layers[-1 - j].backward(numpy_dout)
        print("numpy backward time : ", time.time() - t)

        #update
        t = time.time()
        tensor_layer.update(tensor.Tensor([0.1], [1]))
        print("tensor update time : ", time.time() - t)
        t = time.time()
        numpy_layers[0].W -= 0.1 * numpy_layers[0].dW
        numpy_layers[0].b -= 0.1 * numpy_layers[0].db
        numpy_layers[3].W -= 0.1 * numpy_layers[3].dW
        numpy_layers[3].b -= 0.1 * numpy_layers[3].db
        numpy_layers[1].gamma -= 0.1 * numpy_layers[1].dgamma
        numpy_layers[1].beta -= 0.1 * numpy_layers[1].dbeta
        print("numpy update time : ", time.time() - t)

        print("loss")
        print("tensor : ", tensor_loss)
        print("numpy : ", numpy_loss)
        print("forward")
        print(compare(tensor_forward, numpy_forward, error))
        print("backward")
        print(compare(tensor_dout, numpy_dout, error))

        print("update")
        print("new w1")
        print(compare(tensor_layer.layers[0].W, numpy_layers[0].W, error))
        print("new b1")
        print(compare(tensor_layer.layers[0].b, numpy_layers[0].b, error))
        print("new w2")
        print(compare(tensor_layer.layers[3].W, numpy_layers[3].W, error))
        print("new b2")
        print(compare(tensor_layer.layers[3].b, numpy_layers[3].b, error))
    return tensor_layer, numpy_layers
Example #17
def test_layer(data_shape,
               layer1_shape,
               layer2_shape,
               loop_count,
               error=0.001):
    tensor_x = tensor.create_randomly(data_shape)
    numpy_x = parseNumpy(tensor_x)

    tensor_t = tensor.create_zeros([data_shape[0], layer2_shape[-1]], int)
    for i in range(data_shape[0]):
        tensor_t.array[i * layer2_shape[-1] +
                       rand.randint(0, layer2_shape[-1] - 1)] = 1

    numpy_t = parseNumpy(tensor_t)

    tensor_w1 = tensor.create_randomly(layer1_shape)
    tensor_b1 = tensor.create_randomly([layer1_shape[-1]])

    tensor_w2 = tensor.create_randomly(layer2_shape)
    tensor_b2 = tensor.create_randomly([layer2_shape[-1]])

    numpy_w1 = parseNumpy(tensor_w1)
    numpy_b1 = parseNumpy(tensor_b1)

    numpy_w2 = parseNumpy(tensor_w2)
    numpy_b2 = parseNumpy(tensor_b2)

    #layer
    import layer
    tensor_layer = layer.Layers()
    tensor_layer.append_affine(tensor_w1, tensor_b1)
    tensor_layer.append_relu()
    tensor_layer.append_affine(tensor_w2, tensor_b2)
    tensor_layer.append_softmax()

    numpy_layers = []
    numpy_layers.append(common.layers.Affine(numpy_w1, numpy_b1))
    numpy_layers.append(common.layers.Relu())
    numpy_layers.append(common.layers.Affine(numpy_w2, numpy_b2))
    numpy_last_layer = common.layers.SoftmaxWithLoss()

    for i in range(loop_count):
        #forward
        t = time.time()
        tensor_forward = tensor_layer.forward(tensor_x).copy()
        print("tensor forward time : ", time.time() - t)
        t = time.time()
        numpy_X = numpy_x
        for layer in numpy_layers:
            numpy_X = layer.forward(numpy_X)
        numpy_loss = numpy_last_layer.forward(numpy_X, numpy_t)
        numpy_forward = numpy_last_layer.y
        print("numpy forward time : ", time.time() - t)

        #backward
        t = time.time()
        tensor_dout = tensor_layer.backward(tensor_t)
        tensor_loss = tensor_layer.layers[-1].loss
        print("tensor backward time : ", time.time() - t)

        t = time.time()
        numpy_dout = numpy_last_layer.backward(1)
        for j in range(len(numpy_layers)):
            numpy_dout = numpy_layers[-1 - j].backward(numpy_dout)
        print("numpy backward time : ", time.time() - t)

        #update
        t = time.time()
        tensor_layer.update(tensor.Tensor([0.1], [1]))
        print("tensor update time : ", time.time() - t)
        t = time.time()
        numpy_layers[0].W -= 0.1 * numpy_layers[0].dW
        numpy_layers[0].b -= 0.1 * numpy_layers[0].db
        numpy_layers[2].W -= 0.1 * numpy_layers[2].dW
        numpy_layers[2].b -= 0.1 * numpy_layers[2].db
        print("numpy update time : ", time.time() - t)

        print("loss")
        print("tensor : ", tensor_loss)
        print("numpy : ", numpy_loss)
        print("forward")
        print(compare(tensor_forward, numpy_forward, error))
        print("backward")
        print(compare(tensor_dout, numpy_dout, error))

        print("update")
        print("new w1")
        print(compare(tensor_layer.layers[0].W, numpy_layers[0].W, error))
        print("new b1")
        print(compare(tensor_layer.layers[0].b, numpy_layers[0].b, error))
        print("new w2")
        print(compare(tensor_layer.layers[2].W, numpy_layers[2].W, error))
        print("new b2")
        print(compare(tensor_layer.layers[2].b, numpy_layers[2].b, error))
Example #18
    def append_shift(self):
        """Appends a layer that shifts the input using a weight and a bias."""
        w = tensor.create_ones(self.input)
        b = tensor.create_zeros(self.input)
        return self.append(nn.layer.Shift(w, b))
Example #19
    def initForward(self, x):
        if (self.out.shape[0] != x.shape[0]):
            self.out = x.copy()
            self.dispersion = tensor.create_zeros([len(x.array) // x.shape[0]])
        self.x = x
        return self.out
Example #20
File: ex1_xor1.py  Project: pirate1111/test
import sys

sys.path.append(__file__.replace('\\NN\\example\\ex1_xor1.py', ''))

import NN as nn

import tensor

# XOR data
x = tensor.Tensor([0., 0., 0., 1., 1., 0., 1., 1.], [4, 2])
y = tensor.Tensor([0., 1., 1., 0., 1., 0., 0., 1.], [4, 2])

# initialize variables
w1 = tensor.create_gauss([2, 4])
b1 = tensor.create_zeros([4])
w2 = tensor.create_gauss([4, 2])
b2 = tensor.create_zeros([2])

# create the network
network = nn.Network()
network.append(nn.layer.Affine(w1, b1))
network.append(nn.layer.Sigmoid())
network.append(nn.layer.Affine(w2, b2))
network.append(nn.layer.Softmax())

optimizer = nn.optimizer.SGD(0.05)

# initialization
network.initForward(x)
network.initBackward(1, y)
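The listing ends after initialization; the training loop itself is not shown, and the exact Network/optimizer API beyond initForward and initBackward is not visible here. As a reference only, a self-contained NumPy sketch of training the same 2-4-2 XOR network (sigmoid hidden layer, softmax output, plain SGD; the script above uses nn.optimizer.SGD(0.05), while the rate and step count below are arbitrary choices for the sketch):

import numpy as np

rng = np.random.default_rng(0)
x = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
t = np.array([[0., 1.], [1., 0.], [1., 0.], [0., 1.]])  # same one-hot layout as y above

w1, b1 = rng.normal(size=(2, 4)), np.zeros(4)
w2, b2 = rng.normal(size=(4, 2)), np.zeros(2)

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

for step in range(10000):
    # forward: affine -> sigmoid -> affine -> softmax
    h = sigmoid(x @ w1 + b1)
    z = h @ w2 + b2
    y_hat = np.exp(z - z.max(axis=1, keepdims=True))
    y_hat /= y_hat.sum(axis=1, keepdims=True)
    # backward: softmax + cross-entropy, then the affine/sigmoid layers
    dz = (y_hat - t) / x.shape[0]
    dw2, db2 = h.T @ dz, dz.sum(axis=0)
    dh = (dz @ w2.T) * h * (1 - h)
    dw1, db1 = x.T @ dh, dh.sum(axis=0)
    # SGD update (in-place on the parameter arrays)
    for p, g in ((w1, dw1), (b1, db1), (w2, dw2), (b2, db2)):
        p -= 0.5 * g

print(y_hat.round(2))  # predictions should approach the one-hot targets above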