Code example #1
def nn_predict(nn, batch_x):
    batch_x = batch_x.T
    m = batch_x.shape[1]
    nn.a[0] = batch_x
    for k in range(1, nn.depth):
        # pre-activation of layer k; columns of the matrices are samples
        y = np.dot(nn.W[k - 1], nn.a[k - 1]) + np.tile(nn.b[k - 1], (1, m))
        if nn.batch_normalization:
            # normalize with the running mean/std collected during training,
            # then scale and shift with the learned Gamma/Beta
            y = (y - np.tile(nn.E[k - 1], (1, m))) / np.tile(
                nn.S[k - 1] + 0.0001 * np.ones(nn.S[k - 1].shape), (1, m))
            y = nn.Gamma[k - 1] * y + nn.Beta[k - 1]

        if k == nn.depth - 1:
            f = nn.output_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)
            elif f == 'softmax':
                nn.a[k] = softmax(y)

        else:
            f = nn.active_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)

    return nn
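
Most of the snippets in this list call `sigmoid` and `softmax` helpers that are defined elsewhere in their respective projects and are not shown. As a point of reference only, a minimal, numerically stable sketch of such helpers might look like the following; the `axis` parameter is an assumption, since some snippets normalize over rows and others over columns:

import numpy as np

def sigmoid(x):
    # element-wise logistic function
    return 1.0 / (1.0 + np.exp(-x))

def softmax(x, axis=-1):
    # subtracting the per-slice max keeps exp() from overflowing
    # and does not change the result
    shifted = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=axis, keepdims=True)
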
Code example #2
File: rnnlm_gen.py  Project: RyoJ/wordart
    def generate(self, start_id, skip_ids=None, sample_size=500):
        word_ids = [start_id]

        x = start_id
        while len(word_ids) < sample_size:
            x = np.array(x).reshape(1, 1)   # (1, 1) mini-batch holding the current word id
            score = self.predict(x)         # scores over the vocabulary for the next word
            p = softmax(score.flatten())    # turn the scores into a probability distribution
            sampled = np.random.choice(len(p), size=1, p=p)  # sample the next word id from p

            if (skip_ids is None) or (sampled not in skip_ids):
                x = sampled
                word_ids.append(int(x))

        return word_ids
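
A hedged usage sketch for `generate` above; `model`, `word_to_id`, and `id_to_word` come from the surrounding project and are assumptions here, as are the chosen start word and skip tokens:

# hypothetical driver code, not part of the original file
start_id = word_to_id['you']
skip_ids = [word_to_id[w] for w in ['<unk>', 'N', '<eos>'] if w in word_to_id]
word_ids = model.generate(start_id, skip_ids, sample_size=100)
print(' '.join(id_to_word[i] for i in word_ids))
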
Code example #3
    def test(self, inputs, labels):
        input_layer = np.dot(inputs, self.weight1)
        hidden_layer = function.relu(input_layer + self.bias1)
        scores = np.dot(hidden_layer, self.weight2) + self.bias2
        probs = function.softmax(scores)
        acc = float(np.sum(np.argmax(probs, 1) == labels)) / float(len(labels))
        print('Test accuracy: {:.2f}%'.format(acc * 100))
Code example #4
    def predict(self, x):  # given input x, predict the output y using the current weights and biases
        # x (input layer) -> hidden layer
        z2 = np.dot(x, self.params['W1']) + self.params['b1']
        a2 = sigmoid(z2)  # apply the sigmoid activation
        # hidden layer -> y (output layer)
        z3 = np.dot(a2, self.params['W2']) + self.params['b2']
        y = softmax(z3)  # apply softmax to turn the output into probabilities

        return y  # (10,3)
Code example #5
def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = function.sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = function.sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = function.softmax(a3)
    return y
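
A hedged usage sketch for `predict` above, with a small hypothetical `network` dict; the layer sizes are illustrative, not taken from the original project, and `function.sigmoid` / `function.softmax` are assumed to behave like the helpers sketched after code example #1:

rng = np.random.default_rng(0)
network = {
    'W1': rng.normal(size=(4, 5)), 'b1': np.zeros(5),
    'W2': rng.normal(size=(5, 5)), 'b2': np.zeros(5),
    'W3': rng.normal(size=(5, 3)), 'b3': np.zeros(3),
}
x = rng.normal(size=(2, 4))  # two samples, four features each
y = predict(network, x)      # shape (2, 3); each row sums to 1
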
Code example #6
    def predict(self, X_test, y_test):  # predict for new inputs
        z = np.dot(X_test, self.W)  # (W^T) * X for each of the m samples
        hx = sigmoid(z)  # squash the scores into (0, 1); shape (m, t)
        # turn each prediction into a probability distribution with softmax
        sx = np.array([softmax(hx[i]) for i in range(hx.shape[0])])

        cnt = 0  # number of predictions that match the targets
        for i in range(sx.shape[0]):  # iterate over the test samples
            max_index = np.argmax(sx[i])  # index of the highest-probability class
            if y_test[i][max_index] == 1:  # does it match the one-hot target?
                cnt += 1  # count it as correct
        print("Accuracy: ", cnt / hx.shape[0])  # fraction of correct predictions
Code example #7
    def predict(self, x):
        w1, w2, w3 = self.network['W1'], self.network['W2'], self.network['W3']
        b1, b2, b3 = self.network['b1'], self.network['b2'], self.network['b3']

        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        z2 = sigmoid(a2)
        a3 = np.dot(z2, w3) + b3
        y = softmax(a3)

        return y
Code example #8
    def train(self, inputs, labels):

        for epoch in range(self.num_epochs):  # training begin
            iteration = 0
            while iteration < len(inputs):

                # batch input
                inputs_batch = inputs[iteration:iteration + self.batch_size]
                labels_batch = labels[iteration:iteration + self.batch_size]

                # forward pass
                z1 = np.dot(inputs_batch, self.weight1) + self.bias1
                a1 = function.relu(z1)
                z2 = np.dot(a1, self.weight2) + self.bias2
                y = function.softmax(z2)

                # calculate loss
                loss = function.cross_entropy(y, labels_batch)
                loss += function.L2_regularization(0.01, self.weight1,
                                                   self.weight2)  # 0.01 is the regularization strength (lambda)
                self.loss.append(loss)

                # backward pass
                delta_y = (y - labels_batch) / y.shape[0]
                delta_hidden_layer = np.dot(delta_y, self.weight2.T)
                delta_hidden_layer[a1 <= 0] = 0  # derivatives of relu

                # backpropagation
                weight2_gradient = np.dot(a1.T, delta_y)  # forward * backward
                bias2_gradient = np.sum(delta_y, axis=0, keepdims=True)

                weight1_gradient = np.dot(inputs_batch.T, delta_hidden_layer)
                bias1_gradient = np.sum(delta_hidden_layer,
                                        axis=0,
                                        keepdims=True)

                # L2 regularization
                weight2_gradient += 0.01 * self.weight2
                weight1_gradient += 0.01 * self.weight1

                # stochastic gradient descent
                self.weight1 -= self.learning_rate * weight1_gradient  #update weight and bias
                self.bias1 -= self.learning_rate * bias1_gradient
                self.weight2 -= self.learning_rate * weight2_gradient
                self.bias2 -= self.learning_rate * bias2_gradient

                print('=== Epoch: {:d}/{:d}\tIteration:{:d}\tLoss: {:.2f} ==='
                      .format(epoch + 1, self.num_epochs, iteration + 1, loss))
                iteration += self.batch_size
Code example #9
File: mnist.py  Project: ayusher/mnist-ga
    def evaluate(self, board):

        z1 = np.dot(board, self.weights[0]) + self.biases[0]
        a1 = function.relu(z1)
        z2 = np.dot(a1, self.weights[1]) + self.biases[1]
        last_layer = function.softmax(z2)[0]
        #print("Evaluated")
        if sum(last_layer)==0: per = 0
        else: per = max(last_layer)/sum(last_layer)
        if per == 1: per = .999
        if per >= 1: print("Error in evaluation")
        if per<0: print("Negative throughput")
        #print(np.argmax(last_layer)+per)
        return np.argmax(last_layer)+per
Code example #10
File: network.py  Project: yamatakudesu/enshu
    def forprop(self, x):
        # input: input-layer units (for MNIST, an array of length 784)
        # output: pre- and post-activation values for every layer
        B = self.B
        W = self.W
        L = self.layers
        Z = [np.array([]) for l in range(L)]
        U = [np.array([]) for l in range(L)]
        Z[0] = x

        # ReLU activation for every layer except the output layer
        for l in range(1, L - 1):
            U[l - 1] = np.dot(W[l - 1].T, Z[l - 1]) + B[l - 1]
            Z[l] = relu(U[l - 1])

        # softmax activation at the output layer
        U[L - 2] = np.dot(W[L - 2].T, Z[L - 2]) + B[L - 2]
        Z[L - 1] = softmax(U[L - 2])

        return U, Z
Code example #11
    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss
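
`cross_entropy_error` is not included in this excerpt. A common batch-aware definition, shown here only as a sketch under the assumption that `t` holds either one-hot vectors or integer label indices, is:

def cross_entropy_error(y, t):
    # treat a single sample as a batch of size one
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    # if t is one-hot, convert it to label indices
    if t.size == y.size:
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    # the small constant keeps log() away from zero
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
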
Code example #12
seq.add(link.BatchNormalization(64))
seq.add(function.Activation("relu"))
seq.add(link.Convolution2D(64, 128, ksize=4, stride=2, pad=0))
seq.add(link.BatchNormalization(128))
seq.add(function.Activation("relu"))
seq.add(link.Convolution2D(128, 256, ksize=4, stride=2, pad=0))
seq.add(link.BatchNormalization(256))
seq.add(function.Activation("relu"))
seq.add(link.Convolution2D(256, 512, ksize=4, stride=2, pad=0))
seq.add(link.BatchNormalization(512))
seq.add(function.Activation("relu"))
seq.add(link.Convolution2D(512, 1024, ksize=4, stride=2, pad=0))
seq.add(link.BatchNormalization(1024))
seq.add(function.Activation("relu"))
seq.add(link.Linear(None, 10, use_weightnorm=True))
seq.add(function.softmax())
seq.build()

y = seq(x)
print(y.data.shape)

# Deconv test
x = np.random.normal(scale=1, size=(2, 100)).astype(np.float32)
x = Variable(x)

image_size = 96
# compute projection width
input_size = util.get_in_size_of_deconv_layers(image_size,
                                               num_layers=3,
                                               ksize=4,
                                               stride=2)
Code example #13
with open('weights.pkl', 'rb') as handle:
    b = pickle.load(handle)

weight1 = b[0]
bias1 = b[1]
weight2 = b[2]
bias2 = b[3]

num = 0

while num < test_images.shape[0]:
    input_layer = np.dot(x_test[num:num + 1], weight1)
    hidden_layer = function.relu(input_layer + bias1)
    scores = np.dot(hidden_layer, weight2) + bias2
    probs = function.softmax(scores)
    predict = np.argmax(probs)

    img = np.zeros([28, 28, 3])

    img[:, :, 0] = test_images[num]
    img[:, :, 1] = test_images[num]
    img[:, :, 2] = test_images[num]

    resized_image = cv2.resize(img, (100, 100))
    cv2.putText(resized_image, str(predict), (5, 20), cv2.FONT_HERSHEY_DUPLEX,
                .7, (0, 255, 0), 1)
    cv2.imshow('input', resized_image)
    k = cv2.waitKey(0)
    if k == 27:  # Esc key to stop
        break
    num += 1  # advance to the next test image
Code example #14
def nn_forward(nn, batch_x, batch_y):
    s = len(nn.cost) + 1
    batch_x = batch_x.T
    batch_y = batch_y.T
    m = batch_x.shape[1]
    nn.a[0] = batch_x

    cost2 = 0
    for k in range(1, nn.depth):
        y = np.dot(nn.W[k - 1], nn.a[k - 1]) + np.tile(
            nn.b[k - 1], (1, m))  # np.tile plays the role of MATLAB's repmat (replicate matrix)

        if nn.batch_normalization:
            nn.E[k - 1] = nn.E[k - 1] * nn.vecNum + np.array([np.sum(y, axis=1)]).T
            nn.S[k - 1] = nn.S[k - 1]**2 * (nn.vecNum - 1) + np.array(
                [(m - 1) * np.std(y, ddof=1, axis=1)**2]).T  # ddof=1 gives the unbiased variance estimate
            nn.vecNum = nn.vecNum + m
            nn.E[k - 1] = nn.E[k - 1] / nn.vecNum
            nn.S[k - 1] = np.sqrt(nn.S[k - 1] / (nn.vecNum - 1))
            y = (y - np.tile(nn.E[k - 1], (1, m))) / np.tile(
                nn.S[k - 1] + 0.0001 * np.ones(nn.S[k - 1].shape), (1, m))
            y = nn.Gamma[k - 1] * y + nn.Beta[k - 1]

        if k == nn.depth - 1:
            f = nn.output_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)
            elif f == 'softmax':
                nn.a[k] = softmax(y)

        else:
            f = nn.active_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)

        cost2 = cost2 + np.sum(nn.W[k - 1]**2)

    if nn.encoder == 1:
        roj = np.sum(nn.a[2], axis=1) / m
        nn.cost[s] = 0.5 * np.sum(
            (nn.a[k] - batch_y)**
            2) / m + 0.5 * nn.weight_decay * cost2 + 3 * sum(
                nn.sparsity * np.log(nn.sparsity / roj) +
                (1 - nn.sparsity) * np.log((1 - nn.sparsity) / (1 - roj)))
    else:
        if nn.objective_function == 'MSE':
            nn.cost[s] = 0.5 / m * sum(sum(
                (nn.a[k] - batch_y)**2)) + 0.5 * nn.weight_decay * cost2
        elif nn.objective_function == 'Cross Entropy':
            nn.cost[s] = -0.5 * sum(sum(
                batch_y * np.log(nn.a[k]))) / m + 0.5 * nn.weight_decay * cost2
    # nn.cost[s]

    return nn
Code example #15
File: layer.py  Project: FrankLeeC/nlp
    def call(self, v):
        return fn.softmax(np.dot(self.w, v) + self.b)
Code example #16
    def forward(self, x, t):
        self.t = t
        self.y = function.softmax(x)
        self.loss = loss.cross_entropy_error(self.y, self.t)

        return self.loss
Code example #17
File: SoftmaxWithLoss.py  Project: kaito0223/shakyou
    def forword(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss
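
The file excerpt only shows the forward pass. A common companion `backward` for a softmax-with-cross-entropy layer, added here only as a sketch (it is not part of the original file and assumes `self.t` is one-hot), propagates the batch-averaged difference between prediction and target:

    def backward(self, dout=1):
        batch_size = self.t.shape[0]
        # gradient of softmax + cross-entropy w.r.t. the input scores
        dx = (self.y - self.t) * dout / batch_size
        return dx
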
Code example #18
x1 = float(input("x1:"))
x2 = float(input("x2:"))

X = np.array([x1, x2])
W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
B1 = np.array([0.1, 0.2, 0.3])

A1 = np.dot(X, W1) + B1
# multiply the inputs by the weights and add the bias

Z1 = func.sigmoid(A1)
print(A1)
print(Z1)

W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
B2 = np.array([0.1, 0.2])

A2 = np.dot(Z1, W2) + B2
Z2 = func.sigmoid(A2)
print(A2)
print(Z2)
# hidden layer 2

W3 = np.array([[0.1, 0.2], [0.4, 0.2]])
B3 = np.array([0.2, 0.1])

A3 = np.dot(Z2, W3) + B3
Y = func.softmax(A3)  # use softmax as the output activation

print(Y)
print(np.sum(Y))
Code example #19
# for 10-class classification, the teacher signal for class 2 is [0,1,0,0,0,0,0,0,0,0]
def one_of_k(data):
    labels = np.zeros((len(data), 10))
    for i in range(len(data)):
        labels[i][data[i]] = 1

    return labels


train_labels = one_of_k(train_labels)
test_labels = one_of_k(test_labels)

w = np.random.normal(0, 0.1, (784, 10))
learning_rate = 0.01

for x, t in zip(train_images, train_labels):
    p = softmax(np.dot(w.T, x))
    x = np.reshape(x, (784, 1))
    q = np.reshape(p.T - t, (1, 10))
    w -= learning_rate * np.dot(x, q)

acc = 0.0
for x, t in zip(test_images, test_labels):
    p = softmax(np.dot(w.T, x))
    if t[np.argmax(p)] == 1:
        acc += 1.0
acc /= len(test_images)

print(acc)
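
The per-sample update `w -= learning_rate * np.dot(x, q)` above is the standard softmax-regression gradient step: for one sample, the gradient of the cross-entropy loss with respect to `w` is `x (p - t)^T`. A mini-batch variant of the same update, shown only as a sketch (the batch size is illustrative, and `softmax` is assumed to normalize each row, as in the sketch after code example #1), would be:

batch_size = 32
for i in range(0, len(train_images), batch_size):
    X = train_images[i:i + batch_size]   # (B, 784)
    T = train_labels[i:i + batch_size]   # (B, 10) one-hot targets
    P = softmax(np.dot(X, w))            # (B, 10) predicted probabilities
    # average the gradient x (p - t)^T over the batch
    w -= learning_rate * np.dot(X.T, P - T) / len(X)
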
Code example #20
    def get_max(self, board):
        self.nodes += 1

        state1 = np.reshape(board, [1, self.board_size * self.board_size])
        depth_count = 0
        for i in state1[0]:
            if i != "_":
                depth_count += 1
        hash_state = function.convert(board)
        if hash_state in self.memory:
            return self.memory[hash_state]
        if depth_count - 1 < self.limit:

            s = function.score(board, self.symbol, self.other_symbol)
            next_action = -1
            if s != 0:
                next_action = -1
                self.memory[hash_state] = (s, next_action)
            elif s == 0 and "_" not in board:
                next_action = -1
                self.memory[hash_state] = (s, next_action)
            else:
                max_value = 0

                for i in range(self.board_size * self.board_size):
                    coordinate = function.get_choice(i, self.board_size)
                    if board[coordinate[0]][coordinate[1]] == "_":
                        theta, possible_choice = self.available_area(board, i)
                        expect_value = 0.
                        for action in possible_choice:
                            children = cp.deepcopy(board)
                            sub_coordinate = function.get_choice(
                                action, self.board_size)
                            children[sub_coordinate[0]][
                                sub_coordinate[1]] = self.symbol
                            value, _ = self.get_min(children)
                            expect_value += value / theta

                        if expect_value > max_value or next_action == -1:
                            max_value = expect_value
                            #print(expect_value)
                            next_action = i

                            if max_value == 1:
                                self.memory[hash_state] = (max_value,
                                                           next_action)
                                return max_value, next_action
                        self.memory[hash_state] = (max_value, next_action)

            return self.memory[hash_state]

        else:

            state = cp.deepcopy(board)
            s = function.score(state, self.symbol, self.other_symbol)
            next_action = -1
            if s != 0:
                next_action = -1
                self.memory[hash_state] = (s, next_action)
            elif s == 0 and "_" not in board:
                next_action = -1
                self.memory[hash_state] = (s, next_action)

            while True:

                state = self.agent.move(state)
                s = function.score(state, self.symbol, self.other_symbol)
                if s == 1:
                    self.agent.fallback(state, s)
                    break
                if s == 0 and "_" not in state:
                    self.agent.fallback(state, s)
                    break
                if "_" in state:
                    playerx, playery = random.randint(
                        0, self.board_size - 1), random.randint(
                            0, self.board_size - 1)
                    statep = function.move(state, playerx, playery,
                                           self.other_symbol)
                    while statep is False:
                        playerx, playery = random.randint(
                            0, self.board_size - 1), random.randint(
                                0, self.board_size - 1)
                        statep = function.move(state, playerx, playery,
                                               self.other_symbol)
                    state = statep
                s = function.score(state, self.symbol, self.other_symbol)
                if s == -1:
                    self.agent.fallback(state, s)
                    break
                if s == 0 and "_" not in state:
                    self.agent.fallback(state, s)
                    break
            action = self.agent.history[0]
            softmax_value = function.softmax(self.agent.value[0])
            max_value = np.max(softmax_value)

            self.memory[hash_state] = (max_value, action)
            self.agent.clean()
        return self.memory[hash_state]
Code example #21
    def forward(self, X):
        Z = np.tanh(X.dot(self.W1) + self.b1)
        return softmax(Z.dot(self.W2) + self.b2), Z