def predict(network, x, mode="INT_MODE"):
    """Forward pass of a 3-layer net in float or fixed-point (integer) mode.

    Returns (y, (a1, a2, a3)): the softmax output plus each layer's
    pre-activation for inspection by the caller.
    """
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    if mode == "FLOAT_MODE":
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        z2 = sigmoid(a2)
        a3 = np.dot(z2, W3) + b3
    else:
        # Fixed-point path: rescale each dot product by P_INT, add the bias,
        # then saturate to a signed (P_INT_BITS + 4)-bit range before the
        # integer activation.
        lo = -(1 << (P_INT_BITS + 4))
        hi = (1 << (P_INT_BITS + 4)) - 1

        a1 = np.clip(np.dot(x, W1) // P_INT + b1, lo, hi)
        z1 = sigmoid_int(a1)
        a2 = np.clip(np.dot(z1, W2) // P_INT + b2, lo, hi)
        z2 = sigmoid_int(a2)
        a3 = np.clip(np.dot(z2, W3) // P_INT + b3, lo, hi)

    y = softmax(a3)
    return y, (a1, a2, a3)
def predict(network, x):
    """3-layer MLP inference: two sigmoid hidden layers, softmax output.

    Fix: removed the commented-out pprint_elm debug calls that cluttered
    every other line of the original; re-add targeted logging if tracing
    is ever needed again.
    """
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)

    return y
def predict(network, x):
    """Run inference through the 3-layer network.

    Parameters
    ----------
    network : dict of weights ('W1'..'W3') and biases ('b1'..'b3')
    x : input batch

    Returns
    -------
    Softmax probabilities per class.
    """
    weights = network['W1'], network['W2'], network['W3']
    biases = network['b1'], network['b2'], network['b3']

    pre1 = np.dot(x, weights[0]) + biases[0]
    logger.debug(pre1)
    hidden1 = sigmoid(pre1)
    hidden2 = sigmoid(np.dot(hidden1, weights[1]) + biases[1])
    scores = np.dot(hidden2, weights[2]) + biases[2]

    return softmax(scores)
Example #4
0
    def forward(self, x, h_prev, c_prev):
        """One LSTM step; returns (h_next, c_next) and caches intermediates
        for the backward pass."""
        Wx, Wh, b = self.params
        H = h_prev.shape[1]

        # Single fused affine transform produces all four gate pre-activations.
        gates = np.dot(x, Wx) + np.dot(h_prev, Wh) + b

        # Split into equal H-wide slices: forget, candidate, input, output.
        forget = sigmoid(gates[:, :H])
        cand = np.tanh(gates[:, H:2 * H])
        inp = sigmoid(gates[:, 2 * H:3 * H])
        out = sigmoid(gates[:, 3 * H:])

        # Next memory cell, then next hidden state.
        c_next = forget * c_prev + cand * inp
        h_next = out * np.tanh(c_next)

        self.cache = (x, h_prev, c_prev, inp, forget, cand, out, c_next)
        return h_next, c_next
Example #5
0
    def forward(self, x, h_prev):
        """One GRU step; returns the next hidden state.

        x is an (N, D) input batch and h_prev an (N, H) hidden state.
        Wx (D, 3H), Wh (H, 3H) and the 2-D bias b pack the update (z),
        reset (r) and candidate (h) sections side by side.
        """
        Wx, Wh, b = self.params
        H = Wh.shape[0]

        Wxz, Wxr, Wxh = Wx[:, :H], Wx[:, H:2*H], Wx[:, 2*H:]
        Whz, Whr, Whh = Wh[:, :H], Wh[:, H:2*H], Wh[:, 2*H:]
        bz, br, bh = b[:, :H], b[:, H:2*H], b[:, 2*H:]

        update_gate = sigmoid(np.dot(x, Wxz) + np.dot(h_prev, Whz) + bz)
        reset_gate = sigmoid(np.dot(x, Wxr) + np.dot(h_prev, Whr) + br)
        candidate = np.tanh(np.dot(x, Wxh) + np.dot(reset_gate * h_prev, Whh) + bh)
        h_next = (1 - update_gate) * h_prev + update_gate * candidate

        self.cache = (x, h_prev, reset_gate, update_gate, candidate)
        return h_next
Example #6
0
    def forward(self, x, h_prev, c_prev):
        """LSTM forward step.

        Takes the current input x, the previous hidden state h_prev and
        the previous memory cell c_prev; returns (h_next, c_next).
        """
        Wx, Wh, b = self.params
        N, H = h_prev.shape

        # Fused affine transform for all four gates at once.
        A = np.dot(x, Wx) + np.dot(h_prev, Wh) + b

        # Slice the result into four equal H-wide pieces.
        f, g = A[:, :H], A[:, H:2*H]
        i, o = A[:, 2*H:3*H], A[:, 3*H:]

        # Activations: sigmoid for the gates, tanh for the candidate.
        f, i, o = sigmoid(f), sigmoid(i), sigmoid(o)
        g = np.tanh(g)

        c_next = f * c_prev + g * i
        h_next = o * np.tanh(c_next)

        self.cache = (x, h_prev, c_prev, i, f, g, o, c_next)
        return h_next, c_next
Example #7
0
  def forward(self, x, h_prev, c_prev):
    """LSTM step: one fused affine transform for the four gates, then the
    cell/hidden state update."""
    Wx, Wh, b = self.params
    N, H = h_prev.shape

    # All four gate pre-activations computed together, then split.
    A = np.dot(x, Wx) + np.dot(h_prev, Wh) + b
    forget = sigmoid(A[:, :H])
    cand = np.tanh(A[:, H:2*H])
    inp = sigmoid(A[:, 2*H:3*H])
    out = sigmoid(A[:, 3*H:4*H])

    # Propagate the memory cell and hidden state.
    c_next = forget * c_prev + cand * inp
    h_next = out * np.tanh(c_next)

    self.cache = (x, h_prev, c_prev, inp, forget, cand, out, c_next)
    return h_next, c_next
Example #8
0
    def predict(self, x):
        """Two-layer forward pass; both layers use sigmoid activation."""
        params = self.params
        hidden = sigmoid(np.dot(x, params['W1']) + params['b1'])
        return sigmoid(np.dot(hidden, params['W2']) + params['b2'])
def predict(network, x):
    """3-layer MLP inference: sigmoid hidden layers, softmax output."""
    activation = x
    for layer in ('1', '2'):
        activation = sigmoid(
            np.dot(activation, network['W' + layer]) + network['b' + layer])
    return softmax(np.dot(activation, network['W3']) + network['b3'])
def predict(network, x):
    """Run the 3-layer network forward; returns softmax probabilities."""
    h1 = sigmoid(np.dot(x, network["W1"]) + network["b1"])
    h2 = sigmoid(np.dot(h1, network["W2"]) + network["b2"])
    return softmax(np.dot(h2, network["W3"]) + network["b3"])
Example #11
0
def forward(network, x):
    """Forward pass returning the raw output-layer scores (no final
    activation is applied)."""
    hidden1 = sigmoid(np.dot(x, network['W1']) + network['b1'])
    hidden2 = sigmoid(np.dot(hidden1, network['W2']) + network['b2'])
    return np.dot(hidden2, network['W3']) + network['b3']
Example #12
0
def forward(network, x):
    """3-layer forward pass; the output layer uses the identity function.

    Note the parameter keys here are lowercase ('w1'..'w3').
    """
    w1, w2, w3 = network['w1'], network['w2'], network['w3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    layer1 = sigmoid(np.dot(x, w1) + b1)
    layer2 = sigmoid(np.dot(layer1, w2) + b2)
    return identity_function(np.dot(layer2, w3) + b3)
def predict(network, x):
    """Classify input x with the stored 3-layer parameters; softmax output."""
    # Parameters are read straight from the dictionary at each layer.
    h1 = sigmoid(np.dot(x, network["W1"]) + network["b1"])
    h2 = sigmoid(np.dot(h1, network["W2"]) + network["b2"])
    return softmax(np.dot(h2, network["W3"]) + network["b3"])
Example #14
0
def predict(network, x):
    """Forward-propagate x through three layers; returns class probabilities."""
    act = x
    for idx in (1, 2):
        act = sigmoid(np.dot(act, network['W%d' % idx]) + network['b%d' % idx])
    return softmax(np.dot(act, network['W3']) + network['b3'])
Example #15
0
def predict_sigmoid(network, x):
    """3-layer inference with sigmoid hidden activations and softmax output."""
    w1, w2, w3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    first = sigmoid(np.dot(x, w1) + b1)
    second = sigmoid(np.dot(first, w2) + b2)
    return softmax(np.dot(second, w3) + b3)
Example #16
0
def forword(network, x):
    """Forward pass with an identity output layer.

    NOTE(review): the name 'forword' looks like a typo of 'forward'; it is
    kept unchanged so existing callers keep working.
    """
    z1 = sigmoid(np.dot(x, network['W1']) + network['b1'])
    z2 = sigmoid(np.dot(z1, network['W2']) + network['b2'])
    return identity_function(np.dot(z2, network['W3']) + network['b3'])
Example #17
0
    def forward(self, x, h_prev):
        """Single GRU step without bias terms; returns the next hidden state."""
        H = self.Wh.shape[0]

        # Column-split the packed weights into update / reset / candidate parts.
        Wx_z, Wx_r, Wx_c = self.Wx[:, :H], self.Wx[:, H:2 * H], self.Wx[:, 2 * H:]
        Wh_z, Wh_r, Wh_c = self.Wh[:, :H], self.Wh[:, H:2 * H], self.Wh[:, 2 * H:]

        update = sigmoid(np.dot(x, Wx_z) + np.dot(h_prev, Wh_z))
        reset = sigmoid(np.dot(x, Wx_r) + np.dot(h_prev, Wh_r))
        candidate = np.tanh(np.dot(x, Wx_c) + np.dot(reset * h_prev, Wh_c))
        h_next = (1 - update) * h_prev + update * candidate

        self.cache = (x, h_prev, update, reset, candidate)

        return h_next
Example #18
0
def predict(network, x):
    """Model inference (forward propagation); returns class probabilities."""
    W1, W2, W3 = network['W1'], network['W2'], network['W3']  # weights
    b1, b2, b3 = network['b1'], network['b2'], network['b3']  # biases

    z1 = sigmoid(np.dot(x, W1) + b1)  # input layer -> layer 1, then activate
    z2 = sigmoid(np.dot(z1, W2) + b2)  # layer 1 -> layer 2
    scores = np.dot(z2, W3) + b3      # layer 2 -> output layer
    return softmax(scores)            # turn raw scores into probabilities
def forward(network, x):
    """Forward propagation; the output layer applies the identity function
    (kept only for structural symmetry with the softmax variant)."""
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    # Signal flows input -> hidden 1 -> hidden 2 -> output; both hidden
    # layers use sigmoid activation.
    hidden1 = sigmoid(np.dot(x, W1) + b1)
    hidden2 = sigmoid(np.dot(hidden1, W2) + b2)
    return identity_function(np.dot(hidden2, W3) + b3)
def predict(network, x):
    """Batched inference: returns the argmax class index for each row of x."""
    # x is a batch (e.g. 100x784) multiplied by W1 (784x50), so no
    # transpose is required.
    h1 = sigmoid(np.dot(x, network['W1']) + network['b1'])
    h2 = sigmoid(np.dot(h1, network['W2']) + network['b2'])
    probs = softmax(np.dot(h2, network['W3']) + network['b3'])

    return np.argmax(probs, axis=1)
Example #21
0
def predict(network, x):
    """Compute the network's prediction for input x."""
    # Unpack the weights and biases stored in the network dict.
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    z1 = sigmoid(np.dot(x, W1) + b1)   # layer 1: affine then sigmoid
    z2 = sigmoid(np.dot(z1, W2) + b2)  # layer 2: affine then sigmoid
    y = softmax(np.dot(z2, W3) + b3)   # output: affine then softmax

    return y
def predict(network, x):
    """3-layer forward pass: sigmoid hidden layers, softmax output.

    Fix: dropped the dead `z3 = sigmoid(a3)` — it was computed but never
    used (the returned y is the softmax of the raw scores a3).
    """
    w1, w2, w3 = network["W1"], network["W2"], network["W3"]
    b1, b2, b3 = network["b1"], network["b2"], network["b3"]

    a1 = np.dot(x, w1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, w2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, w3) + b3
    y = softmax(a3)

    return y
Example #23
0
    def forward(self, x, h_prev):
        """One GRU step with bias terms; returns the next hidden state."""
        Wx, Wh, b = self.params
        H = Wh.shape[0]

        # Slice the packed parameters into update (z), reset (r) and
        # candidate (h) pieces; b is 1-D here.
        Wxz, Whz, bz = Wx[:, :H], Wh[:, :H], b[:H]
        Wxr, Whr, br = Wx[:, H:2 * H], Wh[:, H:2 * H], b[H:2 * H]
        Wxh, Whh, bh = Wx[:, 2 * H:], Wh[:, 2 * H:], b[2 * H:]

        z = sigmoid(np.dot(x, Wxz) + np.dot(h_prev, Whz) + bz)
        r = sigmoid(np.dot(x, Wxr) + np.dot(h_prev, Whr) + br)
        h_hat = np.tanh(np.dot(x, Wxh) + np.dot(r * h_prev, Whh) + bh)
        h_next = (1 - z) * h_prev + z * h_hat

        self.cache = (x, h_prev, z, r, h_hat)

        return h_next
    def predict(self, x):
        """Two-layer forward pass returning softmax of the second layer's
        sigmoid output.

        NOTE(review): softmax is applied to sigmoid(a2), not to the raw
        scores a2 — confirm that is intentional before relying on the
        probabilities.
        """
        # Layer 1
        z1 = sigmoid(np.dot(x, self.params['W1']) + self.params['b1'])

        # Layer 2
        z2 = sigmoid(np.dot(z1, self.params['W2']) + self.params['b2'])

        # Output layer
        return softmax(z2)
def forward(network, x):
    """Forward pass with two ReLU hidden layers and a sigmoid output.

    Prints each intermediate vector via print_vec and returns (y, z1):
    the network output and the first hidden layer's output.
    """
    print("##### 順伝播開始 #####")

    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    u1 = np.dot(x, W1) + b1    # total input to hidden layer 1
    z1 = functions.relu(u1)    # hidden layer 1 output
    u2 = np.dot(z1, W2) + b2   # total input to hidden layer 2
    z2 = functions.relu(u2)    # hidden layer 2 output
    u3 = np.dot(z2, W3) + b3   # total input to the output layer
    y = functions.sigmoid(u3)  # network output

    print_vec("総入力1", u1)
    print_vec("中間層出力1", z1)
    print_vec("総入力2", u2)
    print_vec("出力1", y)
    print("出力合計: " + str(np.sum(y)))

    return y, z1
Example #26
0
def predict(network, x):
    """Forward pass of the 3-layer network; returns softmax probabilities.

    Fix: removed the commented-out step-by-step variant that duplicated
    the live code below — dead copies invite divergence over time.
    """
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    z1 = sigmoid(np.dot(x,  W1) + b1)
    z2 = sigmoid(np.dot(z1, W2) + b2)
    y  = softmax(np.dot(z2, W3) + b3)

    return y
Example #27
0
    def forward(self, x, t):
        """Sigmoid activation followed by two-class cross-entropy loss.

        Stores t and sigmoid(x) on self for the backward pass, then stacks
        (1 - y, y) column-wise as the per-sample class probabilities.
        """
        self.t = t
        self.y = sigmoid(x)

        probs = np.c_[1.0 - self.y, self.y]
        return cross_entropy_error(probs, self.t)
Example #28
0
def predict(network, x):
    """3-layer inference; returns softmax probabilities."""
    biases = network['b1'], network['b2'], network['b3']
    weights = network['W1'], network['W2'], network['W3']

    # Two sigmoid hidden layers, then a softmax output layer.
    layer = x
    for W, B in zip(weights[:2], biases[:2]):
        layer = sigmoid(np.dot(layer, W) + B)

    return softmax(np.dot(layer, weights[2]) + biases[2])
Example #29
0
    def CompletePlot(self,
                     time=0,
                     save=False,
                     start_time=9,
                     path='~/Data/ShadeRatio/Machida3h/tmp'):
        # Render a heatmap of the model's sigmoid-calibrated predictions
        # over the (x, y) grid at one time slice.
        #
        # time: time coordinate appended to every grid point.
        # save: when True, also write the figure to a file whose name is
        #       built from `path`, `start_time` and 2.5 * time.
        #
        # Build the (x, y, time) evaluation points, one row per grid cell.
        xyt = np.empty((0, self.dimension + 1), float)
        for x in self.xgrid:
            for y in self.ygrid:
                xyt = np.append(xyt, np.array([[x, y, time]]), axis=0)

        # (Re)build the network layers before predicting.
        self.LayerBuild()

        # Raw model output for every evaluation point.
        q = np.array([self.predict(x) \
                      for x in xyt])
        # Affine-then-sigmoid calibration into [0, 1] for the colormap.
        z = sigmoid(self.a * q + self.b)
        zgrid = z.reshape(self.grid[0], self.grid[1])

        sns.heatmap(zgrid,
                    annot=False,
                    vmin=0,
                    vmax=1,
                    fmt='g',
                    cmap='YlGnBu_r')
        if (save == True):
            # NOTE(review): `sns.plt` only exists in very old seaborn
            # releases — confirm the pinned seaborn version provides it.
            sns.plt.savefig(path + str(start_time) + str(2.5 * time) + '.png')

        sns.plt.show()
Example #30
0
    def Plot(self, frame=np.arange(5), axtype='contourf'):
        # Draw 3-D plots of the sigmoid-calibrated predictions, one figure
        # per requested frame index.
        #
        # frame: iterable of frame indices to draw (note the ndarray
        #        default is a shared mutable object across calls).
        # axtype: 'wireframe', 'contour' or 'contourf'.
        self.LayerBuild()

        # Predict at every precomputed (x, y, t) sample point.
        q = np.array([self.predict(x) \
                      for x in self.xyt])

        # Boolean row masks of self.xyt per frame.
        # NOTE(review): masks are built from self.frame while the loop
        # below iterates the `frame` argument — confirm the two agree.
        frame_mask = np.array([self.xyt[:, 2] == f \
                               for f in self.frame])
        x = self.xyt[frame_mask[0], 0]
        y = self.xyt[frame_mask[0], 1]
        # Calibrate raw outputs into [0, 1].
        Z = sigmoid(self.a * q + self.b)

        xgrid = x.reshape(self.grid[0], self.grid[1])
        ygrid = y.reshape(self.grid[0], self.grid[1])

        sns.set_palette('YlGnBu_r')

        for f in frame:
            fig = plt.figure()
            ax = Axes3D(fig)
            # Select and reshape this frame's calibrated values.
            z = Z[frame_mask[f]]
            zgrid = z.reshape(self.grid[0], self.grid[1])
            if (axtype == 'wireframe'): ax.plot_wireframe(x, y, z)
            elif (axtype == 'contour'): ax.contour3D(xgrid, ygrid, zgrid)
            elif (axtype == 'contourf'): ax.contourf3D(xgrid, ygrid, zgrid)
            plt.show()
Example #31
0
def predict(network, x):
    """Run the trained network forward.

    Architecture (per the original notes): 784-unit input, 50-unit sigmoid
    hidden layer, 100-unit sigmoid hidden layer, 10-unit softmax output.
    """
    w1, w2, w3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    h1 = sigmoid(np.dot(x, w1) + b1)
    h2 = sigmoid(np.dot(h1, w2) + b2)
    return softmax(np.dot(h2, w3) + b3)