Example #1
def nn_predict(nn, batch_x):
    batch_x = batch_x.T
    m = batch_x.shape[1]
    nn.a[0] = batch_x
    for k in range(1, nn.depth):
        y = np.dot(nn.W[k - 1], nn.a[k - 1]) + np.tile(nn.b[k - 1], (1, m))
        if nn.batch_normalization:
            y = (y - np.tile(nn.E[k - 1], (1, m))) / np.tile(
                nn.S[k - 1] + 0.0001 * np.ones(nn.S[k - 1].shape), (1, m))
            y = nn.Gamma[k - 1] * y + nn.Beta[k - 1]

        if k == nn.depth - 1:
            f = nn.output_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)
            elif f == 'softmax':
                nn.a[k] = softmax(y)

        else:
            f = nn.active_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)

    return nn
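
The snippet above (like most examples on this page) calls NumPy helpers named sigmoid and softmax that are defined elsewhere in the project. A minimal sketch of numerically careful versions, assuming samples are stored in columns as nn_predict does; the exact project versions may differ:

import numpy as np

def sigmoid(x):
    # Logistic function written so that exp() never sees a large positive argument.
    x = np.asarray(x, dtype=float)
    z = np.exp(-np.abs(x))
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))

def softmax(y):
    # Column-wise softmax (samples in columns); subtracting the max avoids overflow.
    y = y - np.max(y, axis=0, keepdims=True)
    e = np.exp(y)
    return e / np.sum(e, axis=0, keepdims=True)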
Example #2
def useFunction(data, function_number, beta):
    if (function_number == "1"):
        return function.sigmoid(data)
    elif (function_number == "2"):
        return function.hyperbolicTangent(data)
    elif (function_number == "3"):
        return function.unitStep(data, beta)
    elif (function_number == "4"):
        return function.sigmoid(data, beta)
def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = function.sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = function.sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = function.softmax(a3)
    return y
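
predict expects the network dict to hold three weight matrices and bias vectors with matching shapes. A hypothetical way to build and call it, assuming the project's function module is importable; the 4-8-8-3 layer sizes are made up for illustration:

import numpy as np

rng = np.random.default_rng(0)
network = {
    'W1': rng.normal(size=(4, 8)), 'b1': np.zeros(8),
    'W2': rng.normal(size=(8, 8)), 'b2': np.zeros(8),
    'W3': rng.normal(size=(8, 3)), 'b3': np.zeros(3),
}
x = rng.normal(size=(5, 4))   # 5 samples, 4 features each
probs = predict(network, x)   # shape (5, 3); each row sums to 1 after softmax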
Example #4
 def test_sigmoid(self):
     x1 = np.array([-1, 0, 1])
     expected1 = [0.2689414213699951, 0.5, 0.7310585786300049]
     actual1 = sigmoid(x1).tolist()
     self.assertEqual(expected1, actual1)
     expected2 = repr(sigmoid(100))
     actual2 = repr(sigmoid(10))
     self.assertEqual(expected2, actual2)
     expected3 = repr(sigmoid(-100))
     actual3 = repr(sigmoid(-10))
     self.assertEqual(expected3, actual3)
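
Note what the last two assertions require: repr(sigmoid(100)) must equal repr(sigmoid(10)), which a plain 1 / (1 + exp(-x)) does not satisfy (sigmoid(10) ≈ 0.9999546, sigmoid(100) ≈ 1.0). The implementation under test therefore has to saturate early, presumably by clipping its argument; a sketch consistent with all three assertions (the clip threshold of 10 is a guess):

import numpy as np

def sigmoid(x):
    # Clipping makes every |x| >= 10 produce exactly the same output,
    # which is what the repr-equality assertions above rely on.
    x = np.clip(x, -10, 10)
    return 1.0 / (1.0 + np.exp(-x))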
Example #5
    def predict(self, x):
        w1, w2, w3 = self.network['W1'], self.network['W2'], self.network['W3']
        b1, b2, b3 = self.network['b1'], self.network['b2'], self.network['b3']

        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        z2 = sigmoid(a2)
        a3 = np.dot(z2, w3) + b3
        y = softmax(a3)

        return y
Example #6
def main(n_aggregation, dim_feature, n_epochs, batch_size, eps):
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    dataset = util.get_train_data('../../datasets')
    train_data, valid_data = util.random_split(dataset, train_ratio=0.5)
    print('train_size: %d, valid_size: %d' %
          (len(train_data), len(valid_data)))

    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0
            for graph, label in zip(graphs, labels):
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        valid_loss, valid_acc = test(model, valid_data, dim_feature)
        print(
            'epoch: %d, train_loss: %f, train_acc: %f, valid_loss: %f, valid_acc: %f'
            % (epoch, train_loss.avg, train_acc.avg, valid_loss, valid_acc))
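
util.AverageMeter is only used through update(value, n) and .avg, so it is presumably the usual running-average helper; a sketch under that assumption:

class AverageMeter:
    """Tracks a running sum and count so that .avg is the mean of all updates."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        # `value` is treated as an average over n items.
        self.sum += float(value) * n
        self.count += n
        self.avg = self.sum / self.count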
Example #7
File: sgd.py Project: uczlgx0/MLR
    def cal_derivative(self, W_w, W_u, item):
        """
        calculate derivative only with l2 norm
        :param weight:
        :return:
        """
        label, featureDic = item
        dir_W = []
        dir_U = []
        temp_eux = []
        temp_sywx = []
        sum_eux = 0.0
        sum_eux_sywx = 0.0
        for i in range(self.M):
            #get all the temp exp(uj * x) and sigmoid(y * wj * x)
            #and get sum at the same time
            eux = np.exp(fc.dotProduct(W_u[i], featureDic))
            sywx = fc.sigmoid(label * fc.dotProduct(W_w[i], featureDic))
            temp_eux.append(eux)
            temp_sywx.append(sywx)
            sum_eux += eux
            sum_eux_sywx += eux * sywx
        for i in range(self.M):
            #calculate array uj and array wj
            dir_w = {}
            dir_u = {}
            for index in featureDic:
                dir_u[index] = temp_eux[i] * featureDic[index] / sum_eux - \
                    temp_eux[i] * temp_sywx[i] * featureDic[index] / sum_eux_sywx
                dir_w[index] = label * temp_sywx[i] * (
                    temp_sywx[i] - 1) * featureDic[index] / sum_eux_sywx
            dir_W.append(dir_w)
            dir_U.append(dir_u)

        return dir_W, dir_U
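
For reference, the quantity being differentiated here matches the per-sample loss that cal_loss in Example #21 evaluates, namely (with \sigma the sigmoid, x the feature dict and y the label)

loss(x, y) = \log \sum_j \exp(u_j \cdot x) \;-\; \log \sum_j \exp(u_j \cdot x)\,\sigma(y\, w_j \cdot x)

dir_u[index] above matches the partial derivative of this expression with respect to the components of u_i, and dir_w applies \sigma'(t) = \sigma(t)(1 - \sigma(t)) to the second term.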
    def learn(self, epoch, learning_rate):
        # Train on the initialized training data, iterating `epoch` times
        # with the given learning rate.
        cost_list = list()  # store the cost at each epoch so it can be plotted
        for epo in range(epoch):
            z = np.dot(self.X, self.W)  # (m, t): (W^T)x for every sample and label
            hx = sigmoid(z)  # (m, t) squash the predictions into (0, 1)
            for i in range(self.W.shape[0]):
                # Loop over the n features and subtract the partial derivative
                # of the cost with respect to each weight.
                xj = self.X[:, i]  # (m,) the i-th feature of every sample
                # xxj = xj.reshape(xj.shape[0], 1)  # (113,1)
                # (hx - self.y) is (m, t); transpose it and dot with xj to get
                # the (t,) partial derivative, then take the update step.
                self.W[i] -= learning_rate * (np.dot(np.transpose(hx - self.y),
                                                     xj))
            cost_list.append(list(self.cost(hx, self.y)))  # record the cost
            print("epoch: ", epo, " cost: ",
                  cost_list[epo])  # report the epoch and its cost
        cost_list = np.array(cost_list)

        for i in range(cost_list.shape[1]):  # plot the cost curve for each of the 10 targets
            plt.plot(np.arange(0, epoch),
                     cost_list[:, i],
                     label=("target class " + str(i)))
        plt.xlabel("number of iterations")  # x-axis label
        plt.ylabel("cost")  # y-axis label
        plt.legend()
        plt.show()  # display the plot
Example #9
def visualize_sigmoid():
    x = np.arange(-5.0, 5.0, 0.1)
    y = sigmoid(x)

    plt.plot(x, y)
    plt.ylim(-0.1, 1.1)
    plt.title('sigmoid')
    plt.show()
Example #10
def train_rbm(visible, b, c, w, iterr_rbm, mode, epsilon, alpha, x_test, dataset_size, cd, f):
    ind = []
    e = []
    E = []
    fe = []
    d = 0
    g = 0

    t1 = 0
    t2 = 0
    iterr_rbm = iterr_rbm // cd
    for i in range(iterr_rbm):
        visible_batch = visible[:, d:d + 32]
        # visible_batch = visible_batch.reshape(visible.shape[0], 1)
        g += 1
        if g == 32:
            d += 32
            g = 0
        if d > dataset_size - 34:
            d = 0
        actualise_weight(visible_batch, b, c, w, cd, epsilon, alpha, mode)
        if i % (iterr_rbm // 20) == 0:
            ind.append(i)
            if mode[0] == 0:
                tt = time.time()
                e.append(rbmv.energy_grbm(x_test, b, c, w))
                E.append(rbmv.energy_grbm(visible, b, c, w))
                t1 += time.time() - tt
                tt = time.time()
                fe.append(rbmv.pseudo_likelihood_grbm(x_test, b, c, w, f))
                # fE.append(rbmv.pseudo_likelihood_grbm(visible, b, c, w, 3))
                t2 += time.time() - tt
            else:
                tt = time.time()
                e.append(rbmv.energy_rbm(x_test, b, c, w))
                E.append(rbmv.energy_rbm(visible, b, c, w))
                t1 += time.time() - tt
                tt = time.time()
                fe.append(rbmv.pseudo_likelihood_rbm(x_test, b, c, w, f))
                # fE.append(rbmv.pseudo_likelihood_rbm(visible, b, c, w, 3))
                t2 += time.time() - tt
    plt.hist(np.mean(func.sigmoid(c + w.dot(visible)), axis=1), bins=30)
    plt.title("mean neurons activation")
    plt.legend()
    plt.show()
    plt.plot(ind, e, label="test set")
    plt.plot(ind, E, label="training set")
    plt.legend(loc='upper right')
    if mode[0] == 1:
        plt.yscale('symlog')
    plt.show()
    plt.plot(ind, fe, label="free energy test set")
    # plt.plot(ind, fE, label="free energy training set")
    plt.legend(loc='lower right')
    plt.show()
    print("free-energy", fe[len(fe) - 1])
    print("energy time and pseudo likelihood time ", t1, t2)
    return [ind, fe]
Example #11
    def predict(self, x):  # given x, predict the output y from the current weights and biases
        # x (input layer) -> hidden layer
        z2 = np.dot(x, self.params['W1']) + self.params['b1']
        a2 = sigmoid(z2)  # apply the sigmoid function to the result
        # hidden layer -> y (output layer)
        z3 = np.dot(a2, self.params['W2']) + self.params['b2']
        y = softmax(z3)  # apply softmax to turn y into probabilities

        return y  # (10,3)
 def predict(self, X_test, y_test):  # predict for new inputs
     z = np.dot(X_test, self.W)  # (W^T)x for each of the m samples
     hx = sigmoid(z)  # (m,) squash the predictions into (0, 1)
     cnt = 0  # count how many predictions match the true labels
     for i in range(hx.shape[0]):
         # A prediction counts as correct if it is positive for a positive
         # target or negative for a negative target.
         if (hx[i] > 0.5 and y_test[i] == 1) or (hx[i] <= 0.5
                                                 and y_test[i] == 0):
             cnt += 1  # correct prediction
     print("Accuracy: ", cnt / hx.shape[0])  # fraction of test samples predicted correctly
Example #13
 def forward(self, inputs):
     y = self.conv1.forward(inputs)
     y = self.max_pool1.forward(y)
     y = self.conv2.forward(y)
     y = self.max_pool2.forward(y)
     y = y.reshape(y.shape[0], -1)
     y = self.fc1.forward(y)
     y = F.sigmoid(y)
     self.cache = y
     return y
 def forward(self, inputs, train=True):
     if (train): self.activations.append(inputs)
     x = self.fc1.forward(inputs)
     if (train): self.hidden_state.append(x)
     x = F.reLu(x)
     if (train): self.activations.append(x)
     x = self.fc2.forward(x)
     if (train): self.hidden_state.append(x)
     x = F.sigmoid(x)
     return x
Example #15
 def test_binary_cross_entropy(self):
     x = np.arange(-160, 160, 40)
     expected1 = [0., 0., 0., 0., 0.6931471805599453, 40., 80., 120.]
     actual1 = binary_cross_entropy(x, 0).tolist()
     self.assertEqual(expected1, actual1)
     expected2 = [160., 120., 80., 40., 0.6931471805599453, 0., 0., 0.]
     actual2 = binary_cross_entropy(x, 1).tolist()
     self.assertEqual(expected2, actual2)
     expected3 = 7.686442000846165
     actual3 = binary_cross_entropy(sigmoid(x), 0).sum(0).tolist()
     self.assertEqual(expected3, actual3)
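
These expected values pin down the convention: binary_cross_entropy takes a logit rather than a probability (it returns log 2 ≈ 0.6931 at x = 0 and grows linearly when the logit has the wrong sign). A sketch of a numerically stable implementation consistent with them:

import numpy as np

def binary_cross_entropy(x, target):
    # Cross entropy evaluated on logits:
    #   log(1 + exp(x)) - x * target
    # rewritten as max(x, 0) - x * target + log(1 + exp(-|x|)) so that exp()
    # never overflows; the tiny exp(-|x|) terms flush to 0 for large |x|,
    # consistent with the exact 0.0 and 40.0 entries in the expected lists above.
    x = np.asarray(x, dtype=float)
    return np.maximum(x, 0) - x * target + np.log(1.0 + np.exp(-np.abs(x)))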
Example #16
def test(model, dataset, dim_feature):
    """Function for model evaluation"""
    acc = util.AverageMeter()
    loss = util.AverageMeter()
    for graph, label in dataset:
        x = np.zeros([len(graph), dim_feature])
        x[:, 0] = 1
        outputs = model(graph, x)
        loss.update(bce_with_logit(outputs, label), 1)
        acc.update((sigmoid(outputs) > 0.5) == label, 1)
    return loss.avg, acc.avg
    def predict(self, X_test, y_test):  # predict for new inputs
        z = np.dot(X_test, self.W)  # (W^T)x for each of the m samples
        hx = sigmoid(z)  # (m, t) squash the predictions into (0, 1)
        sx = np.array([softmax(hx[i]) for i in range(hx.shape[0])
                       ])  # convert each prediction into probabilities with softmax

        cnt = 0  # count how many predictions match the true labels
        for i in range(sx.shape[0]):  # loop over the test samples
            max_index = np.argmax(sx[i])  # index of the largest probability
            if y_test[i][max_index] == 1:  # check that this index matches the target
                cnt += 1  # correct prediction
        print("Accuracy: ", cnt / hx.shape[0])  # fraction of test samples predicted correctly
Example #18
def actualise_weight(visible, b, c, w, k, epsilon, alpha, mode):  # run contrastive divergence and update the weights
    hidden = sample_rbm_forward(visible, c, w)
    hidden_c, visible_c = backandforw(hidden, b, c, w, k, mode[0])
    w += (1 / visible.shape[1]) * epsilon * (
        hidden.dot(np.transpose(visible)) - hidden_c.dot(np.transpose(visible_c)))
    b += (1 / visible.shape[1]) * epsilon * (
        np.sum(visible - visible_c, axis=1)).reshape(visible.shape[0], 1)
    c += (1 / visible.shape[1]) * epsilon * (
        np.sum(hidden - hidden_c, axis=1)).reshape(w.shape[0], 1)
    if mode[1] != 0:  # sparsity target
        cwv = c + w.dot(visible)
        dsigm = func.dsigmoid(cwv)
        q = ((2 * mode[1] - 1) - (2 * np.mean(func.sigmoid(cwv), axis=1) - 1)).reshape(w.shape[0], 1)
        w += epsilon * alpha * q * (1 / visible.shape[1]) * (dsigm.dot(np.transpose(visible)))
        c += epsilon * alpha * q * (1 / visible.shape[1]) * (np.sum(dsigm, axis=1).reshape(w.shape[0], 1))
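
What actualise_weight applies is the usual contrastive-divergence update, averaged over the m = visible.shape[1] samples in the batch: with (v, h) the data-driven states and (v_k, h_k) the states after k alternating Gibbs steps,

\Delta W = \frac{\epsilon}{m}\,(h v^\top - h_k v_k^\top), \qquad
\Delta b = \frac{\epsilon}{m}\sum_{\text{samples}} (v - v_k), \qquad
\Delta c = \frac{\epsilon}{m}\sum_{\text{samples}} (h - h_k)

The sparsity branch then nudges w and c in proportion to the gap between the target activation rate mode[1] and the mean hidden activation.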
Example #19
def main(n_aggregation, dim_feature, n_epochs, batch_size, eps, outputfile):
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    # Training
    train_data = util.get_train_data('../../datasets')
    print('train_size: %d' % len(train_data))
    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0
            for graph, label in zip(graphs, labels):
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        print('epoch: %d, train_loss: %f, train_acc: %f' %
              (epoch, train_loss.avg, train_acc.avg))

    # Prediction
    test_data = util.get_test_data('../../datasets')
    with open(outputfile, 'w') as o:
        for graph in test_data:
            x = np.zeros([len(graph), dim_feature])
            x[:, 0] = 1
            logit = model(graph, x)
            pred = sigmoid(logit) > 0.5
            o.write(str(int(pred[0])) + '\n')
Example #20
def GRUyuce(traindata1, k, Wy, W, U, Wz, Uz, Wr, Ur, w, pianchabu):
    # Initialize the network structure
    uNum = k  # number of cells/units
    hdim = k
    eta = 0.1  # learning rate

    # Training data (rescaled to [0, 1])

    traindata = [0 for i in range(len(traindata1))]
    for i in range(len(traindata1)):
        if (max(traindata1) == min(traindata1) and max(traindata1) == 0):
            traindata[i] = 0.0
        if (max(traindata1) == min(traindata1) and max(traindata1) != 0):
            traindata[i] = 1.0
        if (max(traindata1) != min(traindata1)):
            traindata[i] = (traindata1[i] - min(traindata1)
                            ) / float(max(traindata1) - min(traindata1))
    #print(traindata)

    # Per-cell storage
    rvalues = [[0 for col in range(hdim)] for row in range(uNum + 1)]
    zvalues = [[0 for col in range(hdim)] for row in range(uNum + 1)]
    hbarvalues = [[0 for col in range(hdim)] for row in range(uNum)]
    hvalues = [[0 for col in range(hdim)] for row in range(uNum)]
    yvalues = [0 for i in range(uNum)]

    # Forward pass
    rvalues[0] = function.sigmoid(function.xchenlist(traindata[0], Wr))
    hbarvalues[0] = function.tanh(function.xchenlist(traindata[0], W))
    zvalues[0] = function.sigmoid(function.xchenlist(traindata[0], Wz))
    hvalues[0] = function.listchenlist(zvalues[0], hbarvalues[0])
    yvalues[0] = function.sigmoid(function.hangchenlie(hvalues[0], Wy))
    for t in range(1, uNum):
        rvalues[t] = function.sigmoid(
            function.listjialist(function.xchenlist(traindata[t], Wr),
                                 function.hangchenjuzhen(hvalues[t - 1], Ur)))
        hbarvalues[t] = function.tanh(
            function.listjialist(
                function.xchenlist(traindata[t], W),
                function.hangchenjuzhen(
                    function.listchenlist(rvalues[t], hvalues[t - 1]), U)))
        zvalues[t] = function.sigmoid(
            function.listjialist(function.xchenlist(traindata[t], Wz),
                                 function.hangchenjuzhen(hvalues[t - 1], Uz)))
        hvalues[t] = function.listjialist(
            function.listchenlist(function.kjianlist(1, zvalues[t]),
                                  hvalues[t - 1]),
            function.listchenlist(zvalues[t], hbarvalues[t]))
        yvalues[t] = function.sigmoid(function.hangchenlie(hvalues[t], Wy))

    # Map the outputs back onto the original scale of traindata1
    x = function.kjialist(
        min(traindata1),
        function.xchenlist((max(traindata1) - min(traindata1)), yvalues))
    yucezhi = function.hangchenlie(w, function.listjialist(x, pianchabu))

    return yucezhi
Example #21
File: sgd.py Project: uczlgx0/MLR
 def cal_loss(self, W_w, W_u, data):
     """
     calculate the loss over all data
     :param w_w:
     :param w_u:
     :param data:
     :return:
     """
     loss = 0.0
     for label, featureDic in data:
         #sum_u is sum(exp(uj * x))
         #sum_us is sum(exp(uj * x) * sigmoid(y * wi * x))
         sum_u = 0
         sum_us = 0
         for i in range(self.M):
             wx = fc.dotProduct(W_w[i], featureDic)
             eux = np.exp(fc.dotProduct(W_u[i], featureDic))
             sum_u += eux
             sum_us += eux * fc.sigmoid(label * wx)
         loss += np.log(sum_u) - np.log(sum_us)
     print("loss is:  %s" % loss)
Example #22
    def predict(self, graph, vertex_size, step_size=2):
        """
        Return the binary classification result for the input data.

        Parameters
        ----------
        graph : array-like
            Graph structure given as an adjacency matrix
        vertex_size : int
            Number of nodes in the graph
        step_size : int, default 2
            Number of aggregation steps

        Return
        ------
         : int
            Classification result (binary)
        """
        s = self.forward(graph, vertex_size, step_size)
        p = sigmoid(s)
        return int((p > 0.5)[0])
 def learn(self, epoch, learning_rate):
     # Train on the initialized training data, iterating `epoch` times
     # with the given learning rate.
     cost_list = list()  # store the cost at each epoch so it can be plotted
     for epo in range(epoch):
         z = np.dot(self.X, self.W)  # (W^T)x for each of the m samples
         hx = sigmoid(z)  # (m,) squash the predictions into (0, 1)
         for i in range(self.W.shape[0]):
             # Loop over the n features and subtract the partial derivative
             # of the cost with respect to each weight.
             xj = self.X[:, i]  # (m,) the i-th feature of every sample
             # Dot (hx - self.y), shape (m,), with xj to sum the per-sample
             # contributions to the partial derivative, then take the step.
             self.W[i] -= learning_rate * (np.dot((hx - self.y), xj))
         cost_list.append(self.cost(hx, self.y))  # record the cost
         print("epoch: ", epo, " cost: ",
               cost_list[epo])  # report the epoch and its cost
     cost_list = np.array(cost_list)
     plt.plot(np.arange(0, epoch), cost_list,
              label="binary class")  # plot the cost curve
     plt.xlabel("number of iterations")  # x-axis label
     plt.ylabel("cost")  # y-axis label
     plt.legend()
     plt.show()  # display the plot
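
The weight update in the inner loop above subtracts learning_rate times the usual logistic-regression gradient: for each feature j,

\frac{\partial J}{\partial w_j} = \sum_{i=1}^{m} \bigl(\sigma(w \cdot x_i) - y_i\bigr)\, x_{ij}

which is exactly np.dot(hx - self.y, xj) with hx = sigmoid(np.dot(self.X, self.W)).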
Example #24
    def fine_tuning(self, features, labels, alpha=0.05):
        num_weights = len(self.weights)
        for x, z in zip(features, labels):
            outputs = [numpy.array([x]).transpose()]
            for w in self.weights:
                # TODO: other activation functions
                outputs.append(sigmoid(numpy.dot(w, outputs[-1])))

            # calc. delta
            deltas = []
            for i in range(num_weights, 0, -1):
                if i == num_weights:
                    # top level
                    z = numpy.array([z])
                    deltas.append(z - outputs[i])
                else:
                    deltas.append(
                        numpy.multiply(sigmoid_prime(outputs[i]).transpose(), numpy.dot(deltas[-1], self.weights[i]))
                    )
            deltas.reverse()

            # update weight
            for i in range(num_weights):
                self.weights[i] += alpha * numpy.dot(outputs[i], deltas[i]).transpose()
Example #25
def pseudo_likelihood_grbm(visible, b, c, w, n):
    l = 0
    for i in range(n):
        l += np.log((func.sigmoid(free_energy_grbm(flip(visible), b, c, w)
                                  - free_energy_grbm(visible, b, c, w)) + 1) / 2)
    return visible.shape[0] * l / n
Example #26
def sample_rbm_backward(hidden, c, w):
    return np.where(
        np.random.rand(w.shape[1], hidden.shape[1])
        < func.sigmoid(np.tile(c, (1, hidden.shape[1])) + np.transpose(w).dot(hidden)),
        1, -1)
Example #27
 def predict(self, x):
     outputs = [numpy.array([x]).transpose()]
     for w in self.weights:
         # TODO: other activation functions
         outputs.append(sigmoid(numpy.dot(w, outputs[-1])))
     return outputs[-1]
Example #28
def nn_forward(nn, batch_x, batch_y):
    s = len(nn.cost) + 1
    batch_x = batch_x.T
    batch_y = batch_y.T
    m = batch_x.shape[1]
    nn.a[0] = batch_x

    cost2 = 0
    for k in range(1, nn.depth):
        y = np.dot(nn.W[k - 1], nn.a[k - 1]) + np.tile(
            nn.b[k - 1], (1, m))  # np.tile is NumPy's analogue of MATLAB's repmat (replicate matrix)

        if nn.batch_normalization:
            # Accumulate the running mean (E) and std (S) over all batches seen so far.
            nn.E[k - 1] = nn.E[k - 1] * nn.vecNum + np.array([np.sum(y, axis=1)]).T
            nn.S[k - 1] = nn.S[k - 1]**2 * (nn.vecNum - 1) + np.array(
                [(m - 1) * np.std(y, ddof=1, axis=1)**2]).T  # ddof=1 gives the unbiased estimate
            nn.vecNum = nn.vecNum + m
            nn.E[k - 1] = nn.E[k - 1] / nn.vecNum
            nn.S[k - 1] = np.sqrt(nn.S[k - 1] / (nn.vecNum - 1))
            y = (y - np.tile(nn.E[k - 1], (1, m))) / np.tile(
                nn.S[k - 1] + 0.0001 * np.ones(nn.S[k - 1].shape), (1, m))
            y = nn.Gamma[k - 1] * y + nn.Beta[k - 1]

        if k == nn.depth - 1:
            f = nn.output_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)
            elif f == 'softmax':
                nn.a[k] = softmax(y)

        else:
            f = nn.active_function
            if f == 'sigmoid':
                nn.a[k] = sigmoid(y)
            elif f == 'tanh':
                nn.a[k] = np.tanh(y)
            elif f == 'relu':
                nn.a[k] = np.maximum(y, 0)

        cost2 = cost2 + np.sum(nn.W[k - 1]**2)

    if nn.encoder == 1:
        roj = np.sum(nn.a[2], axis=1) / m
        nn.cost[s] = 0.5 * np.sum(
            (nn.a[k] - batch_y)**
            2) / m + 0.5 * nn.weight_decay * cost2 + 3 * sum(
                nn.sparsity * np.log(nn.sparsity / roj) +
                (1 - nn.sparsity) * np.log((1 - nn.sparsity) / (1 - roj)))
    else:
        if nn.objective_function == 'MSE':
            nn.cost[s] = 0.5 / m * sum(sum(
                (nn.a[k] - batch_y)**2)) + 0.5 * nn.weight_decay * cost2
        elif nn.objective_function == 'Cross Entropy':
            nn.cost[s] = -0.5 * sum(sum(
                batch_y * np.log(nn.a[k]))) / m + 0.5 * nn.weight_decay * cost2
    # nn.cost[s]

    return nn
Example #29
0
 def step(self, x, h_prev):
     r = fn.sigmoid(np.dot(self.param.wr, x) + self.param.ur*h_prev + np.dot(self.param.cr, self.c))
     z = fn.sigmoid(np.dot(self.param.wz, x) + self.param.uz*h_prev + np.dot(self.param.cz, self.c))
     hs = fn.tanh(np.dot(self.param.whs, x) + r*(self.param.uhs*h_prev + np.dot(self.param.chs, self.c)))
     h = z*h_prev + (1-z)*hs
     return r, z, hs, h
Example #30
 def step(self, x, h_prev):
     r = fn.sigmoid(np.dot(self.param.wr, x) + self.param.ur*h_prev + self.param.br)
     z = fn.sigmoid(np.dot(self.param.wz, x) + self.param.uz*h_prev + self.param.bz)
     hs = fn.tanh(np.dot(self.param.whs, x) + self.param.uhs*(r*h_prev) + self.param.bhs)
     h = z*h_prev + (1-z)*hs
     return r, z, hs, h
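
Both step methods above compute a single GRU time step (the Example #29 variant additionally feeds a context vector self.c into each gate). Written out to mirror the second, plainer variant, with \odot the element-wise product:

r_t = \sigma(W_r x_t + u_r \odot h_{t-1} + b_r)
z_t = \sigma(W_z x_t + u_z \odot h_{t-1} + b_z)
\tilde h_t = \tanh(W_{\tilde h} x_t + u_{\tilde h} \odot (r_t \odot h_{t-1}) + b_{\tilde h})
h_t = z_t \odot h_{t-1} + (1 - z_t) \odot \tilde h_t

Note that z here gates how much of the previous state is kept, the mirror image of the common convention h_t = (1 - z_t) \odot h_{t-1} + z_t \odot \tilde h_t.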
Example #31
 def encode(self, x):
     h1 = numpy.dot(self.weight, x)
     return sigmoid(h1)
Example #32
def sample_rbm_forward(visible, c, w):
    return np.where(
        np.random.rand(w.shape[0], visible.shape[1])
        < func.sigmoid(np.tile(c, (1, visible.shape[1])) + w.dot(visible)),
        1, -1)
Example #33
 def decode(self, y):
     h2 = numpy.dot(self.weight.transpose(), y)
     return sigmoid(h2)
        def test_sample(filename):
            with open(filename, 'r') as o:
                logit = float(o.readline().strip())
                ans = float(o.readline().strip())

            self.assertTrue(np.isclose(function.sigmoid(logit), ans).all(), filename)