Example #1
def convolutionFeatureMap(data, bias, w):
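    # Apply each convolution kernel in w to the input, add its bias, and
    # squash with a sigmoid: one output feature map per kernel.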
    featureMap = []
    size = len(w)
    for index in range(size):
        result = w[index].dot(data) + bias[index, :]
        featureMap.append(nnet.sigmoid(result))
    return featureMap
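All of these examples call nnet.sigmoid. The nnet module itself is not shown on this page; a minimal sketch of the elementwise logistic function it presumably implements (an assumption, though the unit test in Example #2 below pins down exactly these values):

import numpy as np

def sigmoid(z):
    # Elementwise logistic function 1 / (1 + e^(-z)); accepts scalars or arrays.
    return 1.0 / (1.0 + np.exp(-z))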
Example #2
 def test1_sigmoid(self):
     s = nnet.sigmoid(np.array([[0, 1, 2], [2, 4, -1]]))
     self.assertEqual(s.shape[0], 2)
     self.assertEqual(s.shape[1], 3)
     self.assertAlmostEqual(s[0, 0], 0.5)
     self.assertAlmostEqual(s[0, 1], 0.7310585786300049)
     self.assertAlmostEqual(s[0, 2], 0.8807970779778823)
     self.assertAlmostEqual(s[1, 2], 0.2689414213699951)
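Note the symmetry baked into the expected values: sigmoid(-x) = 1 - sigmoid(x), which is why s[1, 2] (input -1) equals 1 - s[0, 1] (input 1). A quick self-contained check:

import numpy as np

s = 1.0 / (1.0 + np.exp(-np.array([1.0, -1.0])))
assert abs(s[0] + s[1] - 1.0) < 1e-12  # sigmoid(-x) == 1 - sigmoid(x)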
Example #3
File: test.py  Project: seeM/nnet
def test_forward_prop():
    np.random.seed(1)

    X = np.random.randn(3, 3)

    params = init_params([3, 2, 1])

    W1, W2 = params['W']
    b1, b2 = params['b']

    Y_pred, cache = forward_prop(X, params)

    A0 = X
    Z1 = np.dot(W1, A0) + b1
    A1 = sigmoid(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    assert (Z1 == cache['Z'][0]).all()
    assert (A1 == cache['A'][1]).all()
    assert (Z2 == cache['Z'][1]).all()
    assert (A2 == Y_pred).all()
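The test fixes the interfaces it exercises: init_params returns a dict of per-layer lists under 'W' and 'b', and forward_prop returns the final activation plus a cache where cache['Z'] holds [Z1, Z2] and cache['A'] holds [A0, A1, A2]. A minimal implementation consistent with these assertions (the actual seeM/nnet code may differ in details such as weight initialization):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def init_params(layer_sizes):
    # One weight matrix and one bias vector per layer transition.
    W = [np.random.randn(n_out, n_in)
         for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:])]
    b = [np.zeros((n_out, 1)) for n_out in layer_sizes[1:]]
    return {'W': W, 'b': b}

def forward_prop(X, params):
    A, Z = [X], []
    for W, b in zip(params['W'], params['b']):
        Z.append(np.dot(W, A[-1]) + b)  # pre-activation of this layer
        A.append(sigmoid(Z[-1]))        # activation; A[0] is the input itself
    return A[-1], {'Z': Z, 'A': A}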
Example #4
def convolutionFeatureMapMulti(data, bias, w):
    featNum = len(data)  # number of L1 feature maps to be convolved
    size = len(w[0])  # number of L2 features (convolution kernels) per L1 feature map
    # so w is laid out as [L1-feat-num x L2-feat-num x kernel-weights]

    featureMap = []
    for index in range(size):
        for i in range(featNum):
            if i == 0:
                tmp = w[i][index].dot(data[i])
            else:
                tmp += w[i][index].dot(data[i])

        result = tmp + bias[index, :]
        featureMap.append(nnet.sigmoid(result))
    return featureMap
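For shape intuition, here is a hypothetical setup (all dimensions invented for illustration) and the inlined computation of one output map, mirroring a single pass of the loop above:

import numpy as np

# Hypothetical sizes: 3 L1 maps, 4 L2 kernels, 9-dim kernels over 25 positions.
data = [np.random.randn(9, 25) for _ in range(3)]               # L1 feature maps
w = [[np.random.randn(9) for _ in range(4)] for _ in range(3)]  # w[L1-map][kernel]
bias = np.random.randn(4, 1)                                    # one bias per kernel

# Kernel index 0: sum the responses over all L1 maps, add the bias, squash.
result = sum(w[i][0].dot(data[i]) for i in range(3)) + bias[0, :]
fmap0 = 1.0 / (1.0 + np.exp(-result))  # nnet.sigmoid stand-in; shape (25,)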
Example #5
    #step4 plot the learned features
    fig = plt.figure(2)

    for index in range(hiddenSize // 10):  # integer division so range() gets an int
        weight = W1[index, :]
        weight = np.reshape(weight, (28, 28))
        ax = fig.add_subplot(5, 4, 1 + index)
        ax.imshow(weight, mpl.cm.gray)

    plt.show()

    #step5 extract features from test & training data
    #TODO move sigmoid to miscellaneous
    labeled_dataset = nnet.normalization(labeled_dataset)
    test_data = nnet.normalization(test_data)
    train_a1 = nnet.sigmoid(W1.dot(labeled_dataset) + b1)
    test_a1 = nnet.sigmoid(W1.dot(test_data) + b1)

    #step6 softmax regression
    W = softmax.softmax_regression(hiddenSize, numLabels, lmd, train_a1, labeled_labelset, 100)

    #step7 testing
    theta = W.reshape((numLabels, hiddenSize))
    predict = (theta.dot(test_a1)).argmax(0)
    print(predict)
    print(test_labels.flatten())
    accuracy = (predict == test_labels.flatten())
    print('Accuracy:', accuracy.mean())
    print('done')
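One implicit simplification in step7: the softmax normalization is skipped at test time. Softmax is monotonic in its logits, so taking argmax over theta.dot(test_a1) selects the same labels as argmax over the normalized probabilities. A quick demonstration with made-up sizes:

import numpy as np

theta = np.random.randn(10, 200)        # hypothetical (numLabels, hiddenSize)
test_a1 = np.random.rand(200, 50)       # hypothetical hidden activations

logits = theta.dot(test_a1)             # (numLabels, numExamples)
probs = np.exp(logits - logits.max(0))  # subtract the max for numerical stability
probs /= probs.sum(0)
assert (probs.argmax(0) == logits.argmax(0)).all()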
    
    
Example #6
def predict(im, params):
    mu, log_sigmasq = map(np.squeeze, gmlp(im, params))
    return 2*np.pi*sigmoid(mu), 2.*np.tanh(log_sigmasq / 2.)
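The two squashings bound the raw network outputs: 2*pi*sigmoid(mu) lies in (0, 2*pi), suitable for an angle, while 2*tanh(log_sigmasq/2) clamps the log-variance to (-2, 2). At zero input both land on their midpoints:

import numpy as np

sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

print(2 * np.pi * sigmoid(0.0))  # pi, the midpoint of (0, 2*pi)
print(2. * np.tanh(0.0 / 2.))    # 0.0, the midpoint of (-2, 2)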
Example #7
    start = time.time()
    
    #step2 L1 feature learning using sparse autoencoder
    #TODO move normalization to miscellaneous
    training_data = nnet.normalization(training_data)
    #W = nnet.sparseAutoencoder(inputSize,hiddenSizeL1,sparsityParam,lmd,beta,alpha,training_data,iters=500)
    W = load_data('weightL1')
    W = np.array(W)
    W = W.transpose()
    #savedata(W,'weightL1')
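    # Assumed flat layout of the autoencoder parameter vector, inferred from the
    # offsets below: [ W1 (hiddenSizeL1*inputSize) | W2 (inputSize*hiddenSizeL1) |
    #                  b1 (hiddenSizeL1) | b2 (inputSize) ]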
    W11 = np.reshape(W[:hiddenSizeL1 * inputSize, ], (hiddenSizeL1, inputSize))
    b11 = np.reshape(W[2 * hiddenSizeL1 * inputSize:2 * hiddenSizeL1 * inputSize + hiddenSizeL1, ],
                     (hiddenSizeL1, 1))

    #step3 L2 feature learning using sparse autoencoder
    training_a1 = nnet.sigmoid(W11.dot(training_data) + b11)
    #W = nnet.sparseAutoencoder(hiddenSizeL1,hiddenSizeL2,sparsityParam,lmd,beta,0.009,training_a1,iters=500)
    W = load_data('weightL2')
    W = np.array(W)
    W = W.transpose()
    #savedata(W,'weightL2')
    W21 = np.reshape(W[:hiddenSizeL2 * hiddenSizeL1, ], (hiddenSizeL2, hiddenSizeL1))
    b21 = np.reshape(W[2 * hiddenSizeL2 * hiddenSizeL1:2 * hiddenSizeL2 * hiddenSizeL1 + hiddenSizeL2, ],
                     (hiddenSizeL2, 1))

    #step4 plot the learned feature
    #fig = plt.figure(2)

    #for index in range(hiddenSizeL1/10):
    #    weight = W11[index,:]
    #    weight = np.reshape(weight,(28,28))
    #    #print weight.shape