Example #1
import scipy.optimize as sop
import sparseAutoencoder  # project-local module (assumed available)
import softmax            # project-local module (assumed available)
# GetDivideSets and feedforward are assumed to be defined elsewhere in this
# project.

if __name__ == '__main__':
    #step1 Initialize the parameters
    inputSize = 28 * 28
    numClasses = 5
    hiddenSize = 200
    sparsityParam = 0.1
    la = 3e-3
    beta = 3

    #step2 Get the unlabeled set, the labeled training set, and the labeled test set
    unlabeledData, trainData, trainLabels, testData, testLabels = GetDivideSets()

    #step3 Train the autoencoder features on the unlabeled set
    theta = sparseAutoencoder.initiallize(hiddenSize, inputSize)
    X, cost, d = sop.fmin_l_bfgs_b(
        lambda x: sparseAutoencoder.sparseAutoencoderCost(
            x, inputSize, hiddenSize, la, sparsityParam, beta, unlabeledData),
        x0=theta, maxiter=400, disp=1)
    # Extract the learned first-layer weights (transposed, e.g. for visualization)
    W1 = X[0:hiddenSize * inputSize].reshape(hiddenSize, inputSize)
    W1 = W1.T
    opttheta = X

    #step4 Compute the labeled training set's activations and use them to train the softmax classifier's weights
    trainImages = feedforward(opttheta, hiddenSize, inputSize, trainData)
    thetaSoftmax = softmax.initiallize(numClasses, hiddenSize)
    la = 1e-4
    X, cost, d = sop.fmin_l_bfgs_b(
        lambda x: softmax.SoftmaxCost(
            trainImages, trainLabels, x, la, hiddenSize, numClasses),
        x0=thetaSoftmax, maxiter=100, disp=1)
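
The feedforward helper used in step 4 is not shown in these examples. Below is a minimal sketch of what it presumably computes, assuming the usual sparse-autoencoder parameter packing (W1, W2, b1, b2) and sigmoid hidden units; the signature is taken from the calls above, the body is an assumption:

import numpy as np

def feedforward(theta, hiddenSize, visibleSize, data):
    # Unpack W1 (hiddenSize x visibleSize) and b1 from the flat parameter
    # vector, assuming the packing order W1, W2, b1, b2.
    W1 = theta[0:hiddenSize * visibleSize].reshape(hiddenSize, visibleSize)
    b1 = theta[2 * hiddenSize * visibleSize:
               2 * hiddenSize * visibleSize + hiddenSize]
    # Hidden-layer sigmoid activations, one training example per column.
    return 1.0 / (1.0 + np.exp(-(W1.dot(data) + b1[:, np.newaxis])))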
Example #2
import scipy.optimize as sop
import sparseAutoencoder  # project-local module (assumed available)
import softmax            # project-local module (assumed available)
# ReadMNIST and feedforward are assumed to be defined elsewhere in this
# project.

if __name__ == '__main__':
    #step1 Parameter settings for a stacked network with two hidden layers
    inputSize = 28 * 28
    numClasses = 10
    hiddenSizeL1 = 200
    hiddenSizeL2 = 200
    sparsityParam = 0.1
    la = 3e-3
    beta = 3

    #step2 Read the MNIST data set; use only 1/3 of it to avoid running out of memory
    trainData, trainLabels, testData, testLabels = ReadMNIST()
    length = trainData.shape[1] // 3  # integer division so the slice index is an int
    trainData = trainData[:, 0:length]
    trainLabels = trainLabels[0:length]
    sae1Theta = sparseAutoencoder.initiallize(hiddenSizeL1, inputSize)

    #step3 Train the first hidden layer's weights
    sae1OptTheta, cost, d = sop.fmin_l_bfgs_b(
        lambda x: sparseAutoencoder.sparseAutoencoderCost(
            x, inputSize, hiddenSizeL1, la, sparsityParam, beta, trainData),
        x0=sae1Theta, maxiter=400, disp=1)
    sae1Features = feedforward(sae1OptTheta, hiddenSizeL1, inputSize, trainData)

    #step4 Train the second hidden layer's weights
    sae2Theta = sparseAutoencoder.initiallize(hiddenSizeL2, hiddenSizeL1)
    sae2OptTheta, cost, d = sop.fmin_l_bfgs_b(
        lambda x: sparseAutoencoder.sparseAutoencoderCost(
            x, hiddenSizeL1, hiddenSizeL2, la, sparsityParam, beta, sae1Features),
        x0=sae2Theta, maxiter=400, disp=1)
    sae2Features = feedforward(sae2OptTheta, hiddenSizeL2, hiddenSizeL1, sae1Features)

    #step5 Train the softmax weights
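    # The example is truncated here. A sketch of how step 5 would likely
    # continue, reusing the softmax interface from Example #1 (assumed
    # continuation, not part of the original snippet):
    saeSoftmaxTheta = softmax.initiallize(numClasses, hiddenSizeL2)
    laSoftmax = 1e-4
    saeSoftmaxOptTheta, cost, d = sop.fmin_l_bfgs_b(
        lambda x: softmax.SoftmaxCost(
            sae2Features, trainLabels, x, laSoftmax, hiddenSizeL2, numClasses),
        x0=saeSoftmaxTheta, maxiter=100, disp=1)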
Example #3

import numpy as np
import scipy.optimize as sop
import sparseAutoencoder  # project-local module (assumed available)
import softmax            # project-local module (assumed available)
# GetDivideSets, ReadMNIST, and feedforward are assumed to be defined
# elsewhere in this project.

if __name__ == '__main__':
    #step1 Initialize the parameters
    inputSize = 28*28
    numClasses = 5
    hiddenSize = 200
    sparsityParam = 0.1
    la = 3e-3
    beta = 3

    #step2 Get the unlabeled set, the labeled training set, and the labeled test set
    unlabeledData, trainData, trainLabels, testData, testLabels = GetDivideSets()

    #step3 Train the autoencoder features on the unlabeled set
    theta = sparseAutoencoder.initiallize(hiddenSize, inputSize)
    X, cost, d = sop.fmin_l_bfgs_b(
        lambda x: sparseAutoencoder.sparseAutoencoderCost(
            x, inputSize, hiddenSize, la, sparsityParam, beta, unlabeledData),
        x0=theta, maxiter=400, disp=1)
    # Extract the learned first-layer weights (transposed, e.g. for visualization)
    W1 = X[0:hiddenSize*inputSize].reshape(hiddenSize, inputSize)
    W1 = W1.T
    opttheta = X

    #step4 Compute the labeled training set's activations and use them to train the softmax classifier's weights
    trainImages = feedforward(opttheta, hiddenSize, inputSize, trainData)
    thetaSoftmax = softmax.initiallize(numClasses, hiddenSize)
    la = 1e-4
    X, cost, d = sop.fmin_l_bfgs_b(
        lambda x: softmax.SoftmaxCost(
            trainImages, trainLabels, x, la, hiddenSize, numClasses),
        x0=thetaSoftmax, maxiter=100, disp=1)

    #step5 Compute the labeled test set's activations and report the softmax classification accuracy; the 5-class accuracy should be around 98%
    testImages = feedforward(opttheta, hiddenSize, inputSize, testData)
    optthetaSoftmax = X
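    # The script stops after computing the test activations. A sketch of the
    # accuracy check the step-5 comment refers to (assumes the softmax
    # parameters reshape to (numClasses, hiddenSize) and that testLabels is a
    # 1-D array of integer class indices; not part of the original snippet):
    scores = optthetaSoftmax.reshape(numClasses, hiddenSize).dot(testImages)
    pred = np.argmax(scores, axis=0)  # argmax of scores = argmax of softmax probabilities
    accuracy = np.mean(pred == testLabels)
    print('5-class test accuracy: %.2f%%' % (accuracy * 100))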
if __name__ == '__main__':
    #step1 Parameter settings for a stacked network with two hidden layers
    inputSize = 28*28
    numClasses = 10
    hiddenSizeL1 = 200
    hiddenSizeL2 = 200
    sparsityParam = 0.1
    la = 3e-3
    beta = 3

    #step2 Read the MNIST data set; use only 1/3 of it to avoid running out of memory
    trainData, trainLabels, testData, testLabels = ReadMNIST()
    length = trainData.shape[1] // 3  # integer division so the slice index is an int
    trainData = trainData[:, 0:length]
    trainLabels = trainLabels[0:length]
    sae1Theta = sparseAutoencoder.initiallize(hiddenSizeL1, inputSize)

    #step3 Train the first hidden layer's weights
    sae1OptTheta, cost, d = sop.fmin_l_bfgs_b(
        lambda x: sparseAutoencoder.sparseAutoencoderCost(
            x, inputSize, hiddenSizeL1, la, sparsityParam, beta, trainData),
        x0=sae1Theta, maxiter=400, disp=1)
    sae1Features = feedforward(sae1OptTheta, hiddenSizeL1, inputSize, trainData)

    #step4 Train the second hidden layer's weights
    sae2Theta = sparseAutoencoder.initiallize(hiddenSizeL2, hiddenSizeL1)
    sae2OptTheta, cost, d = sop.fmin_l_bfgs_b(
        lambda x: sparseAutoencoder.sparseAutoencoderCost(
            x, hiddenSizeL1, hiddenSizeL2, la, sparsityParam, beta, sae1Features),
        x0=sae2Theta, maxiter=400, disp=1)
    sae2Features = feedforward(sae2OptTheta, hiddenSizeL2, hiddenSizeL1, sae1Features)

    #step5 Train the softmax weights
    saeSoftmaxTheta = softmax.initiallize(numClasses, hiddenSizeL2)
    laSoftmax = 1e-4
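    # The snippet is truncated here. A sketch of the remaining softmax
    # training call, mirroring the SoftmaxCost usage in the scripts above
    # (assumed continuation, not part of the original snippet):
    saeSoftmaxOptTheta, cost, d = sop.fmin_l_bfgs_b(
        lambda x: softmax.SoftmaxCost(
            sae2Features, trainLabels, x, laSoftmax, hiddenSizeL2, numClasses),
        x0=saeSoftmaxTheta, maxiter=100, disp=1)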