Example #1
            training_set = train
            testing_set = test
        else:
            training_set = np.c_[training_set, train]
            testing_set = np.c_[testing_set, test]

        labels = 10 * [index]
        testing_labels += labels
        training_labels += 8 * labels
    testing_labels = np.array(testing_labels)
    training_labels = np.array(training_labels)

    training_data = samplingImg(training_set)
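
    # samplingImg is assumed to draw random patchsize-by-patchsize patches from
    # the training images; a minimal sketch of that sampling (the helper name
    # and signature below are illustrative, not the original implementation):
    def samplePatches(images, patchsize, imagesize, numPatches=10000):
        patches = np.zeros((patchsize * patchsize, numPatches))
        for i in range(numPatches):
            # pick a random image column and a random top-left corner
            img = images[:, np.random.randint(images.shape[1])].reshape(imagesize, imagesize)
            r = np.random.randint(imagesize - patchsize + 1)
            c = np.random.randint(imagesize - patchsize + 1)
            patches[:, i] = img[r:r + patchsize, c:c + patchsize].flatten()
        return patches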

    # step2 L1 feature learning using sparse autoencoder
    W = nnet.sparseAutoencoder(inputSize, hiddenSizeL1, sparsityParam, lmd, beta, alpha, training_data)
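    # sparseAutoencoder is assumed to minimize squared reconstruction error
    # plus an lmd-weighted weight-decay term and a beta-weighted sparsity
    # penalty: the KL divergence between the target activation sparsityParam
    # and each hidden unit's mean activation rho_hat. A sketch of that penalty
    # term (not the library code):
    def sparsityPenalty(rho, rho_hat):
        return np.sum(rho * np.log(rho / rho_hat)
                      + (1 - rho) * np.log((1 - rho) / (1 - rho_hat)))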
    W1 = np.reshape(W[:hiddenSizeL1 * inputSize], (hiddenSizeL1, inputSize))
    b1 = np.reshape(W[2 * hiddenSizeL1 * inputSize : 2 * hiddenSizeL1 * inputSize + hiddenSizeL1], (hiddenSizeL1, 1))
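    # The flat vector W is assumed to be packed as [W1, W2, b1, b2] (consistent
    # with the b1 offset above); the decoder parameters can be recovered the
    # same way if reconstruction is needed:
    W2 = np.reshape(W[hiddenSizeL1 * inputSize : 2 * hiddenSizeL1 * inputSize], (inputSize, hiddenSizeL1))
    b2 = np.reshape(W[2 * hiddenSizeL1 * inputSize + hiddenSizeL1 :], (inputSize, 1))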

    # step3 convolution layer, compute feature map
    # TODO extract imagesize
    step = 1
    imagesize = 50
    convWeight = convolutionWeight(W1, patchsize, imagesize, step)
    featureMap = convolutionFeatureMap(training_set, b1, convWeight)
    # step4 pooling layer
    poolingSize = 2
    poolingCore = 1.0 / math.pow(poolingSize, 2) * np.ones((1, poolingSize * poolingSize))
    featureSize = int(math.sqrt(featureMap[0].shape[0]))  # side length of each (square) feature map
    poolingWeight = convolutionWeight(poolingCore, poolingSize, featureSize, poolingSize)
    poolingWeight = poolingWeight[0]
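
    # poolingCore is a flat averaging kernel (every entry 1/poolingSize^2), so
    # the pooling step amounts to mean pooling over non-overlapping
    # poolingSize-by-poolingSize blocks. A direct sketch of the same
    # computation on one square feature map fm (assumed layout):
    def meanPool(fm, poolingSize):
        n = fm.shape[0]
        return fm.reshape(n // poolingSize, poolingSize,
                          n // poolingSize, poolingSize).mean(axis=(1, 3))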
Example #2
    #step2 divide training set into feature learning set & supervised training set
    # digits 0-4 form the labeled set; digits 5-9 are used (unlabeled) for feature learning,
    # matching the dataset slicing below
    labeled_set = np.nonzero(labels <= 4)
    unlabeled_set = np.nonzero(labels >= 5)
    
    labeled_dataset = data[:,(labels<=4).flatten()]
    labeled_labelset = labels[(labels<=4).flatten()]
    unlabeled_dataset = data[:,(labels>=5).flatten()]

    # keep one third of the unlabeled data to speed up training (integer division)
    unlabeled_dataset = unlabeled_dataset[:, :unlabeled_dataset.shape[1] // 3]
    print(unlabeled_dataset.shape)

    #step3 feature learning using sparse autoencoder
    #TODO move normalization to miscellaneous
    unlabeled_dataset = nnet.normalization(unlabeled_dataset)
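    # nnet.normalization is the project's own helper; a common choice for
    # sigmoid-autoencoder inputs (an assumption here, not necessarily what
    # nnet does) is to center, clip at 3 standard deviations, and rescale
    # into [0.1, 0.9]:
    def normalizeForSigmoid(data):
        data = data - data.mean()
        std3 = 3.0 * data.std()
        data = np.clip(data, -std3, std3) / std3
        return (data + 1.0) * 0.4 + 0.1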
    W = nnet.sparseAutoencoder(inputSize,hiddenSize,sparsityParam,lmd,beta,alpha,unlabeled_dataset)
    W1 = np.reshape(W[:hiddenSize*inputSize], (hiddenSize, inputSize))
    b1 = np.reshape(W[2*hiddenSize*inputSize : 2*hiddenSize*inputSize+hiddenSize], (hiddenSize, 1))

    #step4 plot the learned feature
    fig = plt.figure(2)

    # show the first hiddenSize//10 learned features as 28x28 images
    # (the 5x4 subplot grid assumes hiddenSize//10 <= 20)
    for index in range(hiddenSize // 10):
        weight = W1[index, :]
        weight = np.reshape(weight, (28, 28))
        ax = fig.add_subplot(5, 4, 1 + index)
        ax.imshow(weight, cmap=mpl.cm.gray)

    plt.show()

    #step5 extract features from test & training data
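    # A minimal sketch of step5, assuming the features are the hidden
    # activations of the trained autoencoder (logistic sigmoid of W1 x + b1);
    # extractFeatures is an illustrative name, not an existing helper:
    def extractFeatures(W1, b1, data):
        return 1.0 / (1.0 + np.exp(-(W1.dot(data) + b1)))

    train_features = extractFeatures(W1, b1, nnet.normalization(labeled_dataset))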