Example #1
0
        # (fragment) tail of the autoencoder parameter dict built above:
        # sigmoid activations for both autoencoder layers — TODO confirm
        # against the part of params_ae outside this view.
        SparseAutoencoder.ACTIVATION_FUNCTION_SIGMOID,
        SparseAutoencoder.ACTIVATION_FUNCTION_SIGMOID
    ]
    params_ae['debug'] = 1

    # Parameters for softmax
    params_softmax = {}
    params_softmax['id'] = 'softmax'
    params_softmax['outputDim'] = outputDim
    params_softmax['featureDim'] = 200
    params_softmax['debug'] = 1

    # Network layout: two stacked autoencoder layers + softmax classifier.
    # NOTE(review): both AE layers share the SAME params dict object; any
    # later mutation of params_ae affects both layers — confirm intended.
    layerParams = [params_ae, params_ae, params_softmax]

    # Read data from file
    labels_training = DataInputOutput.loadMNISTLabels(
        mnist_lbl_filename_training, nSamples_max_train)
    images_training = DataInputOutput.loadMNISTImages(
        mnist_img_filename_training, nSamples_max_train)
    labels_test = DataInputOutput.loadMNISTLabels(mnist_lbl_filename_test,
                                                  nSamples_max_test)
    images_test = DataInputOutput.loadMNISTImages(mnist_img_filename_test,
                                                  nSamples_max_test)

    # Normalize data
    # Scale raw 0-255 pixel values into [0, 1].
    images_training = images_training / 255.0
    images_test = images_test / 255.0

    DNN = DeepNetwork(inputDim,
                      layerParams,
                      lambd=lambd,
                      doFineTuning=doFineTuning,
                      # (fragment continues: remaining DeepNetwork kwargs
                      # are outside this view)
Example #2
0
        return pred


if __name__ == '__main__':

    # --------------------------
    # Example:
    # Housing price prediction
    # --------------------------

    housing_filename = '/home/cem/develop/UFL/data/housing.bin'
    debug = 1
    doPlot = 1

    # Read data from file
    data = DataInputOutput.loadHousingData(housing_filename)

    # Include a row of 1s as an additional intercept feature.
    data = np.vstack((np.ones((1, data.shape[1])), data))

    # Shuffle examples (samples are stored as columns).
    data = data[:, np.random.permutation(data.shape[1])]

    # Split into train and test sets
    # The last row of 'data' is the median home price (regression target).
    # The slice -1:-2:-1 keeps the target as a 2-D (1, n) row instead of a
    # 1-D vector.
    data_train_X = data[0:-1, 0:400]
    data_train_y = data[-1:-2:-1, 0:400]

    # Bug fix: the original sliced columns 401:-1, which silently skipped
    # sample 400 and dropped the last sample; 400: makes the test set the
    # exact complement of the training set.
    data_test_X = data[0:-1, 400:]
    data_test_y = data[-1:-2:-1, 400:]
Example #3
0
        # --- Sparse autoencoder hyper-parameters ---
        imChannels = 1
        inputDim = patchWidth * patchHeight * imChannels
        numFeatures = 25
        lambda_w = 0.0001
        # weight decay parameter
        beta = 3
        # weight of sparsity penalty term
        sparsityParam = 0.01
        # Sigmoid activations for both encoder and decoder layers.
        actFunctions = [
            ACTIVATION_FUNCTION_SIGMOID, ACTIVATION_FUNCTION_SIGMOID
        ]

        # Read data from file
        # (MATLAB .mat file; presumably contains an 'IMAGES' array — see the
        # key used below.)
        data = scipy.io.loadmat(filename_data)

        # Sample random training patches from the loaded images.
        patches = DataInputOutput.samplePatches(data['IMAGES'], patchWidth,
                                                patchHeight, numPatches)

        # Normalize data
        patches = DataNormalization.normZeroToOne(patches)

        # Visual sanity check, only at higher debug verbosity.
        if debug > 1:
            Visualization.displayNetwork(patches[:, 0:100])

        if debug:
            print 'Number of samples: ', patches.shape[1]

        # Layer sizes: input -> hidden features -> reconstruction of input.
        dimLayers = [inputDim, numFeatures, inputDim]

        SAE = SparseAutoencoder(dimLayers=dimLayers,
                                lambda_w=lambda_w,
                                beta=beta,
                                # (fragment continues: remaining constructor
                                # kwargs are outside this view)
Example #4
0
    # Test gradient computation?
    debug = 1
    # --- data / patch-sampling hyper-parameters ---
    numImages = 10000
    imWidth = 28
    imHeight = 28
    numPatches = 10000
    imageChannels = 1
    patchWidth = 9
    patchHeight = 9
    inputDim = patchWidth * patchHeight * imageChannels
    numFeatures = 50
    epsilon = 1e-2
    # Fraction of variance retained by PCA — presumably meant to be passed
    # to PCA.PCA below; see NOTE(review) there.
    lambd = 0.99

    # Read data from file
    images_training = DataInputOutput.loadMNISTImages(
        mnist_img_filename_training, numImages)
    # Reshape flat pixel columns into [height, width, nImages].
    images_training = np.reshape(images_training,
                                 [imHeight, imWidth, images_training.shape[1]])

    # Sample patches
    patches = DataInputOutput.samplePatches(images_training, patchWidth,
                                            patchHeight, numPatches)

    # Visual sanity check, only at higher debug verbosity.
    if debug > 1:
        Visualization.displayNetwork(patches[:, 0:100])

    # Normalize data: ZCA whiten patches
    patches = patches / 255.0
    # NOTE(review): the literal 0.99 duplicates lambd above — presumably the
    # variable was intended here; verify before changing.
    instance_pca = PCA.PCA(inputDim, 0.99, debug)
    patches_ZCAwhite = instance_pca.doZCAWhitening(patches)
Example #5
0
    # --- model / data hyper-parameters ---
    inputDim_img = imWidth * imHeight * imageChannels
    numFeatures = 32
    nClasses = 10
    epsilon = 1e-2
    lambd = 0.99
    poolDim = 5

    #-------------------------
    #       Load Data
    #-------------------------
    if debug: print "Loading data..."

    # Read data from file
    # Total number of images needed to cover all three subsets below.
    numImages = numImages_unlabeled + numImages_training + numImages_test

    images = DataInputOutput.loadMNISTImages(mnist_img_filename_training,
                                             numImages)
    # Reshape flat pixel columns into [height, width, nImages].
    images = np.reshape(images, [imHeight, imWidth, images.shape[1]])
    # Partition images along the sample axis into three consecutive,
    # non-overlapping sets: [unlabeled | training | test].
    images_unlabeled = images[:, :, 0:numImages_unlabeled]
    images_training = images[:, :, numImages_unlabeled:numImages_unlabeled +
                             numImages_training]
    images_test = images[:, :, numImages_unlabeled +
                         numImages_training:numImages_unlabeled +
                         numImages_training + numImages_test]
    labels = DataInputOutput.loadMNISTLabels(mnist_lbl_filename_training,
                                             numImages)
    # Matching labels for the training and test partitions (the unlabeled
    # set, by definition, takes none).
    labels_training = labels[numImages_unlabeled:numImages_unlabeled +
                             numImages_training]
    labels_test = labels[numImages_unlabeled +
                         numImages_training:numImages_unlabeled +
                         numImages_training + numImages_test]
Example #6
0
		# MNIST digit dimensions and number of output classes.
		patchWidth 		= 28;
		patchHeight 	= 28;
		outputDim		= 10;
		
		# Single conv+pool layer configuration used for the gradient check.
		params_layer1						= {}
		params_layer1['numFilters']			= 2;
		params_layer1['filterDim']			= [2, 2];
		params_layer1['poolDim']			= [3, 3];
		params_layer1['poolingFunction']	= POOLING_MEAN;
		params_layer1['debug']				= 1;
		
		params_layers = [params_layer1];
		
		# Small fixed label set (shifted to 0-based) instead of real MNIST
		# labels — presumably to keep the numerical gradient check cheap.
		#testlabel = DataInputOutput.loadMNISTLabels(mnist_lbl_filename_training, numPatches);	
		testlabel = np.array([1,2,3,4,3,2,1,2,3,4])-1;
		testdata = DataInputOutput.loadMNISTImages(mnist_img_filename_training, numPatches);
		# Scale raw 0-255 pixel values into [0, 1].
		testdata = testdata / 255.0;
		testdata = np.reshape(testdata, [patchWidth, patchHeight, testdata.shape[1]]);
		
		# Build a small CNN and run its built-in gradient self-test.
		ConvNet_test = CNN( [patchWidth, patchHeight], outputDim, params_layers, debug=debug);
		
		print 'Checking gradient...'
		
		ConvNet_test.testGradient(testdata, testlabel);
	
	# --- main experiment settings (enclosing scope continues past this view) ---
	debug 				= 1;
	nSamples_max_train 	= 10000;
	nSamples_max_test 	= 10000;
	imWidth				= 28;
	imHeight			= 28;
	outputDim			= 10;