Example #1
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
# extract_patches, sparse_encoder and online_dictionary_coder come from the
# sparse-coding library these examples are written against; import paths are project-specific.

def dictionary_learn_ex():

    # learn a dictionary of 225 atoms from 18x18 patches of LFW face images
    patch_shape = (18, 18)
    n_atoms = 225
    n_plot_atoms = 225
    n_nonzero_coefs = 2
    n_jobs = 8

    lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4, color=False)
    n_imgs, h, w = lfw_people.images.shape

    # rescale pixel intensities to [0, 1]
    imgs = []
    for i in range(n_imgs):
        img = lfw_people.images[i, :, :].reshape((h, w))
        img /= 255.
        imgs.append(img)

    print('Extracting reference patches...')
    X = extract_patches(imgs, patch_size=patch_shape[0], scale=False,
                        n_patches=int(1e5), verbose=True, n_jobs=n_jobs)
    print("number of patches:", X.shape[1])

    # sparse coder: Batch Orthogonal Matching Pursuit with 2 non-zero coefficients per patch
    se = sparse_encoder(algorithm='bomp',
                        params={'n_nonzero_coefs': n_nonzero_coefs},
                        n_jobs=n_jobs)

    # online (mini-batch) dictionary learning
    odc = online_dictionary_coder(n_atoms=n_atoms, sparse_coder=se, n_epochs=2,
                                  batch_size=1000, non_neg=False, verbose=True, n_jobs=n_jobs)
    odc.fit(X)
    D = odc.D

    # plot the learned atoms on a 15x15 grid
    plt.figure(figsize=(4.2, 4))
    for i in range(n_plot_atoms):
        plt.subplot(15, 15, i + 1)
        plt.imshow(D[:, i].reshape(patch_shape), cmap=plt.cm.gray)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.0, hspace=0.0)
    plt.show()
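The dictionary D learned above can be used to sparse-code and reconstruct new patches. Below is a minimal NumPy sketch of that step, not part of the library: omp_reconstruct is a hypothetical helper implementing plain orthogonal matching pursuit, assuming D holds unit-norm atoms in its columns and X stores patches as columns, as in the example.

import numpy as np

def omp_reconstruct(x, D, n_nonzero_coefs=2):
    """Greedy OMP: repeatedly pick the atom most correlated with the residual,
    refit the coefficients by least squares, and return the reconstruction."""
    residual = x.copy()
    support = []
    for _ in range(n_nonzero_coefs):
        # atom most correlated with the current residual
        k = int(np.argmax(np.abs(D.T @ residual)))
        if k not in support:
            support.append(k)
        # least-squares coefficients on the selected atoms
        coefs, _, _, _ = np.linalg.lstsq(D[:, support], x, rcond=None)
        residual = x - D[:, support] @ coefs
    return D[:, support] @ coefs

# usage (inside the example above): reconstruct the first extracted patch
# x_hat = omp_reconstruct(X[:, 0], D, n_nonzero_coefs=2)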
Example #2
def two_layer_network():

    batch_size = 1000
    dl = online_dictionary_coder(batch_size=batch_size, beta=None, non_neg=False, verbose=True)
    se = sparse_encoder(algorithm='bomp', params={})
    fe = feature_encoder(algorithm="soft_thresholding",
                         params={'nonzero_percentage': 0.1}, verbose=True)

    # two-layer HMP (hierarchical matching pursuit) network:
    # 64 and 128 atoms, 3x3 filters with stride 1, 2x2 pooling after the first layer
    param_grid = {'n_jobs': 8, 'n_layers': 2, 'n_atoms': [64, 128], 'filter_sizes': [3, 3],
                  'step_sizes': [1, 1], 'pooling_sizes': [2, None], 'pooling_steps': [2, None],
                  'n_nonzero_coefs': [1, 2], 'dict_learners': [dl, dl], 'feature_encoders': [fe, fe],
                  'sparse_coders': [se, se], 'n_training_patches': [int(1e6), int(2e5)]}

    workspace = workspace_manager()
    workspace.set_metadata(param_grid)
    build_hmp_net(param_grid, workspace)
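For reference, the filter, step and pooling sizes above determine the spatial extent of each layer's feature maps. The sketch below is plain Python, not library code: feature_map_size is a hypothetical helper using standard valid-convolution arithmetic, and the 32x32 input is an arbitrary example; how build_hmp_net actually pads or strides may differ.

def feature_map_size(input_size, filter_size, step_size, pooling_size=None, pooling_step=None):
    """Spatial size of one layer's output: slide a filter with the given step,
    then optionally apply strided pooling."""
    out = (input_size - filter_size) // step_size + 1
    if pooling_size is not None:
        out = (out - pooling_size) // pooling_step + 1
    return out

# e.g. a 32x32 input through the two layers defined above
# (3x3 filters, stride 1, 2x2 pooling with step 2 after the first layer):
# layer1 = feature_map_size(32, 3, 1, pooling_size=2, pooling_step=2)   # -> 15
# layer2 = feature_map_size(layer1, 3, 1)                               # -> 13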
Example #3
def ScSPM_caltech101_l1():
    # path to the Caltech101 dataset (set data_path before running)
    # data_path =
    imd = img_dataset(data_path, maxdim=300, online=True,
                      color=param_set.get("color"))  # 'param_set' is assumed to be a module-level config dict
    imgs = imd()
    y = imd.y

    # define the architecture
    n_atoms = 1024
    patch_shape = (16, 16)
    step_size = 6
    n_descriptors = int(1e6)
    # the l1 penalty parameter
    _lambda = 0.15
    normalizer = l2_normalizer
    metadata = {'name': "ScSPM_caltech101",
                'desc': "ScSPM using sparse coding on top of SIFT features",
                'n_atoms': n_atoms,
                'lambda': _lambda,
                'patch_shape': patch_shape,
                'step_size': step_size,
                'n_descriptors': n_descriptors,
                'pooling': 'max pooling',
                'normalization': 'l2'}

    wm = workspace_manager(metadata=metadata)
    wm.set_metadata(metadata)
    wm.save("labels.npy", y)
    sp = spatial_pyramid("ScSPM", workspace=wm, mmap=False)
    n_jobs = 8

    # SIFT extractor used in the initial dictionary-learning phase
    feature_extractor = sift_extractor(n_descriptors=n_descriptors,
                                       patch_shape=patch_shape, mmap=False, n_jobs=n_jobs)
    # l1-penalized (lasso) sparse coder
    se = sparse_encoder(algorithm='lasso', params={'lambda': _lambda}, n_jobs=n_jobs)
    odc = online_dictionary_coder(n_atoms=n_atoms, sparse_coder=se,
                                  batch_size=int(1e3), verbose=True, mmap=False, n_jobs=n_jobs)

    # learn the dictionary
    sp.dict_learn(imgs, feature_extractor=feature_extractor, dict_learner=odc)
    se.n_jobs = 1
    # extract ScSPM features: dense SIFT, sparse coding, max pooling over a (1, 2, 4) spatial pyramid
    sc_spm = sc_spm_extractor(feature_extractor=dsift_extractor(step_size=step_size, patch_size=patch_shape[0]),
                              levels=(1, 2, 4), sparse_coder=se, pooling_operator=sc_max_pooling(),
                              normalizer=normalizer)

    sp.extract(imgs, pyramid_feat_extractor=sc_spm, n_jobs=n_jobs)
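The metadata above records max pooling and l2 normalization: ScSPM pools the sparse codes of the SIFT descriptors inside each spatial-pyramid cell and concatenates the pooled vectors. Below is a rough NumPy illustration of that pooling step only; max_pool_cell, l2_normalize and cell_codes are hypothetical names and do not stand in for the library's sc_max_pooling or l2_normalizer.

import numpy as np

def max_pool_cell(codes):
    """codes: (n_atoms, n_descriptors_in_cell) sparse codes of one pyramid cell.
    Returns the element-wise maximum of absolute values over the cell."""
    return np.max(np.abs(codes), axis=1)

def l2_normalize(v, eps=1e-10):
    return v / (np.linalg.norm(v) + eps)

# a pyramid with levels (1, 2, 4) has 1 + 4 + 16 = 21 cells; the final image
# descriptor concatenates the 21 pooled vectors and l2-normalizes the result:
# feature = l2_normalize(np.concatenate([max_pool_cell(c) for c in cell_codes]))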
Example #4
def one_layer_network():

    batch_size = 1000
    n_epochs = 1
    # non-negative dictionary learning with a non-negative OMP sparse coder
    dl = online_dictionary_coder(batch_size=batch_size, n_epochs=n_epochs,
                                 beta=None, non_neg=True, verbose=True)
    se = sparse_encoder(algorithm='nnomp', params={})
    # define the architecture: soft thresholding keeps only 10% of the coefficients
    # (the remaining 90% are set to zero), and a spatial pyramid with 1, 2 and 4 cells
    # per level is used.
    fe = feature_encoder(algorithm="soft_thresholding",
                         params={'nonzero_percentage': 0.1}, verbose=True)
    param_set = {'n_jobs': 8, 'n_layers': 1, 'n_atoms': [256], 'filter_sizes': [7],
                 'step_sizes': [2], 'pooling_sizes': [None], 'pooling_steps': [None],
                 'n_nonzero_coefs': [2], 'dict_learners': [dl], 'spm_levels': (1, 2, 4),
                 'sparse_coders': [se], 'feature_encoders': [fe],
                 'n_training_patches': [int(1e6)], 'color': True}

    workspace = workspace_manager()
    workspace.set_metadata(param_set)
    build_hmp_net(param_set, workspace=workspace)
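The "soft_thresholding" feature encoder with nonzero_percentage=0.1 keeps roughly the largest 10% of coefficients by magnitude. The NumPy sketch below illustrates that idea only; soft_threshold_top_fraction is a hypothetical helper and the library's exact thresholding rule may differ.

import numpy as np

def soft_threshold_top_fraction(z, nonzero_percentage=0.1):
    """Shrink coefficients toward zero so that only about the given fraction
    of the largest-magnitude entries remain non-zero."""
    k = max(1, int(round(nonzero_percentage * z.size)))
    # threshold = magnitude of the k-th largest coefficient
    thresh = np.sort(np.abs(z).ravel())[-k]
    return np.sign(z) * np.maximum(np.abs(z) - thresh, 0.0)

# z = np.random.randn(256); encoded = soft_threshold_top_fraction(z, 0.1)
# roughly 10% of the 256 entries (those strictly above the threshold) stay non-zero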