# Example #1
def test_mini_batch_correct_shapes():
    """Check components_ and transform-output shapes for MiniBatchSparsePCA.

    Covers both an undercomplete (8 < 10 features) and an overcomplete
    (13 > 10 features) decomposition of the same 12 x 10 data matrix.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    # The shared rng is consumed by both fits, in this order, exactly as
    # the sequential version would.
    for n_comp in (8, 13):
        model = MiniBatchSparsePCA(n_components=n_comp, random_state=rng)
        codes = model.fit_transform(X)
        assert_equal(model.components_.shape, (n_comp, 10))
        assert_equal(codes.shape, (12, n_comp))
# Example #2
def test_mini_batch_correct_shapes():
    """MiniBatchSparsePCA must report consistent shapes.

    components_ is (n_components, n_features) and the transformed data is
    (n_samples, n_components), for both under- and overcomplete settings.
    """
    random_state = np.random.RandomState(0)
    data = random_state.randn(12, 10)

    # Undercomplete: fewer components than features.
    estimator = MiniBatchSparsePCA(n_components=8, random_state=random_state)
    embedding = estimator.fit_transform(data)
    assert_equal(estimator.components_.shape, (8, 10))
    assert_equal(embedding.shape, (12, 8))

    # Overcomplete: more components than features.
    estimator = MiniBatchSparsePCA(n_components=13, random_state=random_state)
    embedding = estimator.fit_transform(data)
    assert_equal(estimator.components_.shape, (13, 10))
    assert_equal(embedding.shape, (12, 13))
# Example #3
def decompose_minibatch_sparse_pca(X,
                                   n_components,
                                   alpha=0.8,
                                   n_iter=100,
                                   batch_size=3,
                                   random_state=None):
    """Project ``X`` onto sparse components learned by MiniBatchSparsePCA.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data to decompose.
    n_components : int
        Number of sparse atoms to extract.
    alpha : float, default 0.8
        Sparsity-controlling L1 penalty passed to the estimator.
    n_iter : int, default 100
        Number of mini-batch iterations.
    batch_size : int, default 3
        Samples per mini-batch.
    random_state : RandomState, int or None, default None
        Source of randomness. ``None`` means "a fresh ``RandomState(42)``",
        preserving the deterministic behavior of the first call to the
        previous version.

    Returns
    -------
    ndarray of shape (n_samples, n_components)
        The transformed (sparse-coded) data.
    """
    # BUG FIX: the old signature used the mutable default
    # ``random_state=np.random.RandomState(42)``. That object was created
    # once at function definition and shared across ALL calls, so each call
    # mutated the generator's state and results depended on call history.
    # Creating a freshly seeded generator per call makes every default-call
    # deterministic and independent.
    if random_state is None:
        random_state = np.random.RandomState(42)

    minibatch_sparse_pca = MiniBatchSparsePCA(
        n_components=n_components,
        alpha=alpha,
        n_iter=n_iter,
        batch_size=batch_size,
        random_state=random_state,
    )
    X_minibatch_sparse_pca = minibatch_sparse_pca.fit_transform(X)

    return X_minibatch_sparse_pca
# Example #4
def cluster_sk_mini_batch_sparse_pca(content):
    """Fit MiniBatchSparsePCA on ``content['data']`` and return an HTTP-wrapped
    JSON payload with the embedding, the learned components, and the iteration
    count.

    ``content`` is expected to carry the estimator hyper-parameters under the
    keys 'n_components', 'alpha', 'ridge_alpha', 'n_iter', 'batch_size',
    'shuffle' and 'sk_method'.
    """
    estimator = MiniBatchSparsePCA(
        n_components=content['n_components'],
        alpha=content['alpha'],
        ridge_alpha=content['ridge_alpha'],
        n_iter=content['n_iter'],
        callback=None,
        batch_size=content['batch_size'],
        verbose=0,
        shuffle=content['shuffle'],
        n_jobs=-1,
        method=content['sk_method'],
        random_state=None,
    )
    embedding = estimator.fit_transform(content['data'])
    payload = {
        'result': embedding.tolist(),
        'components': estimator.components_.tolist(),
        'iter': estimator.n_iter_,
    }
    return httpWrapper(json.dumps(payload))
# Example #5 (incomplete Python 2 fragment follows)
				if idx in select_word_idx_list:
					# print idx,len(fea)
					li.append(fea[idx])
			ALL_FEA[i] = li

		NeedPCA = False
			
		if NeedPCA: 
			print 'Len of ALL_FEA: ',len(ALL_FEA)

			print 'Start PCA ... '

			# pca = KernelPCA(n_components = num_af_pca,)
			
			pca = MiniBatchSparsePCA(n_components = num_af_pca,n_jobs = 4,verbose = 1,batch_size = len(ALL_FEA)/10)
			new_all_fea = pca.fit_transform(np.array(ALL_FEA))

			# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

			# pca = LDA(n_components = num_af_pca)
			# new_all_fea = pca.fit_transform(np.array(ALL_FEA))

			ALL_FEA = new_all_fea

			print '\nFinish PCA ... '

		allSongs = []
		head = 0
		tail = 0

		for gen in range(sum(usedGenres)):