def nmf(data, components):
    """Factorize `data` with non-negative matrix factorization, returning W.

    Parameters
    ----------
    data : array-like of shape (n_samples, n_features)
        Non-negative input matrix to factorize.
    components : int
        Number of latent components (rank of the factorization).

    Returns
    -------
    W : ndarray of shape (n_samples, components)
        The transformed data (per-sample component weights).
    """
    # Renamed the local from `nmf` to `model`: the original shadowed the
    # function's own name with the fitted estimator.
    model = NMF(n_components=components, init="random", random_state=0)
    W = model.fit_transform(data)
    # The original also computed H = model.components_ and the reconstruction
    # R = np.dot(W, H), but never used either — dropped as dead work.
    return W
'MDS':MDS(), 'MLPClassifier':MLPClassifier(), 'MLPRegressor':MLPRegressor(), 'MaxAbsScaler':MaxAbsScaler(), 'MeanShift':MeanShift(), 'MinCovDet':MinCovDet(), 'MinMaxScaler':MinMaxScaler(), 'MiniBatchDictionaryLearning':MiniBatchDictionaryLearning(), 'MiniBatchKMeans':MiniBatchKMeans(), 'MiniBatchSparsePCA':MiniBatchSparsePCA(), 'MultiTaskElasticNet':MultiTaskElasticNet(), 'MultiTaskElasticNetCV':MultiTaskElasticNetCV(), 'MultiTaskLasso':MultiTaskLasso(), 'MultiTaskLassoCV':MultiTaskLassoCV(), 'MultinomialNB':MultinomialNB(), 'NMF':NMF(), 'NearestCentroid':NearestCentroid(), 'NearestNeighbors':NearestNeighbors(), 'Normalizer':Normalizer(), 'NuSVC':NuSVC(), 'NuSVR':NuSVR(), 'Nystroem':Nystroem(), 'OAS':OAS(), 'OneClassSVM':OneClassSVM(), 'OrthogonalMatchingPursuit':OrthogonalMatchingPursuit(), 'OrthogonalMatchingPursuitCV':OrthogonalMatchingPursuitCV(), 'PCA':PCA(), 'PLSCanonical':PLSCanonical(), 'PLSRegression':PLSRegression(), 'PLSSVD':PLSSVD(), 'PassiveAggressiveClassifier':PassiveAggressiveClassifier(),
def __init__(self, n_components):
    """Set up the NMF model and empty id-lookup tables.

    Parameters
    ----------
    n_components : int
        Number of latent factors for the NMF decomposition.
    """
    # BUG FIX: the original hard-coded n_components=2 and silently ignored
    # the constructor argument; pass the argument through instead.
    self.nmf = NMF(n_components=n_components, init='random', random_state=0)
    # Lookup tables mapping raw ids to matrix indices; presumably filled by a
    # fit/loading step elsewhere — TODO confirm against the rest of the class.
    self.user_ids_dict = {}
    self.book_isbns_dict = {}
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
    """Benchmark NMF initialization strategies on random low-rank matrices.

    For every (n_samples, n_features) pair, fits sklearn's NMF with each of
    the nndsvd / nndsvda / nndsvdar / random initializations plus the
    `alt_nnmf` baseline, recording wall-clock time and reconstruction error.

    Parameters
    ----------
    samples_range, features_range : iterable of int
        Grid of matrix sizes to benchmark.
    rank : int, default 50
        Effective rank of the generated test matrices.
    tolerance : float, default 1e-5
        Convergence tolerance passed to each factorizer.

    Returns
    -------
    timeset, err : dict mapping strategy name -> list of float
        Per-strategy timings and reconstruction errors, one entry per
        (n_samples, n_features) pair in grid order.
    """
    timeset = defaultdict(list)
    err = defaultdict(list)
    # (label, extra NMF kwargs) — the four stanzas in the original differed
    # only in these values, so they are folded into one data-driven loop.
    nmf_variants = [
        ('nndsvd-nmf', {'init': 'nndsvd'}),
        ('nndsvda-nmf', {'init': 'nndsvda'}),
        ('nndsvdar-nmf', {'init': 'nndsvdar'}),
        ('random-nmf', {'init': 'random', 'max_iter': 1000}),
    ]
    for n_samples in samples_range:
        for n_features in features_range:
            print("%2d samples, %2d features" % (n_samples, n_features))
            print('=======================')
            # NMF requires a non-negative input matrix, hence the abs().
            X = np.abs(make_low_rank_matrix(n_samples, n_features,
                                            effective_rank=rank,
                                            tail_strength=0.2))
            for label, kwargs in nmf_variants:
                gc.collect()
                print("benchmarking %s: " % label)
                tstart = time()
                m = NMF(n_components=30, tol=tolerance, **kwargs).fit(X)
                tend = time() - tstart
                timeset[label].append(tend)
                err[label].append(m.reconstruction_err_)
                report(m.reconstruction_err_, tend)
            gc.collect()
            print("benchmarking alt-random-nmf")
            tstart = time()
            W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
            tend = time() - tstart
            timeset['alt-random-nmf'].append(tend)
            error = np.linalg.norm(X - np.dot(W, H))
            err['alt-random-nmf'].append(error)
            # BUG FIX: the original stored np.linalg.norm(...) in `err` but
            # reported a different `norm(...)` helper; use one value for both.
            report(error, tend)
    return timeset, err
def compute_bench(samples_range, features_range, rank=50, tolerance=1e-7):
    """Benchmark NMF initialization strategies over a grid of matrix sizes.

    Same structure as the sibling `benchmark` helper: for every
    (n_samples, n_features) pair, fits sklearn's NMF with several
    initializations plus the `alt_nnmf` baseline, printing progress and
    collecting timings and reconstruction errors.

    Parameters
    ----------
    samples_range, features_range : iterable of int
        Grid of matrix sizes to benchmark.
    rank : int, default 50
        Effective rank of the generated test matrices.
    tolerance : float, default 1e-7
        Convergence tolerance passed to each factorizer.

    Returns
    -------
    timeset, err : dict mapping strategy name -> list of float
        Per-strategy timings and reconstruction errors, in grid order.
    """
    # MODERNIZED: the original used Python-2-only `print` statements while the
    # sibling benchmark in this file is Python 3; converted to print() calls.
    it = 0
    timeset = defaultdict(list)
    err = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    # (label, extra NMF kwargs) — folds the four copy-pasted stanzas of the
    # original into one loop. init=None keeps sklearn's default initialization
    # for the "random-nmf" case, exactly as the original did.
    nmf_variants = [
        ('nndsvd-nmf', {'init': 'nndsvd'}),
        ('nndsvda-nmf', {'init': 'nndsvda'}),
        ('nndsvdar-nmf', {'init': 'nndsvdar'}),
        ('random-nmf', {'init': None, 'max_iter': 1000}),
    ]
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # NMF requires a non-negative input matrix, hence the abs().
            X = np.abs(make_low_rank_matrix(n_samples, n_features,
                                            effective_rank=rank,
                                            tail_strength=0.2))
            for label, kwargs in nmf_variants:
                gc.collect()
                print("benching %s: " % label)
                tstart = time()
                m = NMF(n_components=30, tol=tolerance, **kwargs).fit(X)
                tend = time() - tstart
                timeset[label].append(tend)
                err[label].append(m.reconstruction_err_)
                print(m.reconstruction_err_, tend)
            gc.collect()
            print("benching alt-random-nmf")
            tstart = time()
            W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
            tend = time() - tstart
            timeset['alt-random-nmf'].append(tend)
            # Compute the reconstruction error once and reuse it for both the
            # stored result and the progress printout.
            error = np.linalg.norm(X - np.dot(W, H))
            err['alt-random-nmf'].append(error)
            print(error, tend)
    return timeset, err
# Scoring: lower singleLabelScore is better, hence greater_is_better=False.
scorer = make_scorer(score_func=singleLabelScore, greater_is_better=False)

# --- Preprocessing: scaling ---
minMaxScaler = MinMaxScaler(feature_range=(0.0, 1.0))
# normalizer = skprep.Normalizer()  # alternative scaler, currently unused
columnDeleter = fs.FeatureDeleter()

# --- Feature selection ---
varianceThresholdSelector = VarianceThreshold(threshold=0)
percentileSelector = SelectPercentile(score_func=f_classif, percentile=20)
kBestSelector = SelectKBest(score_func=f_classif, k=1000)

# --- Feature extraction / dimensionality reduction ---
# rbmPipe = skpipe.Pipeline(steps=[('scaling', minMaxScaler), ('rbm', rbm)])
nmf = NMF(n_components=150)
pca = PCA(n_components=80)
sparse_pca = SparsePCA(n_components=700, max_iter=3, verbose=2)
# NOTE: KernelPCA costs huge amounts of RAM.
kernel_pca = KernelPCA(n_components=150)
randomized_pca = RandomizedPCA(n_components=500)

# --- Regressors ---
random_forest_regressor = RandomForestRegressor(n_estimators=256)
gradient_boosting_regressor = GradientBoostingRegressor(n_estimators=60)
support_vector_regressor = svm.SVR()

# --- Classifiers ---
support_vector_classifier = svm.SVC(probability=True, verbose=True)
linear_support_vector_classifier = svm.LinearSVC(dual=False)
nearest_neighbor_classifier = KNeighborsClassifier()
extra_trees_classifier = ExtraTreesClassifier(n_estimators=256)