def test_lmdb_creation(self):
    params = nn_params.copy()
    params.update({'nclasses': n_classes})

    # Create dataset
    X, Y = datasets.load_digits(return_X_y=True)
    X = preprocessing.StandardScaler().fit_transform(X)
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        X, Y, test_size=test_size, random_state=seed)

    # Save data in .svm format
    tr_svm_f, tr_lmdb_f = os.path.abspath('x_train.svm'), os.path.abspath('x_train.lmdb')
    te_svm_f, te_lmdb_f = os.path.abspath('x_test.svm'), os.path.abspath('x_test.lmdb')
    vocab_path = os.path.abspath('vocab.dat')
    datasets.dump_svmlight_file(x_train, y_train, tr_svm_f)
    datasets.dump_svmlight_file(x_test, y_test, te_svm_f)

    # Create the LMDB databases (and a vocabulary file for the training set)
    lmdb_utils.create_lmdb_from_svm(svm_path=tr_svm_f, lmdb_path=tr_lmdb_f,
                                    vocab_path=vocab_path, **params)
    lmdb_utils.create_lmdb_from_svm(svm_path=te_svm_f, lmdb_path=te_lmdb_f, **params)

    tr_lmdb = SVMConnector(path=tr_svm_f, lmdb_path=tr_lmdb_f, vocab_path=vocab_path)
    te_lmdb = SVMConnector(path=te_svm_f, lmdb_path=te_lmdb_f)

    # Train an MLP from the LMDB data and check train accuracy
    optimizer = GenericSolver(solver_type='SGD', base_lr=0.01, iterations=100)
    clf = MLP(**params)
    clf.fit(tr_lmdb, validation_data=[te_lmdb], solver=optimizer)

    ytr_prob = clf.predict_proba(tr_lmdb)
    acc = metrics.accuracy_score(y_train, ytr_prob.argmax(-1))
    assert acc > 0.7

    # Clean up temporary files and LMDB folders
    os_utils._remove_files([tr_svm_f, te_svm_f, vocab_path])
    os_utils._remove_dirs([tr_lmdb_f, te_lmdb_f])
def create_lmdb_from_svm(svm_path, lmdb_path, vocab_path=None,
                         host='localhost', port=8085,
                         nclasses=2, gpu=True, tmp_folder=None):
    if os.path.exists(lmdb_path):
        print("warning: {} exists, overwriting it".format(lmdb_path))

    tmp_folder = (tempfile.mkdtemp(prefix="pydd_", dir=tmp_folder)
                  if tmp_folder else tempfile.mkdtemp(prefix="pydd_"))

    # Run a single training iteration so the service materializes the LMDB
    train_data = SVMConnector(path=svm_path)
    optimizer = GenericSolver(solver_type='SGD', base_lr=0.01, iterations=1)
    clf = MLP(host=host, port=port, nclasses=nclasses, gpu=gpu, repository=tmp_folder)
    clf.fit(train_data, solver=optimizer)

    # Move the generated files out of the temporary repository
    shutil.move(os.path.join(tmp_folder, "train.lmdb"), lmdb_path)
    if vocab_path:
        shutil.move(os.path.join(tmp_folder, "vocab.dat"), vocab_path)

    # delete service
    clf.delete_service(clf.sname, clear='lib')

    # delete tmp_folder
    shutil.rmtree(tmp_folder)

    return lmdb_path, vocab_path
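# Usage sketch for create_lmdb_from_svm: a minimal example, assuming a
# DeepDetect server is reachable on the default host/port above;
# 'digits.svm' is a hypothetical svmlight file on disk.
lmdb_file, vocab_file = create_lmdb_from_svm(
    svm_path=os.path.abspath('digits.svm'),
    lmdb_path=os.path.abspath('digits.lmdb'),
    vocab_path=os.path.abspath('vocab.dat'),
    nclasses=10, gpu=False)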
# create dataset
X, y = datasets.load_digits(n_class=n_classes, return_X_y=True)
X = preprocessing.StandardScaler().fit_transform(X)
xtr, xte, ytr, yte = model_selection.train_test_split(X, y, **split_params)

# create and save train.svm and test.svm
tr_f = os.path.abspath('x_train.svm')
te_f = os.path.abspath('x_test.svm')
datasets.dump_svmlight_file(xtr, ytr, tr_f)
datasets.dump_svmlight_file(xte, yte, te_f)

# Define models and class weights
clf = MLP(**params)
train_data, test_data = SVMConnector(path=tr_f), SVMConnector(path=te_f)
logs = clf.fit(train_data, validation_data=[test_data], solver=solver,
               class_weights=class_weights, batch_size=128)

# Resume training from the previous service state
params.update({"resume": True})
clf = MLP(**params)
logs = clf.fit(train_data, validation_data=[test_data], solver=solver,
               class_weights=class_weights, batch_size=128)
yte_pred = clf.predict(test_data)
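# Evaluation sketch: score the resumed model's test predictions with
# scikit-learn, mirroring the classification_report usage in the LMDB
# example further down.
print(metrics.classification_report(yte, yte_pred))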
X = preprocessing.StandardScaler().fit_transform(X)
xtr, xte, ytr, yte = model_selection.train_test_split(
    X, Y, test_size=test_size, random_state=seed)
tr_f = os.path.abspath('x_train.svm')
te_f = os.path.abspath('x_test.svm')

#####################
# create connectors #
#####################
# array connector
xtr_arr, xte_arr = ArrayConnector(xtr, ytr), ArrayConnector(xte, yte)

# svm connector
xtr_svm, xte_svm = SVMConnector(tr_f), SVMConnector(te_f)

# array sparse connector
xtr_sparse, xte_sparse = (ArrayConnector(csc_matrix(xtr), ytr),
                          ArrayConnector(csc_matrix(xte), yte))


class TestSVM(object):

    def test_classification(self):
        params = nn_params.copy()
        params.update({'nclasses': n_classes})
        optimizer = GenericSolver(**solver_param)
        datasets.dump_svmlight_file(xtr, ytr, tr_f)
        datasets.dump_svmlight_file(xte, yte, te_f)
port = 8080
iterations = 100
lr = 0.01
gpu = False

X, y = datasets.load_digits(n_class=n_classes, return_X_y=True)
X = preprocessing.StandardScaler().fit_transform(X)
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=test_size, random_state=seed)

tr_f = os.path.abspath('x_train.svm')
te_f = os.path.abspath('x_test.svm')
datasets.dump_svmlight_file(x_train, y_train, tr_f)
datasets.dump_svmlight_file(x_test, y_test, te_f)

# train_data = ArrayConnector(x_train, y_train)
# val_data = ArrayConnector(x_train, y_train)
train_data = SVMConnector(tr_f)
val_data = SVMConnector(te_f)

# MLP: fit twice on the same service, predicting in between
clf = MLP(host=host, port=port, nclasses=n_classes, layers=[100], gpu=gpu)
solver = GenericSolver(iterations=iterations, test_interval=30,
                       solver_type="SGD", base_lr=lr)
clf.fit(train_data, validation_data=[val_data], solver=solver)
clf.predict_proba(train_data)
clf.fit(train_data, validation_data=[val_data], solver=solver)
y_pred = clf.predict_proba(train_data)

# Logistic regression on the same data
clf = LR(host=host, port=port, nclasses=n_classes, gpu=gpu)
solver = GenericSolver(iterations=iterations, solver_type="SGD", base_lr=lr)
clf.fit(train_data, solver=solver)
y_pred = clf.predict_proba(train_data)
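# Scoring sketch: turn the predicted probabilities into class labels and
# compute train-set accuracy with scikit-learn, as the LMDB test above does.
print(metrics.accuracy_score(y_train, y_pred.argmax(-1)))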
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    X, Y, test_size=test_size, random_state=seed)

# Save data in .svm format
tr_svm_f, tr_lmdb_f = os.path.abspath('x_train.svm'), os.path.abspath('x_train.lmdb')
te_svm_f, te_lmdb_f = os.path.abspath('x_test.svm'), os.path.abspath('x_test.lmdb')
vocab_path = os.path.abspath('vocab.dat')
datasets.dump_svmlight_file(x_train, y_train, tr_svm_f)
datasets.dump_svmlight_file(x_test, y_test, te_svm_f)

# create lmdb and vocab file
create_lmdb_from_svm(svm_path=tr_svm_f, lmdb_path=tr_lmdb_f,
                     vocab_path=vocab_path, **params)
create_lmdb_from_svm(svm_path=te_svm_f, lmdb_path=te_lmdb_f, **params)

tr_data = SVMConnector(path=tr_svm_f, lmdb_path=tr_lmdb_f, vocab_path=vocab_path)
te_data = SVMConnector(path=te_svm_f, lmdb_path=te_lmdb_f)

optimizer = GenericSolver(solver_type='SGD', base_lr=0.01, iterations=100)
clf = MLP(**params)
clf.fit(tr_data, validation_data=[te_data], solver=optimizer)
y_pred_lmdb = clf.predict_proba(te_data)
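# Evaluation sketch: convert the test-set probabilities into labels and
# score them against y_test, following the accuracy check used in
# test_lmdb_creation above.
print(metrics.accuracy_score(y_test, y_pred_lmdb.argmax(-1)))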
# create and save train.svm and test.svm
tr_f = os.path.abspath("{}/x_train.svm".format(folder))
te_f = os.path.abspath("{}/x_test.svm".format(folder))
datasets.dump_svmlight_file(xtr, ytr, tr_f)
datasets.dump_svmlight_file(xte, yte, te_f)

# create lmdb dataset
tr_lmdb = os.path.abspath("{}/train.lmdb".format(folder))
te_lmdb = os.path.abspath("{}/test.lmdb".format(folder))
vocab_path = os.path.abspath("{}/vocab.dat".format(folder))
lmdb_utils.create_lmdb_from_svm(tr_f, tr_lmdb, vocab_path, **params)
lmdb_utils.create_lmdb_from_svm(te_f, te_lmdb, **params)

# create lmdb connectors
train_data = SVMConnector(path=tr_f, lmdb_path=tr_lmdb, vocab_path=vocab_path)
test_data = SVMConnector(path=te_f, lmdb_path=te_lmdb)

# Training model from lmdb data
clf = MLP(**params)
optimizer = GenericSolver(solver_type='SGD', iterations=500, base_lr=0.01)
logs = clf.fit(train_data, validation_data=[test_data], solver=optimizer)
yte_pred = clf.predict(test_data)
report = metrics.classification_report(yte, yte_pred)
print(report)

os_utils._remove_dirs([folder])
port = 8080
np.random.seed(seed)  # for reproducibility
split_params = {'test_size': 0.2, 'random_state': seed}
booster_params = {"max_depth": 10, "subsample": 0.8, "eta": 0.3}

# create dataset
X, y = datasets.make_classification(n_samples=n_samples, class_sep=0.4,
                                    n_features=n_features,
                                    n_classes=n_classes, random_state=seed)
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    X, y, **split_params)

# store dataset
train_path = os.path.abspath('x_train.svm')
test_path = os.path.abspath('x_test.svm')
datasets.dump_svmlight_file(x_train, y_train, train_path)
datasets.dump_svmlight_file(x_test, y_test, test_path)

# train model
train_data, val_data = SVMConnector(train_path), SVMConnector(test_path)
clf = XGB(host=host, port=port, nclasses=n_classes)
clf.fit(train_data, validation_data=[val_data], **booster_params)

# predict/metrics
y_test_prob = clf.predict_proba(test_path)
y_test_pred = y_test_prob.argmax(-1)
print(metrics.classification_report(y_test, y_test_pred))
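# Cleanup sketch (assumption: the os_utils helper used by the tests above
# is importable here) removing the temporary svmlight files.
os_utils._remove_files([train_path, test_path])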