def test_predict_from_model_array(self):
    params = nn_params.copy()
    params.update({'nclasses': n_classes})
    optimizer = GenericSolver(**solver_param)
    datasets.dump_svmlight_file(xtr, ytr, tr_f)
    datasets.dump_svmlight_file(xte, yte, te_f)

    # Train model
    clf = MLP(**params)
    clf.fit(xtr_arr, validation_data=[xte_arr], solver=optimizer)
    y_pred_tr = clf.predict(xtr_arr)
    y_pred_te = clf.predict(xte_arr)

    # Load from trained model
    params = nn_params.copy()
    params.update({'finetuning': True, 'template': None, 'nclasses': n_classes})
    clf = MLP(sname=clf.sname, repository=clf.model['repository'], **params)

    # Predictions from the reloaded model must match the original ones
    assert np.array_equal(y_pred_tr, clf.predict(xtr_arr))
    assert np.array_equal(y_pred_te, clf.predict(xte_arr))

    os_utils._remove_files([tr_f, te_f])
def test_lmdb_creation(self):
    params = nn_params.copy()
    params.update({'nclasses': n_classes})

    # Create dataset
    X, Y = datasets.load_digits(return_X_y=True)
    X = preprocessing.StandardScaler().fit_transform(X)
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        X, Y, test_size=test_size, random_state=seed)

    # Save data in .svm format
    tr_svm_f, tr_lmdb_f = os.path.abspath('x_train.svm'), os.path.abspath('x_train.lmdb')
    te_svm_f, te_lmdb_f = os.path.abspath('x_test.svm'), os.path.abspath('x_test.lmdb')
    vocab_path = os.path.abspath('vocab.dat')
    datasets.dump_svmlight_file(x_train, y_train, tr_svm_f)
    datasets.dump_svmlight_file(x_test, y_test, te_svm_f)

    lmdb_utils.create_lmdb_from_svm(svm_path=tr_svm_f, lmdb_path=tr_lmdb_f,
                                    vocab_path=vocab_path, **params)
    lmdb_utils.create_lmdb_from_svm(svm_path=te_svm_f, lmdb_path=te_lmdb_f, **params)

    tr_lmdb = SVMConnector(path=tr_svm_f, lmdb_path=tr_lmdb_f, vocab_path=vocab_path)
    te_lmdb = SVMConnector(path=te_svm_f, lmdb_path=te_lmdb_f)

    optimizer = GenericSolver(solver_type='SGD', base_lr=0.01, iterations=100)
    clf = MLP(**params)
    clf.fit(tr_lmdb, validation_data=[te_lmdb], solver=optimizer)

    ytr_prob = clf.predict_proba(tr_lmdb)
    acc = metrics.accuracy_score(y_train, ytr_prob.argmax(-1))
    assert acc > 0.7

    os_utils._remove_files([tr_svm_f, te_svm_f, vocab_path])
    os_utils._remove_dirs([tr_lmdb_f, te_lmdb_f])
def create_lmdb_from_svm(svm_path, lmdb_path, vocab_path=None, host='localhost',
                         port=8085, nclasses=2, gpu=True, tmp_folder=None):
    if os.path.exists(lmdb_path):
        print("warning: {} exists, overwriting it".format(lmdb_path))

    # dir=None makes tempfile fall back to the system temp directory
    tmp_folder = tempfile.mkdtemp(prefix="pydd_", dir=tmp_folder)

    # A single training iteration is enough to make the server materialize
    # the lmdb database (and vocabulary file) in the model repository
    train_data = SVMConnector(path=svm_path)
    optimizer = GenericSolver(solver_type='SGD', base_lr=0.01, iterations=1)
    clf = MLP(host=host, port=port, nclasses=nclasses, gpu=gpu, repository=tmp_folder)
    clf.fit(train_data, solver=optimizer)

    shutil.move(os.path.join(tmp_folder, "train.lmdb"), lmdb_path)
    if vocab_path:
        shutil.move(os.path.join(tmp_folder, "vocab.dat"), vocab_path)

    # delete service
    clf.delete_service(clf.sname, clear='lib')

    # delete tmp_folder
    shutil.rmtree(tmp_folder)

    return lmdb_path, vocab_path
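# Usage sketch for create_lmdb_from_svm (illustration only, not from the
# original source): it assumes a DeepDetect server is listening on
# localhost:8085 and that 'data.svm' is a hypothetical, pre-existing file
# in svmlight format.
lmdb_f, vocab_f = create_lmdb_from_svm(
    svm_path=os.path.abspath('data.svm'),
    lmdb_path=os.path.abspath('data.lmdb'),
    vocab_path=os.path.abspath('vocab.dat'),
    nclasses=2, gpu=False)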
X = preprocessing.StandardScaler().fit_transform(X)
xtr, xte, ytr, yte = model_selection.train_test_split(X, y, **split_params)

# create and save train.svm and test.svm
tr_f = os.path.abspath('x_train.svm')
te_f = os.path.abspath('x_test.svm')
datasets.dump_svmlight_file(xtr, ytr, tr_f)
datasets.dump_svmlight_file(xte, yte, te_f)

# Define models and class weights
clf = MLP(**params)
train_data, test_data = SVMConnector(path=tr_f), SVMConnector(path=te_f)
logs = clf.fit(train_data, validation_data=[test_data], solver=solver,
               class_weights=class_weights, batch_size=128)

# Resume training from the state saved by the previous fit
params.update({"resume": True})
clf = MLP(**params)
logs = clf.fit(train_data, validation_data=[test_data], solver=solver,
               class_weights=class_weights, batch_size=128)

yte_pred = clf.predict(test_data)
report = metrics.classification_report(yte, yte_pred)
print(report)
tr_f = os.path.abspath('x_train.svm')
te_f = os.path.abspath('x_test.svm')
datasets.dump_svmlight_file(xtr, ytr, tr_f)
datasets.dump_svmlight_file(xte, yte, te_f)

# create connectors
xtr_svm, xte_svm = SVMConnector(tr_f), SVMConnector(te_f)

# train model, snapshotting the weights every 100 iterations
params = {'host': host, 'port': port, 'nclasses': nclasses, 'layers': [100]}
optimizer = GenericSolver(solver_type='SGD', iterations=500, base_lr=0.1,
                          snapshot=100)
clf = MLP(sname=sname, repository=model_repo, **params)
clf.fit(xtr_svm, validation_data=[xte_svm, xtr_svm], solver=optimizer)
del clf

# load the pre-trained model back from its repository
params = {'host': host, 'port': port, 'nclasses': nclasses,
          'finetuning': True, 'template': None}
clf = MLP(sname=sname, repository=model_repo, **params)
ytr_pred, yte_pred = clf.predict(xtr_svm), clf.predict(xte_svm)
report = metrics.classification_report(yte, yte_pred)
print(report)
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    X, Y, test_size=test_size, random_state=seed)

# Save data in .svm format
tr_svm_f, tr_lmdb_f = os.path.abspath('x_train.svm'), os.path.abspath('x_train.lmdb')
te_svm_f, te_lmdb_f = os.path.abspath('x_test.svm'), os.path.abspath('x_test.lmdb')
vocab_path = os.path.abspath('vocab.dat')
datasets.dump_svmlight_file(x_train, y_train, tr_svm_f)
datasets.dump_svmlight_file(x_test, y_test, te_svm_f)

# create lmdb and vocab file
create_lmdb_from_svm(svm_path=tr_svm_f, lmdb_path=tr_lmdb_f,
                     vocab_path=vocab_path, **params)
create_lmdb_from_svm(svm_path=te_svm_f, lmdb_path=te_lmdb_f, **params)

tr_data = SVMConnector(path=tr_svm_f, lmdb_path=tr_lmdb_f, vocab_path=vocab_path)
te_data = SVMConnector(path=te_svm_f, lmdb_path=te_lmdb_f)

optimizer = GenericSolver(solver_type='SGD', base_lr=0.01, iterations=100)
clf = MLP(**params)
clf.fit(tr_data, validation_data=[te_data], solver=optimizer)
y_pred_lmdb = clf.predict_proba(te_data)
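# Hedged continuation (not in the original snippet): score the lmdb-backed
# probabilities the same way test_lmdb_creation does above.
acc = metrics.accuracy_score(y_test, y_pred_lmdb.argmax(-1))
print("lmdb accuracy: {:.3f}".format(acc))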
# create and save train.svm and test.svm
tr_f = os.path.abspath("{}/x_train.svm".format(folder))
te_f = os.path.abspath("{}/x_test.svm".format(folder))
datasets.dump_svmlight_file(xtr, ytr, tr_f)
datasets.dump_svmlight_file(xte, yte, te_f)

# create lmdb dataset
tr_lmdb = os.path.abspath("{}/train.lmdb".format(folder))
te_lmdb = os.path.abspath("{}/test.lmdb".format(folder))
vocab_path = os.path.abspath("{}/vocab.dat".format(folder))
lmdb_utils.create_lmdb_from_svm(tr_f, tr_lmdb, vocab_path, **params)
lmdb_utils.create_lmdb_from_svm(te_f, te_lmdb, **params)

# create lmdb connectors
train_data = SVMConnector(path=tr_f, lmdb_path=tr_lmdb, vocab_path=vocab_path)
test_data = SVMConnector(path=te_f, lmdb_path=te_lmdb)

# Training model from lmdb data
clf = MLP(**params)
optimizer = GenericSolver(solver_type='SGD', iterations=500, base_lr=0.01)
logs = clf.fit(train_data, validation_data=[test_data], solver=optimizer)

yte_pred = clf.predict(test_data)
report = metrics.classification_report(yte, yte_pred)
print(report)

os_utils._remove_dirs([folder])