def test_theanets_partial_fit():
    """Fitting with two trainers at once should match fit + partial_fit in two stages.

    NOTE(review): this file defines `test_theanets_partial_fit` more than once;
    later definitions shadow earlier ones at import time — worth deduplicating.
    """
    full_clf = TheanetsClassifier(
        layers=[2],
        trainers=[
            {'algo': 'rmsprop', 'learning_rate': 0.1},
            {'algo': 'rprop', 'learning_rate': 0.1},
        ],
    )
    staged_clf = TheanetsClassifier(
        layers=[2],
        trainers=[{'algo': 'rmsprop', 'learning_rate': 0.1}],
    )
    X, y, sample_weight = generate_classification_data()

    full_clf.fit(X, y)
    staged_clf.fit(X, y)
    # Second training stage applied incrementally instead of up-front.
    staged_clf.partial_fit(X, y, algo='rprop', learning_rate=0.1)

    assert full_clf.trainers == staged_clf.trainers, 'trainers not saved in partial fit'

    def _auc(model):
        # AUC over the positive-class probability column.
        return roc_auc_score(y, model.predict_proba(X)[:, 1])

    # Known fail of theanets
    assert _auc(full_clf) == _auc(staged_clf), 'same networks return different results'
def test_theanets_reproducibility():
    """Re-fitting the same estimator (or a clone of it) must reproduce the same AUC."""
    estimator = TheanetsClassifier()
    X, y, sample_weight = generate_classification_data()

    def _score(model):
        # AUC over the positive-class probability column.
        return roc_auc_score(y, model.predict_proba(X)[:, 1])

    estimator.fit(X, y)
    baseline_auc = _score(estimator)

    # Repeated fits on identical data should be deterministic.
    for _ in range(2):
        estimator.fit(X, y)
        assert baseline_auc == _score(estimator), 'running a network twice produces different results'

    # A cloned (unfitted copy) estimator should reproduce the same score too.
    copied = clone(estimator)
    copied.fit(X, y)
    assert _score(copied) == baseline_auc, 'cloned network produces different result'
def test_theanets_partial_fit():
    """Two-trainer fit vs. fit followed by partial_fit must agree (optimize-style API).

    NOTE(review): this file defines `test_theanets_partial_fit` more than once;
    later definitions shadow earlier ones at import time — worth deduplicating.
    """
    trainers_both = [{'optimize': 'rmsprop', 'patience': 0}, {'optimize': 'rprop'}]
    clf_complete = TheanetsClassifier(trainers=trainers_both)
    clf_partial = TheanetsClassifier(trainers=[{'optimize': 'rmsprop', 'patience': 0}])

    X, y, sample_weight = generate_classification_data()
    clf_complete.fit(X, y)
    clf_partial.fit(X, y)
    # Apply the second optimizer incrementally on the partially-trained net.
    clf_partial.partial_fit(X, y, optimize='rprop')

    assert clf_complete.trainers == clf_partial.trainers, 'trainers not saved in partial fit'

    auc_complete = roc_auc_score(y, clf_complete.predict_proba(X)[:, 1])
    auc_partial = roc_auc_score(y, clf_partial.predict_proba(X)[:, 1])
    assert auc_complete == auc_partial, 'same networks return different results'
def test_theanets_reproducibility():
    """Training twice — and training a clone — must yield identical AUC.

    NOTE(review): this file defines `test_theanets_reproducibility` more than once;
    later definitions shadow earlier ones at import time — worth deduplicating.
    """
    clf = TheanetsClassifier()
    X, y, sample_weight = generate_classification_data()

    clf.fit(X, y)
    reference_auc = roc_auc_score(y, clf.predict_proba(X)[:, 1])

    # Determinism check: two more fits on the same data give the same score.
    repeat = 0
    while repeat < 2:
        clf.fit(X, y)
        repeated_auc = roc_auc_score(y, clf.predict_proba(X)[:, 1])
        assert reference_auc == repeated_auc, 'running a network twice produces different results'
        repeat += 1

    # Clone check: an unfitted copy trained on the same data matches as well.
    clf_copy = clone(clf)
    clf_copy.fit(X, y)
    copy_auc = roc_auc_score(y, clf_copy.predict_proba(X)[:, 1])
    assert copy_auc == reference_auc, 'cloned network produces different result'
def test_theanets_partial_fit():
    """One-shot training with both trainers should equal staged training via partial_fit.

    NOTE(review): this file defines `test_theanets_partial_fit` more than once;
    later definitions shadow earlier ones at import time — worth deduplicating.
    """
    rmsprop_stage = {'algo': 'rmsprop', 'learning_rate': 0.1}
    rprop_stage = {'algo': 'rprop', 'learning_rate': 0.1}

    clf_complete = TheanetsClassifier(layers=[2], trainers=[rmsprop_stage, rprop_stage])
    clf_partial = TheanetsClassifier(layers=[2], trainers=[rmsprop_stage])

    X, y, sample_weight = generate_classification_data()
    clf_complete.fit(X, y)
    clf_partial.fit(X, y)
    # Run the rprop stage incrementally on the partially-trained classifier.
    clf_partial.partial_fit(X, y, algo='rprop', learning_rate=0.1)

    assert clf_complete.trainers == clf_partial.trainers, 'trainers not saved in partial fit'

    auc_complete = roc_auc_score(y, clf_complete.predict_proba(X)[:, 1])
    auc_partial = roc_auc_score(y, clf_partial.predict_proba(X)[:, 1])
    # Known fail of theanets
    assert auc_complete == auc_partial, 'same networks return different results'
def test_theanets_partial_fit():
    """Staged partial_fit should reproduce a single fit with both optimizers.

    NOTE(review): this file defines `test_theanets_partial_fit` more than once;
    later definitions shadow earlier ones at import time — worth deduplicating.
    """
    whole_run = TheanetsClassifier(
        trainers=[{'optimize': 'rmsprop'}, {'optimize': 'rprop'}],
    )
    staged_run = TheanetsClassifier(trainers=[{'optimize': 'rmsprop'}])
    X, y, sample_weight = generate_classification_data()

    whole_run.fit(X, y)
    staged_run.fit(X, y)
    # Second optimizer applied as an incremental training step.
    staged_run.partial_fit(X, y, optimize='rprop')

    assert whole_run.trainers == staged_run.trainers, 'trainers not saved in partial fit'

    whole_auc = roc_auc_score(y, whole_run.predict_proba(X)[:, 1])
    staged_auc = roc_auc_score(y, staged_run.predict_proba(X)[:, 1])
    assert whole_auc == staged_auc, 'same networks return different results'