def test_continue_learning():
    """Refitting with init_params=False warm-starts from the learned
    weights and should raise training accuracy from 144 to 150 hits."""
    clf = MLP(
        epochs=25,
        eta=0.5,
        hidden_layers=[5],
        optimizer='gradientdescent',
        activations=['logistic'],
        minibatches=1,
        random_seed=1,
    )
    clf.fit(X, y)
    n_correct = np.sum(y == clf.predict(X))
    assert n_correct == 144, n_correct
    # Continue training without re-initializing the parameters.
    clf.fit(X, y, init_params=False)
    n_correct = np.sum(y == clf.predict(X))
    assert n_correct == 150, n_correct
def _clf_mlp(trX, teX, trY, teY):
    """Interactively train a TF multilayer perceptron and print metrics.

    Prompts the user for the hidden-layer sizes (a Python list literal,
    e.g. ``[64, 32]``) and the number of minibatches, trains a
    ``TfMultiLayerPerceptron`` on (trX, trY), and prints confusion-matrix,
    precision, recall and ROC-AUC scores against (teX, teY).

    Fixes vs. the original: Python 2 ``print`` statements converted to
    the ``print()`` function, and the bare ``input()`` (which in Python 2
    *evaluated* the typed expression) replaced with
    ``ast.literal_eval(input())`` so the layer list is parsed safely
    under Python 3.
    """
    import ast  # local import: only needed for the interactive prompt

    print("MLP")
    print(trX.shape, "trX shape")
    print("Enter Layer for MLP")
    # literal_eval safely parses a list literal such as [64, 32];
    # py2's input() used eval, which py3's input() no longer does.
    layer = ast.literal_eval(input())
    print("factors", factors(trX.shape[0]))
    teY = teY.astype(np.int32)
    trY = trY.astype(np.int32)
    print(trX.shape, "trX shape")
    print("enter no of mini batch")
    mini_batch = int(input())
    mlp = TfMultiLayerPerceptron(eta=0.01,
                                 epochs=100,
                                 hidden_layers=layer,
                                 activations=['relu'] * len(layer),
                                 print_progress=3,
                                 minibatches=mini_batch,
                                 optimizer='adam',
                                 random_seed=1)
    mlp.fit(trX, trY)
    pred = mlp.predict(teX)
    print(_f_count(teY), "test f count")
    pred = pred.astype(np.int32)
    print(_f_count(pred), "pred f count")
    conf_mat = confusion_matrix(teY, pred)
    process_cm(conf_mat, to_print=True)
    print(precision_score(teY, pred), "Precision Score")
    print(recall_score(teY, pred), "Recall Score")
    print(roc_auc_score(teY, pred), "ROC_AUC")
def test_fail_minibatches():
    """Gradient descent with 13 minibatches should still fit the data."""
    model = MLP(
        epochs=100,
        eta=0.5,
        hidden_layers=[5],
        optimizer='gradientdescent',
        activations=['logistic'],
        minibatches=13,
        random_seed=1,
    )
    model.fit(X, y)
    predictions = model.predict(X)
    assert (y == predictions).all()
def test_binary_sgd():
    """Pure SGD (one sample per minibatch) fits the binary dataset."""
    model = MLP(
        epochs=10,
        eta=0.5,
        hidden_layers=[5],
        optimizer='gradientdescent',
        activations=['logistic'],
        minibatches=len(y_bin),  # one minibatch per sample == SGD
        random_seed=1,
    )
    model.fit(X_bin, y_bin)
    predictions = model.predict(X_bin)
    assert (y_bin == predictions).all()
def test_multiclass_gd_acc():
    """Batch gradient descent reaches 100% training accuracy (multiclass)."""
    model = MLP(
        epochs=100,
        eta=0.5,
        hidden_layers=[5],
        optimizer="gradientdescent",
        activations=["logistic"],
        minibatches=1,
        random_seed=1,
    )
    model.fit(X, y)
    predictions = model.predict(X)
    assert (y == predictions).all()
def test_binary_gd_relu():
    """Batch gradient descent with a ReLU hidden layer fits the binary data."""
    model = MLP(
        epochs=100,
        eta=0.5,
        hidden_layers=[5],
        optimizer="gradientdescent",
        activations=["relu"],
        minibatches=1,
        random_seed=1,
    )
    model.fit(X_bin, y_bin)
    predictions = model.predict(X_bin)
    assert (y_bin == predictions).all()
def _clf_mlp(trX, teX, trY, teY):
    """Interactively train a TF multilayer perceptron and print metrics.

    Prompts for the hidden-layer sizes (a Python list literal such as
    ``[64, 32]``) and the number of minibatches, trains a
    ``TfMultiLayerPerceptron`` on (trX, trY), then prints the confusion
    matrix, precision, recall and ROC-AUC against (teX, teY).

    Fixes vs. the original: Python 2 ``print`` statements converted to
    the ``print()`` function, and the bare ``input()`` (which in Python 2
    *evaluated* the typed expression) replaced with
    ``ast.literal_eval(input())`` so the layer list parses safely under
    Python 3.
    """
    import ast  # local import: only needed for the interactive prompt

    print("MLP")
    print(trX.shape, "trX shape")
    print("Enter Layer for MLP")
    # literal_eval safely parses a list literal such as [64, 32];
    # py2's input() used eval, which py3's input() no longer does.
    layer = ast.literal_eval(input())
    print("factors", factors(trX.shape[0]))
    teY = teY.astype(np.int32)
    trY = trY.astype(np.int32)
    print(trX.shape, "trX shape")
    print("enter no of mini batch")
    mini_batch = int(input())
    mlp = TfMultiLayerPerceptron(
        eta=0.01,
        epochs=100,
        hidden_layers=layer,
        activations=['relu'] * len(layer),
        print_progress=3,
        minibatches=mini_batch,
        optimizer='adam',
        random_seed=1)
    mlp.fit(trX, trY)
    pred = mlp.predict(teX)
    print(_f_count(teY), "test f count")
    pred = pred.astype(np.int32)
    print(_f_count(pred), "pred f count")
    conf_mat = confusion_matrix(teY, pred)
    process_cm(conf_mat, to_print=True)
    print(precision_score(teY, pred), "Precision Score")
    print(recall_score(teY, pred), "Recall Score")
    print(roc_auc_score(teY, pred), "ROC_AUC")