Example #1
# <codecell>

# Plot learning curves to see possible overfitting of the trained classifier
from sklearn.metrics import log_loss, roc_auc_score, average_precision_score

def log_loss_metric(y_true, y_pred, sample_weight=None):
    # renamed so the wrapper does not shadow (and recursively call) sklearn's log_loss
    return log_loss(y_true, y_pred, sample_weight=sample_weight)

def roc_auc(y_true, y_pred, sample_weight=None):
    return roc_auc_score(y_true, y_pred, sample_weight=sample_weight)

def average_precision(y_true, y_pred, sample_weight=None):
    return average_precision_score(y_true, y_pred, sample_weight=sample_weight)

figure(figsize=(10, 6))
lc_test = report.learning_curve({'roc_auc(test)': roc_auc}, steps=100)
lc_train = report_train.learning_curve({'roc_auc(train)': roc_auc}, steps=100)
lc_test.plots[0].plot()
lc_train.plots[0].plot()
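
# <codecell>

# Not part of the original example: if the REP-style report objects are
# unavailable, roughly the same train-vs-test ROC AUC curve can be built
# directly from sklearn's staged predictions. A minimal sketch, assuming
# clf is a fitted sklearn GradientBoostingClassifier and X_train, y_train,
# X_test, y_test are already defined (all assumed names).
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score

auc_train, auc_test = [], []
# staged_predict_proba yields class probabilities after each boosting iteration
for proba_tr, proba_te in zip(clf.staged_predict_proba(X_train),
                              clf.staged_predict_proba(X_test)):
    auc_train.append(roc_auc_score(y_train, proba_tr[:, 1]))
    auc_test.append(roc_auc_score(y_test, proba_te[:, 1]))

plt.figure(figsize=(10, 6))
plt.plot(auc_train, label='roc_auc(train)')
plt.plot(auc_test, label='roc_auc(test)')
plt.xlabel('Boosting iterations')
plt.ylabel('ROC AUC')
plt.legend(loc='best')
plt.show()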

# <codecell>

figure(figsize=(10, 6))
av_test = report.learning_curve({'average_precision(test)': average_precision}, steps=100)
av_train = report_train.learning_curve({'average_precision(train)': average_precision}, steps=100)
av_test.plots[0].plot()
av_train.plots[0].plot()

# <codecell>

figure(figsize=(10, 6))
Example #2
# <codecell>

# Plot learning curves to see possible overfitting of the trained classifier
from sklearn.metrics import log_loss, roc_auc_score, average_precision_score

def deviance(y_true, y_pred, sample_weight=None):
    # uses the loss object of the underlying sklearn GradientBoostingClassifier
    return gbc.base_classifier.loss_(y_true, y_pred)

def roc_auc(y_true, y_pred, sample_weight=None):
    return roc_auc_score(y_true, y_pred, sample_weight=sample_weight)

def average_precision(y_true, y_pred, sample_weight=None):
    return average_precision_score(y_true, y_pred, sample_weight=sample_weight)


report.learning_curve({'roc_auc': roc_auc, 'average_precision': average_precision}, steps=1).plot(figsize=(7, 5))
report.learning_curve({'deviance': deviance}, steps=1).plot(figsize=(7, 5))

# <codecell>


plt.plot(gbc.base_classifier.train_score_)
plt.title('Train Score')
plt.xlabel('Iterations')
plt.ylabel('Deviance')
plt.show()
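
# <codecell>

# Not part of the original example: train_score_ only shows the train-set
# deviance. A sketch of the matching test-set curve, assuming an older
# sklearn GradientBoostingClassifier (where loss_ and staged_decision_function
# were public API) and that X_test, y_test are defined.
import numpy as np

clf = gbc.base_classifier
test_deviance = np.zeros(clf.n_estimators)
# staged_decision_function yields raw scores after each boosting iteration
for i, pred in enumerate(clf.staged_decision_function(X_test)):
    test_deviance[i] = clf.loss_(y_test, pred)

plt.figure(figsize=(7, 5))
plt.plot(clf.train_score_, label='train deviance')
plt.plot(test_deviance, label='test deviance')
plt.xlabel('Iterations')
plt.ylabel('Deviance')
plt.legend(loc='best')
plt.show()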

Example #3
# <codecell>

# Plot learning curves to see possible overfitting of the trained classifier
from sklearn.metrics import log_loss, roc_auc_score, average_precision_score

def deviance(y_true, y_pred, sample_weight=None):
    # uses the loss object of the underlying sklearn GradientBoostingClassifier
    return gbc.base_classifier.loss_(y_true, y_pred)

def roc_auc(y_true, y_pred, sample_weight=None):
    return roc_auc_score(y_true, y_pred, sample_weight=sample_weight)

def average_precision(y_true, y_pred, sample_weight=None):
    return average_precision_score(y_true, y_pred, sample_weight=sample_weight)


report.learning_curve({'roc_auc': roc_auc, 'average_precision': average_precision}, steps=10).plot(figsize=(7, 5))

# <codecell>

# Plot learning curves to see possible overfitting of the trained classifier
%pylab inline
from sklearn.metrics import log_loss, roc_auc_score, average_precision_score

def log_loss_metric(y_true, y_pred, sample_weight=None):
    # renamed so the wrapper does not shadow (and recursively call) sklearn's log_loss
    return log_loss(y_true, y_pred, sample_weight=sample_weight)

def roc_auc(y_true, y_pred, sample_weight=None):
    return roc_auc_score(y_true, y_pred, sample_weight=sample_weight)

def average_precision(y_true, y_pred, sample_weight=None):
    return average_precision_score(y_true, y_pred, sample_weight=sample_weight)
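
# <codecell>

# Not part of the original example: a quick standalone check of the metric
# wrappers on toy data (all arrays below are invented for illustration),
# showing that sample weights are now passed through to sklearn.
import numpy as np

y_toy = np.array([0, 1, 1, 0, 1])             # toy labels (assumption)
proba_toy = np.array([0.2, 0.8, 0.6, 0.3, 0.9])  # toy positive-class scores (assumption)
weights_toy = np.array([1.0, 2.0, 1.0, 1.0, 0.5])

print(roc_auc(y_toy, proba_toy, sample_weight=weights_toy))
print(average_precision(y_toy, proba_toy, sample_weight=weights_toy))
print(log_loss_metric(y_toy, proba_toy, sample_weight=weights_toy))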