file.close() # weather_targs = [] # weather_preds = [] # for t in targs: # weather_targs.append(np.argmax(t[:4])) # for p in preds: # weather_preds.append(np.argmax(p[:4])) # print weather_preds[:10] # print weather_targs[:10] # print sklearn.metrics.confusion_matrix(weather_targs,weather_preds) print 'Calculating F2 scores' threshold = 0.53 qpreds = preds > threshold print app.f2_score(targs[:, :17], qpreds[:, :17]) print app.f2_score(targs[:, :17], qpreds[:, :17], average=None) # print 'Calculating F2 scores (argmax for weather class)' # w_pred = preds[:,:4] # cw_pred = np.argmax(w_pred,axis=1) # qw_pred = np.zeros((preds.shape[0],4)) # qw_pred[np.arange(preds.shape[0]),cw_pred] = 1 # qpreds[:,:4] = qw_pred print app.f2_score(targs[:, :17], qpreds[:, :17]) print app.f2_score(targs[:, :17], qpreds[:, :17], average=None) print 'Calculating F2 scores only for weather labels' print app.f2_score(targs[:, :4], qpreds[:, :4]) print app.f2_score(targs[:, :4], qpreds[:, :4], average=None) print 'loglosses' print app.logloss(preds.flatten(), targs.flatten())
def test_score(gts, preds, bias=None, epsilon=1.e-11):
    """Rescale predictions by a per-label bias, threshold at 0.5, and
    return the per-class (average=None) F2 score.

    gts     -- ground-truth binary labels.
    preds   -- predicted probabilities.
    bias    -- bias factor for the current label; defaults to
               ``app.get_biases()[label_id]`` looked up at *call* time.
               (The original default evaluated that expression once, at
               definition time, so any later change to the module-level
               ``label_id`` was silently ignored.)
    epsilon -- unused; kept so existing callers passing it keep working.
    """
    if bias is None:
        bias = app.get_biases()[label_id]
    # Linear rescaling: preds / 0.5 * bias == 2 * bias * preds.
    predictions = np.array(preds) / 0.5 * bias
    preds_cutoff = [1 if p > 0.5 else 0 for p in predictions]
    return app.f2_score(gts, preds_cutoff, average=None)
def score(gts, preds):
    """Hard-threshold probabilities at 0.5 and return per-class F2."""
    hard_labels = [int(p > 0.5) for p in preds]
    return app.f2_score(gts, hard_labels, average=None)
def test_score(gts, preds, bias=None, epsilon=1.e-11):
    """Shift predictions by the label bias in logit space, threshold at
    0.5, and return the per-class (average=None) F2 score.

    gts     -- ground-truth binary labels.
    preds   -- predicted probabilities in (0, 1).
    bias    -- bias for the current label; defaults to
               ``app.get_biases()[label_id]`` looked up at *call* time.
               (The original default evaluated that expression once, at
               definition time, freezing the then-current ``label_id``.)
    epsilon -- unused; kept so existing callers passing it keep working.
    """
    if bias is None:
        bias = app.get_biases()[label_id]
    # logit(0.5) == 0, so this adds logit(bias) to each prediction's logit.
    predictions = expit(logit(preds) - logit(0.5) + logit(bias))
    preds_cutoff = [1 if p > 0.5 else 0 for p in predictions]
    return app.f2_score(gts, preds_cutoff, average=None)
def score(gts, preds):
    """Delegate straight to app.f2_score with its default averaging."""
    f2 = app.f2_score(gts, preds)
    return f2
def score(gts, preds):
    """Binarize `preds` at 0.5 and return the (averaged) F2 score.

    A probability of exactly 0.5 counts as positive, matching the
    original ``np.digitize`` bin edges.

    Fix: the original ``np.digitize(preds, bins=[-0.01, 0.5, 1.01]) - 1``
    produced the invalid labels -1 for p < -0.01 and 2 for p >= 1.01;
    a direct comparison always yields 0/1.
    """
    preds_cutoff = (np.asarray(preds) >= 0.5).astype(int)
    return app.f2_score(gts, preds_cutoff)