Example No. 1
def test_RMSE_offset_neg(self):
    '''Test that RMSE is correct for constant prediction offset from constant observation (with negatives)'''
    val, diff = -5, -2
    n_elements = 12
    predvec = [val] * n_elements
    obsvec = [val - diff] * n_elements
    self.assertEqual(verify.RMSE(predvec, obsvec), np.abs(diff))
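For reference, a constant prediction offset reduces the RMSE to the absolute value of that offset, which is exactly what this test (and the next one) asserts. A minimal NumPy sketch of the same calculation, for illustration only and not part of the original suite:

import numpy as np

def rmse_reference(pred, obs):
    # Illustrative RMSE: square root of the mean squared difference.
    pred, obs = np.asarray(pred, dtype=float), np.asarray(obs, dtype=float)
    return np.sqrt(np.mean((pred - obs) ** 2))

# A constant offset of -2 yields RMSE = |-2| = 2, matching the assertion above.
print(rmse_reference([-5] * 12, [-3] * 12))  # 2.0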
Example No. 2
def test_RMSE_offset(self):
    '''Test that RMSE is correct for constant prediction offset from constant observation'''
    val, diff = 5, 2
    n_elements = 12
    predvec = [val] * n_elements
    obsvec = [val - diff] * n_elements
    self.assertEqual(verify.RMSE(predvec, obsvec), diff)
Example No. 3
def run_for_TSS(model, trw):
    '''Compute a 27-day rolling RMSE-based skill score of a model against the dummy forecast and plot it.'''
    fdummy = "out/det.dummy.pred.%d.csv" % (trw)
    fname = "out/det.%s.pred.%d.csv" % (model, trw)
    _od = pd.read_csv(fdummy)
    _o = pd.read_csv(fname)
    # Keep only valid rows: defined classifier probability and y_pred within 0-9
    _od = _od[(_od.prob_clsf != -1.) & (_od.y_pred != -1.) & (_od.y_pred >= 0)
              & (_od.y_pred <= 9.)]
    _o = _o[(_o.prob_clsf != -1.) & (_o.y_pred != -1.) & (_o.y_pred >= 0) &
            (_o.y_pred <= 9.)]
    _od.dn = pd.to_datetime(_od.dn)
    _o.dn = pd.to_datetime(_o.dn)

    stime = dt.datetime(1995, 2, 1)
    etime = dt.datetime(2016, 9, 20)
    d = stime
    skill, t = [], []
    while d < etime:
        dn = d + dt.timedelta(days=27)
        dum = _od[(_od.dn >= d) & (_od.dn < dn)]
        mod = _o[(_o.dn >= d) & (_o.dn < dn)]
        try:
            rmse_dum = verify.RMSE(dum.y_pred, dum.y_obs)
            rmse = verify.RMSE(mod.y_pred, mod.y_obs)
            print(d, rmse, rmse_dum, verify.skill(rmse, rmse_dum))
            # Append time and skill together so the two lists stay the same length
            t.append(d)
            skill.append(verify.skill(rmse, rmse_dum))
        except Exception:
            pass  # skip windows with insufficient or invalid data
        # Advance outside the try block so a failed window cannot stall the loop
        d = d + dt.timedelta(days=1)
    skill = np.array(skill)

    fmt = matplotlib.dates.DateFormatter("%d %b\n%Y")
    splot.style("spacepy")
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 6))
    ax.xaxis.set_major_formatter(fmt)
    ax.plot(t, skill, "k.", label="")
    ax.set_ylabel(r"$TSS(\%)$")
    ax.set_xlabel(r"$Time$")
    ax.set_xlim(dt.datetime(1995, 1, 1), dt.datetime(2017, 1, 1))
    ax.set_ylim(0, 100)
    fig.savefig("out/stat/det.%s.tss.%d.png" % (model, trw))
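A hedged usage sketch for run_for_TSS; the model name and training-window length below are placeholders rather than values from the original project, and the out/ CSV files with dn, y_pred, y_obs and prob_clsf columns are assumed to already exist:

# Hypothetical call: "lstm" and 27 are placeholder arguments.
# Requires out/det.dummy.pred.27.csv and out/det.lstm.pred.27.csv
# (columns: dn, y_pred, y_obs, prob_clsf) plus an out/stat/ directory for the PNG.
run_for_TSS("lstm", 27)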
Example No. 4
def run_validation(pred, obs, year, model):
    '''Compute a suite of verification metrics for a prediction/observation pair.'''
    pred, obs = np.array(pred), np.array(obs)
    _eval_details = {}
    # Flag predictions that fall outside the expected 0-9 range
    _eval_details["range"] = "Y" if (max(pred) > 9. or min(pred) < 0.) else "N"

    # (key, metric callable, rounding digits); any metric that fails records NaN
    metrics = [
        ("bias", lambda: verify.bias(pred, obs), 2),
        ("meanPercentageError", lambda: verify.meanPercentageError(pred, obs), 2),
        ("medianLogAccuracy", lambda: verify.medianLogAccuracy(pred, obs), 3),
        ("symmetricSignedBias", lambda: verify.symmetricSignedBias(pred, obs), 3),
        ("meanSquaredError", lambda: verify.meanSquaredError(pred, obs), 2),
        ("RMSE", lambda: verify.RMSE(pred, obs), 2),
        ("meanAbsError", lambda: verify.meanAbsError(pred, obs), 2),
        ("medAbsError", lambda: verify.medAbsError(pred, obs), 2),
        ("nRMSE", lambda: verify.nRMSE(pred, obs), 2),
        ("forecastError", lambda: np.mean(verify.forecastError(pred, obs)), 2),
        ("logAccuracy", lambda: np.mean(verify.logAccuracy(pred, obs)), 2),
        ("medSymAccuracy", lambda: verify.medSymAccuracy(pred, obs), 2),
        ("meanAPE", lambda: verify.meanAPE(pred, obs), 2),
        ("medAbsDev", lambda: verify.medAbsDev(pred), 2),
        ("rSD", lambda: verify.rSD(pred), 2),
        ("rCV", lambda: verify.rCV(pred), 2),
    ]
    for key, metric, ndigits in metrics:
        try:
            _eval_details[key] = np.round(metric(), ndigits)
        except Exception:
            _eval_details[key] = np.nan

    _eval_details["year"] = year
    _eval_details["model"] = model
    r, _ = pearsonr(pred, obs)
    _eval_details["r"] = r
    return _eval_details
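A minimal sketch of calling run_validation on synthetic arrays; the data, year, and model label are assumptions made for illustration, not values from the original project:

import numpy as np

# Placeholder prediction/observation pair within the 0-9 range the function expects.
rng = np.random.default_rng(0)
obs = rng.uniform(0., 9., size=100)
pred = np.clip(obs + rng.normal(0., 0.5, size=100), 0., 9.)

details = run_validation(pred, obs, year=2015, model="dummy")
print(details["RMSE"], details["r"])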
Example No. 5
def test_RMSE_alt0(self):
    '''Test that RMSE is correct for prediction alternating evenly about observation'''
    self.assertEqual(verify.RMSE(self.predalt, self.obsalt), 2)
Example No. 6
def test_RMSE(self):
    '''Test estimation of RMSE for perfect forecast'''
    self.assertEqual(verify.RMSE(self.predvec, self.obsvec), 0)
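The test methods in Examples 1, 2, 5, and 6 rely on a fixture that is not shown here. A minimal sketch of a setUp consistent with their assertions; the concrete vectors are assumptions chosen for illustration, not the original suite's values:

import unittest
import verify  # the forecast-verification module exercised in these examples

class RMSETestSketch(unittest.TestCase):
    def setUp(self):
        # Perfect forecast: identical vectors give RMSE == 0 (Example 6).
        self.predvec = [3.] * 10
        self.obsvec = [3.] * 10
        # Prediction alternating +/-2 about a constant observation gives RMSE == 2 (Example 5).
        self.obsalt = [5.] * 10
        self.predalt = [5. + 2. * (-1) ** i for i in range(10)]

    def test_RMSE(self):
        self.assertEqual(verify.RMSE(self.predvec, self.obsvec), 0)

    def test_RMSE_alt0(self):
        self.assertEqual(verify.RMSE(self.predalt, self.obsalt), 2)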