def test_pc():
    """All observations lie inside the predicted UI, so coverage is 100%.

    Uses the module-level ``pred_ui`` / ``obs`` fixtures defined elsewhere
    in this file; ``mu.pc`` returns percent coverage of the uncertainty
    interval.
    """
    assert mu.pc(pred_ui, obs) == 100
columns=["lower", "upper"], index=model.input_data.index, ) obs = pandas.DataFrame(model.vars[data_type]["p_obs"].value, columns=["value"], index=model.input_data.index) # subset only test data pred_test = pred.ix[test_ix] obs_test = obs.ix[test_ix] pred_ui_test = pred_ui.ix[test_ix] # record statistics for test data output["bias_" + rate_type] = mu.bias(pred_test, obs_test) output["rmse_" + rate_type] = mu.rmse(pred_test, obs_test) output["mae_" + rate_type] = mu.mae(pred_test, obs_test) output["mare_" + rate_type] = mu.mare(pred_test, obs_test) output["pc_" + rate_type] = mu.pc(pred_ui_test, obs_test) output["time_" + rate_type] = elapsed # save information output.to_csv("/clustertmp/dismod/model_comparison_" + str(model_num) + rate_type + str(replicate) + ".csv") # create and save conversion plots dismod3.graphics.plot_acorr(model) pl.savefig("/clustertmp/dismod/model_comparison_" + str(model_num) + rate_type + str(replicate) + "acorr.png") dismod3.graphics.plot_trace(model) pl.savefig("/clustertmp/dismod/model_comparison_" + str(model_num) + rate_type + str(replicate) + "trace.png") # save statistic types (only for 1st replicate) if replicate == 0: model_stats = pandas.DataFrame(["seed", "bias_", "rmse_", "mae_", "mare_", "pc_", "time_"], columns=["stats"]) model_stats.to_csv("/home/j/Project/Models/dismodmr_rate_validation/validity/model_stats.csv")
def test_pc_fail():
    """After pushing one observation outside its UI, coverage drops to 90%.

    NOTE(review): this mutates the module-level ``pred_ui`` fixture in
    place, so it relies on test ordering relative to ``test_pc`` — consider
    operating on a copy to keep the tests independent.
    """
    # Change the first row's UI from [-1, 1] to [-1, 0]; now 1 of the 10
    # observations falls outside its predicted UI bounds.
    # Fix: `.ix` was removed in pandas 1.0; `.loc` on the first index label
    # is the equivalent of the original `.ix[0, 'upper']` assignment
    # regardless of what labels the fixture's index carries.
    pred_ui.loc[pred_ui.index[0], 'upper'] = 0
    pc = mu.pc(pred_ui, obs)
    assert pc == 90