Example #1
 def test_grad(self):
     from scipy import optimize
     f = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=False)
     g = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=True)[1]
     x0 = np.array([self.mu.reshape(-1), self.sig.reshape(-1)]).T
     for x in x0:
         self.assertLessEqual(optimize.check_grad(f, g, x), 1e-6)
Example #2
 def test_grad(self):
     from scipy import optimize
     f = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=False)
     g = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=True)[1]
     x0 = np.array([self.mu.reshape(-1),
                    self.sig.reshape(-1)]).T
     for x in x0:
         self.assertLessEqual(optimize.check_grad(f, g, x), 1e-6)
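The two tests above verify the analytical gradient that crps_gaussian returns with grad=True against a finite-difference approximation. A minimal standalone sketch of the same check, with made-up obs/mu/sig values in place of the test fixtures, could look like this:

import numpy as np
from scipy import optimize
import properscoring as ps

obs = 0.3  # assumed observation
f = lambda z: ps.crps_gaussian(obs, z[0], z[1], grad=False)
g = lambda z: ps.crps_gaussian(obs, z[0], z[1], grad=True)[1]  # grad = np.array([dmu, dsig])
for mu, sig in [(0.0, 1.0), (1.5, 0.5), (-2.0, 2.0)]:
    err = optimize.check_grad(f, g, np.array([mu, sig]))
    print(mu, sig, err)  # the tests above require this error to stay below 1e-6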
Example #3
    def mdn_rules(self, model, X, y, samples):
        pis, mus, sigmas = model.eval_network(X)
        res_mu = np.sum(pis.T * mus.T, axis=0)
        sampled = np.array([
            self.sample_mixed(pis, mus, sigmas, j, size=samples)
            for j in range(y.shape[0])
        ])

        log_scores = -np.log(
            np.array([
                self.mixed_desnity(pis, mus, sigmas, y_val, j)
                for j, y_val in enumerate(y)
            ]).clip(0.001))
        crps_scores = np.array([
            ps.crps_gaussian(y_val, mu=sampled[j].mean(), sig=sampled[j].std())
            for j, y_val in enumerate(y.squeeze())
        ])  #fixed
        dss_scores = np.array([
            sc.dss_norm(y_val, loc=res_mu[j], scale=sampled[j, :].std())
            for j, y_val in enumerate(y)
        ])

        scores = dict()
        scores['DSS'] = dss_scores.mean()
        scores['CRPS'] = crps_scores.mean()
        scores['LS'] = log_scores.mean()

        scores_l = dict()
        scores_l['CRPS'] = crps_scores
        scores_l['LS'] = log_scores
        scores_l['DSS'] = dss_scores
        print(scores['DSS'])

        return scores, scores_l
Example #4
def redshift_evaluate(model, test_imgs, test_labels, max_val=3.5):
    """Evaluates the model using the metrics defined in https://arxiv.org/abs/1806.06607
    
    Args:
        model (keras.Model): Compiled and trained keras model.
        test_imgs (numpy.array): Array of test images
        test_labels (numpy.array): Array of redshift values
        max_val (float, optional): Maximum expected redshift value. Necessary for categorical
                conversion. Defaults to 3.5.
    
    Returns a dictionary with the following key-value pairs:
        'pred_bias' (float): Average bias of the model.
        'dev_MAD' (float): Deviation of the Median Absolute Deviation (MAD).
        'frac_outliers' (float): Fraction of predictions that were outliers (defined as
            having absolute bias >5x dev_MAD)
        'avg_crps' (float):  Average Continuous Ranked Probability Score (CRPS)
    """         
    pdfs = model.predict(test_imgs)
    
    step = max_val / model.num_classes
    bin_starts = np.arange(0,max_val,step)
    preds = np.sum((bin_starts+(step/2)) * pdfs, axis=1) # midpoints

    residuals = (preds - test_labels) / (test_labels + 1)
    pred_bias = np.average(residuals)
    dev_MAD = np.median(np.abs(residuals - np.median(residuals))) * 1.4826
    frac_outliers = np.count_nonzero(np.abs(residuals) > (dev_MAD * 5)) / len(residuals)
    crps = np.average(crps_gaussian(preds, np.mean(preds), np.std(preds)))

    return {'pred_bias' : pred_bias,
            'dev_MAD' : dev_MAD,
            'frac_outliers' : frac_outliers,
            'avg_crps' : crps}
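redshift_evaluate scores every object against a single Gaussian built from the mean and standard deviation of the point predictions. A hedged alternative sketch (not from the source) builds a per-object mean and standard deviation from each binned pdf and scores each object separately with crps_gaussian; the toy pdfs and labels below are invented for illustration:

import numpy as np
import properscoring as ps

# toy setup: 3 objects, 7 redshift bins on [0, 3.5); rows of pdfs stand in for softmax outputs
max_val, num_classes = 3.5, 7
step = max_val / num_classes
midpoints = np.arange(0, max_val, step) + step / 2
pdfs = np.random.default_rng(0).dirichlet(np.ones(num_classes), size=3)
test_labels = np.array([0.4, 1.1, 2.7])

mean_z = np.sum(midpoints * pdfs, axis=1)                          # per-object expected redshift
var_z = np.sum((midpoints - mean_z[:, None]) ** 2 * pdfs, axis=1)  # per-object variance of the binned pdf
crps_per_object = ps.crps_gaussian(test_labels, mu=mean_z, sig=np.sqrt(var_z))
print(crps_per_object.mean())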
Example #5
    def bnn_rules(self, model, X, y, samples):
        res_train = model.evaluate(X, samples)
        res_train = res_train.reshape(samples, X.shape[0])
        sampled = res_train.T

        log_scores = -np.log(
            np.array(
                [gaussian_kde(sampled[j]).pdf(y_val)
                 for j, y_val in enumerate(y)]).clip(0.001))  #fixed
        crps_scores = np.array([
            ps.crps_ensemble(y_val, sampled[j])
            for j, y_val in enumerate(y.squeeze())
        ])  #fixed
        crps_scores = np.array([
            ps.crps_gaussian(y_val, mu=sampled[j].mean(), sig=sampled[j].std())
            for j, y_val in enumerate(y.squeeze())
        ])  #fixed
        dss_scores = np.array([
            sc.dss_norm(y_val, loc=sampled[j].mean(), scale=sampled[j].std())
            for j, y_val in enumerate(y)
        ])

        scores = dict()
        scores['CRPS'] = crps_scores.mean()
        scores['LS'] = log_scores.mean()
        scores['DSS'] = dss_scores.mean()

        scores_l = dict()
        scores_l['CRPS'] = crps_scores
        scores_l['LS'] = log_scores
        scores_l['DSS'] = dss_scores

        return scores, scores_l
Example #6
def calc_crps(ts_ori, ts_means, ts_stds):
    crpss = np.zeros_like(ts_means)
    for i in range(crpss.shape[0]):
        for j in range(crpss.shape[1]):
            crpss[i, j] = ps.crps_gaussian(ts_ori[i, j],
                                           mu=ts_means[i, j],
                                           sig=ts_stds[i, j])
    return crpss
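crps_gaussian broadcasts over same-shaped array inputs, so the element-wise double loop above can likely be replaced by a single vectorized call; a minimal sketch (same inputs assumed):

import properscoring as ps

def calc_crps_vectorized(ts_ori, ts_means, ts_stds):
    # broadcasts elementwise over the 2-D arrays, matching the loop in calc_crps above
    return ps.crps_gaussian(ts_ori, mu=ts_means, sig=ts_stds)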
Example #7
def test_xr_crps_gaussian_dask(o_dask, f_dask):
    mu = f_dask.mean('member')
    sig = f_dask.std('member')
    actual = xr_crps_gaussian(o_dask, mu, sig)
    expected = crps_gaussian(o_dask, mu, sig)
    expected = xr.DataArray(expected, coords=o_dask.coords)
    # test for numerical identity of xr_crps_gaussian and crps_gaussian
    assert_allclose(actual, expected)
    # test that xr_crps_gaussian returns chunks
    assert actual.chunks is not None
    # show that crps_gaussian returns no chunks
    assert expected.chunks is None
Example #8
def test_xr_crps_gaussian_dask(a_dask, b_dask):
    mu = b_dask.mean('time')
    sig = b_dask.std('time')
    actual = xr_crps_gaussian(a_dask, mu, sig)
    expected = crps_gaussian(a_dask, mu, sig)
    expected = xr.DataArray(expected, coords=a_dask.coords)
    # test for numerical identity of xr_crps_gaussian and crps_gaussian
    assert_identical(actual, expected)
    # test that xr_crps_gaussian returns chunks
    assert actual.chunks is not None
    # show that crps_gaussian returns no chunks
    assert expected.chunks is None
Example #9
def test_crps_gaussian_dask(o_dask, f_prob_dask, keep_attrs):
    mu = f_prob_dask.mean("member")
    sig = f_prob_dask.std("member")
    actual = crps_gaussian(o_dask, mu, sig, keep_attrs=keep_attrs)
    expected = properscoring.crps_gaussian(o_dask, mu, sig)
    expected = xr.DataArray(expected, coords=o_dask.coords).mean()
    # test for numerical identity of xskillscore crps and properscoring crps
    assert_allclose(actual, expected)
    # test that xskillscore crps_gaussian returns chunks
    assert actual.chunks is not None
    # show that properscoring crps_gaussian returns no chunks
    assert expected.chunks is None
Example #10
 def __call__(self, theta):
     self._coef_from_theta(theta)  # put theta into coef array
     mu = vec_full_mu_res(self.rng, self.rng_fc, SPP_arr=self.SPP_arr,
                          WS_arr=self.WS_arr, WD_arr=self.WD_arr,
                          alpha=self.coef_arr[0],
                          beta=self.coef_arr[1],
                          gamma=self.coef_arr[2],
                          delta=self.coef_arr[3],
                          d_fc=self.d_fc)
     sig = self.coef_arr[4][0] + self.vola_val * self.coef_arr[4][1]  # used as the standard deviation below
     crps_vals = ps.crps_gaussian(self.SPP_fit, mu=mu, sig=sig)
     res = crps_vals.mean()
     return res
Example #11
def test_crps_gaussian_api_and_inputs(o, f_prob, keep_attrs, input_type, chunk_bool):
    """Test that crps_gaussian keeps attributes, chunking, input types and equals
    properscoring.crps_gaussian."""
    o, f_prob = modify_inputs(o, f_prob, input_type, chunk_bool)
    mu = f_prob.mean("member")
    sig = f_prob.std("member")
    actual = crps_gaussian(o, mu, sig, keep_attrs=keep_attrs)
    if input_type == "DataArray":  # properscoring allows only DataArrays
        expected = properscoring.crps_gaussian(o, mu, sig)
        expected = xr.DataArray(expected, coords=o.coords).mean()
        # test for numerical identity of xskillscore crps and properscoring crps
        assert_allclose(actual, expected)
    # test that returns chunks
    assert_chunk(actual, chunk_bool)
    # test that attributes are kept
    assert_keep_attrs(actual, o, keep_attrs)
    # test that input types equal output types
    assign_type_input_output(actual, o)
Example #12
 def crpscostfunc(self, parameters, mu_ens, std_ens, obs):
     """
     Evaluates the analytical gaussian crps for vectorized input of training samples and 4 parameters
     Also analytical calculation of the gradient to each parameter, which is also returned.
     """
     mu = parameters[0] + parameters[1] * mu_ens
     std = np.exp(parameters[2]) * self.stdfunc(std_ens)**parameters[3]
     crps, grad = ps.crps_gaussian(
         obs, mu, std,
         grad=True)  # grad is returned as np.array([dmu, dsig])
     dcrps_d0 = grad[0, :]
     dcrps_d1 = grad[0, :] * mu_ens
     dcrps_d2 = grad[1, :] * std
     dcrps_d3 = dcrps_d2 * self.gradfunc(std_ens)
     return (crps.mean(),
             np.array([
                 dcrps_d0.mean(),
                 dcrps_d1.mean(),
                 dcrps_d2.mean(),
                 dcrps_d3.mean()
             ]))
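Example #12 relies on crps_gaussian(..., grad=True) returning the analytical gradient as np.array([dmu, dsig]), which makes the mean CRPS straightforward to minimize with a gradient-based optimizer. A minimal self-contained sketch (made-up observations, not the author's setup) that fits a shared mu and log-sigma via scipy.optimize.minimize with jac=True:

import numpy as np
from scipy import optimize
import properscoring as ps

obs = np.array([0.2, -0.5, 1.3, 0.7, -1.1])  # assumed observations

def cost(params):
    mu, log_sig = params
    sig = np.exp(log_sig)
    crps, grad = ps.crps_gaussian(obs, mu, sig, grad=True)  # grad is np.array([dmu, dsig])
    # chain rule for the log-sigma parametrization: d/d log_sig = d/d sig * sig
    return crps.mean(), np.array([grad[0].mean(), (grad[1] * sig).mean()])

res = optimize.minimize(cost, x0=np.array([0.0, 0.0]), jac=True, method="L-BFGS-B")
print(res.x)  # fitted mu and log-sigma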
Example #13
    def evaluate_empirical(self, samples=10000):
        """Evaluates a Empirical object with all of the mentioned techniques.
        """
        print("Evaluating empirical model")
        empirical_model = Emp("Empirical model")

        y = np.concatenate([self.y_train, self.y_test])

        start_pos = self.y_train.shape[0]
        end_pos = y.shape[0]

        res = np.array([
            empirical_model.build(y[0:i]).evaluate(samples).reshape(samples)
            for i in range(start_pos, end_pos)
        ],
                       dtype=np.float32)
        mus = res.mean(axis=1)
        sigmas = res.std(axis=1)

        print("Generating plot")
        #generate plot
        plt.figure(figsize=(15, 13), dpi=100)
        plt.title("Empirical Model")
        plt.xlabel("t")
        plt.ylabel(self.out_val)
        plt.plot(np.arange(y.shape[0]),
                 y,
                 '-b',
                 linewidth=1.0,
                 label="Station: " + self.res_name)
        plt.plot(np.arange(start_pos, end_pos),
                 mus,
                 '-r',
                 linewidth=1.1,
                 label='mean of posterior')
        plt.fill_between(np.arange(start_pos, end_pos),
                         np.percentile(res, 5, axis=1),
                         np.percentile(res, 95, axis=1),
                         color="red",
                         alpha=0.2,
                         label="90% confidence region")
        plt.legend()
        plt.savefig(self.directory + "/empirical_data_plot.png",
                    bbox_inches='tight')

        print("Calculating scoring rules")
        log_scores = -np.log(
            np.array([
                norm.pdf(y_val, loc=mus[j], scale=sigmas[j])
                for j, y_val in enumerate(self.y_test)
            ]))
        crps_scores = np.array([
            ps.crps_gaussian(y_val, mu=mus[j], sig=sigmas[j])
            for j, y_val in enumerate(self.y_test)
        ])
        dss_scores = np.array([
            sc.dss_norm(y_val, loc=mus[j], scale=sigmas[j])
            for j, y_val in enumerate(self.y_test)
        ])

        scores = dict()
        scores['CRPS'] = crps_scores.mean()
        scores['LS'] = log_scores.mean()
        scores['DSS'] = dss_scores.mean()

        scores_l = dict()
        scores_l['CRPS'] = crps_scores
        scores_l['LS'] = log_scores
        scores_l['DSS'] = dss_scores

        self.log_scores("empirical", scores,
                        self.directory + "/rules_scores_fix.csv",
                        "Results of Empirical on test set\n")
        self.log_scores_l("empirical", scores_l,
                          self.directory + "/rules_scores_l_fix.csv",
                          "Results of Empirical on test set\n")

        self.generate_rank_hist(
            self.y_test, res, self.directory + "/empirical_rank_hist_test.png",
            "Empirical model rank histogram on test set")
Example #14
 def test_crps_gaussian_broadcast(self):
     expected = crps_gaussian(np.array([0, 1, 2]), mu=0, sig=1)
     actual = crps_gaussian([0, 1, 2], mu=[0], sig=1)
     np.testing.assert_allclose(actual, expected)
Example #15
 def test_crps_gaussian_consistent(self):
     actual = crps_gaussian(self.obs, self.mu, self.sig)
     np.testing.assert_allclose(actual, self.expected, rtol=1e-4)
Example #16
def crps_minimization(std_dev_array, y, yHat_means):
    """Objective for tuning a single predictive standard deviation: returns the mean Gaussian CRPS of y given yHat_means and sig=std_dev_array[0]."""
    return np.mean(ps.crps_gaussian(y, mu=yHat_means, sig=std_dev_array[0]))
Example #17
def crps(y, yHat_means, yHat_variances):
    """yHat_variances is predictive variance, i.e. already includes tau"""
    yHat_std = np.sqrt(yHat_variances)
    return np.mean(ps.crps_gaussian(y, mu=yHat_means, sig=yHat_std))
Example #18
def norm(label, pred):
    return ps.crps_gaussian(label, pred[0], pred[1])
Example #19
import numpy as np
import properscoring as ps

obs = [-2, -1, 0, 1, 2]
baseline_score = ps.crps_ensemble(obs, [0, 0, 0, 0, 0]).mean()
forecast_score = ps.crps_gaussian(obs, mu=0, sig=1).mean()
skill = (baseline_score - forecast_score) / baseline_score
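# skill > 0 means the N(0, 1) forecast attains a lower mean CRPS than the constant-zero baseline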
print(skill)
Example #20
def norm_data(label, pred):
    return ps.crps_gaussian(label, pred[:,0], pred[:,1])
Example #21
 def test_crps_gaussian_consistent(self):
     actual = crps_gaussian(self.obs, self.mu, self.sig)
     np.testing.assert_allclose(actual, self.expected, rtol=1e-4)
Example #22
 def test_crps_gaussian_broadcast(self):
     expected = crps_gaussian(np.array([0, 1, 2]), mu=0, sig=1)
     actual = crps_gaussian([0, 1, 2], mu=[0], sig=1)
     np.testing.assert_allclose(actual, expected)